"""
DBGET Database Interface
========================
"""
from __future__ import absolute_import
import sys
import re
from contextlib import closing
from . import entry
from .entry import fields
from . import api
def iter_take(source_iter, n):
"""
Return a list of the first `n` items in `source_iter`.
"""
source_iter = iter(source_iter)
return [item for _, item in zip(range(n), source_iter)]
def batch_iter(source_iter, n):
"""
Split the `source_iter` into batches of size `n`.
"""
source_iter = iter(source_iter)
while True:
batch = iter_take(source_iter, n)
if batch:
yield batch
else:
break
def chain_iter(chains_iter):
    # Flatten an iterator of iterators into a single stream of elements.
    for chain in chains_iter:
        for element in chain:
            yield element
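# Illustrative sketch (not part of the original module): how the three helpers
# above compose. `batch_iter` splits any iterable into fixed-size chunks and
# `chain_iter` flattens the batches back into a single stream.
def _demo_batching():
    batches = list(batch_iter(range(7), 3))    # [[0, 1, 2], [3, 4, 5], [6]]
    flattened = list(chain_iter(batches))      # [0, 1, 2, 3, 4, 5, 6]
    return batches, flattened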
# TODO: DBDataBase should be able to be constructed from a flat text
# entry file. The precache etc. should be moved in caching api, that creates
# simple file system hierarchy where the flat database is saved (with db
# release string), e.g.
# genes/hsa.dbget
# genes/hsa.release
# genes/sce.dbget
# path.dbget
# module.dbget
# ligand/compound.dbget
class DBDataBase(object):
"""
Base class for a DBGET database interface.
"""
#: ENTRY_TYPE constructor (a :class:`~.entry.DBEntry` subclass). This
#: should be redefined in subclasses.
ENTRY_TYPE = entry.DBEntry
#: A database name/abbreviation (e.g. 'pathway'). Needs to be set in a
    #: subclass or object instance's constructor before calling the base
    #: class __init__.
DB = None
def __init__(self, **kwargs):
if not self.DB:
raise TypeError("Cannot make an instance of abstract base "
"class %r." % type(self).__name__)
self.api = api.CachedKeggApi()
self._info = None
#TODO invalidate cache by KEGG release
#self.api.set_default_release(self.info.release)
self._keys = []
@property
def info(self): #lazy info loading
if not self._info:
self._info = self.api.info(self.DB)
return self._info
def iterkeys(self):
"""
Return an iterator over the `keys`.
"""
return iter(self._keys)
def iteritems(self):
"""
Return an iterator over the `items`.
"""
batch_size = 100
iterkeys = self.iterkeys()
return chain_iter(zip(batch, self.batch_get(batch))
for batch in batch_iter(iterkeys, batch_size))
def itervalues(self):
"""
Return an iterator over all :obj:`DBDataBase.ENTRY_TYPE` instances.
"""
batch_size = 100
iterkeys = self.iterkeys()
return chain_iter(self.batch_get(batch)
for batch in batch_iter(iterkeys, batch_size))
if sys.version_info < (3, ):
def keys(self):
"""
Return a list of database keys. These are unique KEGG identifiers
that can be used to query the database.
"""
return list(self._keys)
def values(self):
"""
Return a list of all :obj:`DBDataBase.ENTRY_TYPE` instances.
"""
return self.batch_get(self.keys())
def items(self):
"""
Return a list of all (key, :obj:`DBDataBase.ENTRY_TYPE` instance)
tuples.
"""
return list(zip(self.keys(), self.batch_get(self.keys())))
else:
def keys(self):
"""
Return an iterator over all database keys. These are unique
KEGG identifiers that can be used to query the database.
"""
return iter(self._keys)
def values(self):
"""
Return an iterator over all :obj:`DBDataBase.ENTRY_TYPE` instances.
"""
return self.itervalues()
def items(self):
"""
Return an iterator over all (key, :obj:`DBDataBase.ENTRY_TYPE`)
tuples.
"""
return self.iteritems()
def get(self, key, default=None):
"""
Return an :obj:`DBDataBase.ENTRY_TYPE` instance for the `key`.
        Return `default` if the `key` is not found.
"""
try:
return self.__getitem__(key)
except KeyError:
return default
def has_key(self, key):
return self.__contains__(key)
def __getitem__(self, key):
e = self.get_entry(key)
if e is None:
raise KeyError(key)
else:
return e
def __contains__(self, key):
return key in set(self.keys())
def __len__(self):
        return len(self._keys)
def __iter__(self):
return iter(self.keys())
def get_text(self, key):
"""
Return the database entry for `key` as plain text.
"""
key = self._add_db(key)
return self.api.get([key])
def get_entry(self, key):
"""
Return the database entry for `key` as an instance of `ENTRY_TYPE`.
"""
text = self.get_text(key)
if not text or text == "None":
return None
else:
return self.ENTRY_TYPE(text)
def find(self, name):
"""
Find `name` using kegg `find` api.
"""
res = self.api.find(self.DB, name).splitlines()
return [r.split(" ", 1)[0] for r in res]
def pre_cache(self, keys=None, batch_size=10, progress_callback=None):
"""
Retrieve all the entries for `keys` and cache them locally for faster
subsequent retrieval. If `keys` is ``None`` then all entries will be
retrieved.
"""
if not isinstance(self.api, api.CachedKeggApi):
raise TypeError("Not an instance of api.CachedKeggApi")
if batch_size > 10 or batch_size < 1:
raise ValueError("Invalid batch_size")
if keys is None:
keys = self.keys()
keys = map(self._add_db, keys)
get = self.api.get
# drop all keys with a valid cache entry to minimize the number
# of 'get' requests.
with closing(get.cache_store()) as store:
def is_uncached(key):
cache_key = get.key_from_args((key,))
return not get.key_has_valid_cache(cache_key, store)
keys = [key for key in keys if is_uncached(key)]
start = 0
while start < len(keys):
batch = keys[start: start + batch_size]
self.api.get(batch)
if progress_callback:
progress_callback(100.0 * start / len(keys))
start += batch_size
def batch_get(self, keys):
"""
        Batch retrieve all entries for `keys`. This can be significantly
        faster than retrieving each entry separately, especially if the
        entries are not yet cached.
"""
entries = []
batch_size = 10
keys = list(map(self._add_db, keys))
# Precache the entries first
self.pre_cache(keys)
start = 0
while start < len(keys):
batch = keys[start: start + batch_size]
batch_entries = self.api.get(batch)
if batch_entries is not None:
batch_entries = batch_entries.split("///\n")
# Remove possible empty last line
batch_entries = [e for e in batch_entries if e.strip()]
entries.extend(map(self.ENTRY_TYPE, batch_entries))
start += batch_size
return entries
def _add_db(self, key):
"""
Prefix the key with '%(DB)s:' string if not already prefixed.
"""
if not key.startswith(self.DB + ":"):
return self.DB + ":" + key
else:
return key
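# Hedged usage sketch (added for illustration; `Genes` is defined further down
# in this module). A concrete subclass exposes the dict-like interface of
# `DBDataBase`; `batch_get` fetches entries in chunks of 10 from the KEGG REST
# service, so running this requires network access.
def _demo_dbdatabase_usage():
    genes = Genes("hsa")                  # KEGG Genes database for H. sapiens
    some_keys = iter_take(genes.keys(), 5)
    entries = genes.batch_get(some_keys)  # list of GeneEntry instances
    return dict(zip(some_keys, entries))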
@entry.entry_decorate
class GenomeEntry(entry.DBEntry):
"""
Entry for a KEGG Genome database.
"""
FIELDS = [
("ENTRY", fields.DBEntryField),
("NAME", fields.DBNameField),
("DEFINITION", fields.DBDefinitionField),
("ANNOTATION", fields.DBSimpleField),
("TAXONOMY", fields.DBTaxonomyField),
("DATA_SOURCE", fields.DBSimpleField),
("ORIGINAL_DB", fields.DBSimpleField),
("KEYWORDS", fields.DBSimpleField),
("DISEASE", fields.DBSimpleField),
("COMMENT", fields.DBSimpleField),
("CHROMOSOME", fields.DBFieldWithSubsections),
("PLASMID", fields.DBSimpleField),
("STATISTICS", fields.DBSimpleField),
("REFERENCE", fields.DBReference)
]
MULTIPLE_FIELDS = ["REFERENCE"]
def __init__(self, text):
entry.DBEntry.__init__(self, text)
@property
def organism_code(self):
"""
A three or four letter KEGG organism code (e.g. 'hsa', 'sce', ...)
"""
return self.name.split(",", 1)[0]
@property
def taxid(self):
"""
Organism NCBI taxonomy id.
"""
return self.TAXONOMY.taxid
def org_code(self):
# for backwards compatibility; return the `organism_code`
return self.organism_code
class Genome(DBDataBase):
"""
    An interface to the KEGG GENOME database.
"""
DB = "genome"
ENTRY_TYPE = GenomeEntry
# For obiTaxonomy.common_taxids mapping
TAXID_MAP = {
"562": "511145", # Escherichia coli K-12 MG1655
"2104": "272634", # Mycoplasma pneumoniae M129
"4530": "39947", # Oryza sativa ssp. japonica cultivar Nipponbare (Japanese rice)
"4932": "559292", # Saccharomyces cerevisiae S288C
"4896": "284812", # Schizosaccharomyces pombe 972h-
}
def __init__(self):
DBDataBase.__init__(self)
self._org_list = self.api.list_organisms()
self._keys = [org.entry_id for org in self._org_list]
def _key_to_gn_entry_id(self, key):
res = self.find(key)
if len(res) == 0:
raise KeyError("Unknown key")
elif len(res) > 1:
raise ValueError("Not a unique key")
else:
return res[0]
@classmethod
def common_organisms(cls):
return ['ath', 'bta', 'cel', 'cre', 'dre', 'ddi',
'dme', 'eco', 'hsa', 'mmu', 'mpn', 'osa',
'pfa', 'rno', 'sce', 'spo', 'zma', 'xla']
@classmethod
def essential_organisms(cls):
return ['ddi', 'dme', 'hsa', 'mmu', 'sce']
def org_code_to_entry_key(self, code):
"""
Map an organism code ('hsa', 'sce', ...) to the corresponding kegg
identifier (T + 5 digit number).
"""
for org in self._org_list:
if org.org_code == code:
return org.entry_id
else:
raise ValueError("Unknown organism code '%s'" % code)
def search(self, string, relevance=False):
"""
Search the genome database for string using ``bfind``.
"""
if relevance:
raise NotImplementedError("relevance is no longer supported")
if string in self.TAXID_MAP:
string = self.TAXID_MAP[string]
res = self.api.find(self.DB, string).strip()
if not res:
return []
res = res.splitlines()
res = [r.split(",", 1)[0] for r in res]
res = [r.split(None, 1)[1] for r in res]
return res
@entry.entry_decorate
class GeneEntry(entry.DBEntry):
FIELDS = [
("ENTRY", fields.DBEntryField),
("NAME", fields.DBNameField),
("DEFINITION", fields.DBDefinitionField),
("ORTHOLOGY", fields.DBSimpleField),
("ORGANISM", fields.DBSimpleField),
("PATHWAY", fields.DBPathway),
("MODULE", fields.DBSimpleField),
("DISEASE", fields.DBSimpleField),
("DRUG_TARGET", fields.DBSimpleField),
("CLASS", fields.DBSimpleField),
("MOTIF", fields.DBSimpleField),
("DBLINKS", fields.DBDBLinks),
("STRUCTURE", fields.DBSimpleField),
("POSITION", fields.DBSimpleField),
("AASEQ", fields.DBAASeq),
("NTSEQ", fields.DBNTSeq)
]
def aliases(self):
return [self.entry_key] + \
(self.name.split(",") if self.name else []) + \
([link[1][0] for link in self.dblinks.items()]
if self.dblinks else [])
@property
def alt_names(self):
"""
For backwards compatibility.
"""
return self.aliases()
class Genes(DBDataBase):
"""
Interface to the KEGG Genes database.
:param str org_code: KEGG organism code (e.g. 'hsa').
"""
DB = None # Needs to be set in __init__
ENTRY_TYPE = GeneEntry
def __init__(self, org_code):
# TODO: Map to org code from kegg id (T + 5 digits)
self.DB = org_code
self.org_code = org_code
DBDataBase.__init__(self)
self._keys = self.api.get_genes_by_organism(org_code)
def gene_aliases(self):
aliases = {}
for entry in self.itervalues():
aliases.update(
dict.fromkeys(entry.aliases(),
self.org_code + ":" + entry.entry_key)
)
return aliases
@entry.entry_decorate
class CompoundEntry(entry.DBEntry):
FIELDS = [
("ENTRY", fields.DBEntryField),
("NAME", fields.DBNameField),
("FORMULA", fields.DBSimpleField),
("EXACT_MASS", fields.DBSimpleField),
("MOL_WEIGHT", fields.DBSimpleField),
("REMARK", fields.DBSimpleField),
("COMMENT", fields.DBSimpleField),
("REACTION", fields.DBSimpleField),
("PATHWAY", fields.DBPathway),
("ENZYME", fields.DBSimpleField),
("BRITE", fields.DBSimpleField),
("REFERENCE", fields.DBSimpleField),
("DBLINKS", fields.DBDBLinks),
("ATOM", fields.DBSimpleField),
("BOND", fields.DBSimpleField)
]
class Compound(DBDataBase):
DB = "cpd"
ENTRY_TYPE = CompoundEntry
def __init__(self):
DBDataBase.__init__(self)
self._keys = [d.entry_id for d in self.api.list("cpd")]
@entry.entry_decorate
class ReactionEntry(entry.DBEntry):
FIELDS = [
("ENTRY", fields.DBEntryField),
("NAME", fields.DBNameField),
("DEFINITION", fields.DBDefinitionField),
("EQUATION", fields.DBSimpleField),
("ENZYME", fields.DBSimpleField)
]
class Reaction(DBDataBase):
DB = "rn"
ENTRY_TYPE = ReactionEntry
def __init__(self):
DBDataBase.__init__(self)
self._keys = [d.entry_id for d in self.api.list("rn")]
class Brite(DBDataBase):
DB = "br"
class Disease(DBDataBase):
DB = "ds"
class Drug(DBDataBase):
DB = "dr"
@entry.entry_decorate
class EnzymeEntry(entry.DBEntry):
FIELDS = [
("ENTRY", fields.DBEntryField),
("NAME", fields.DBNameField),
("CLASS", fields.DBSimpleField),
("SYSNAME", fields.DBSimpleField),
("REACTION", fields.DBSimpleField),
("ALL_REAC", fields.DBSimpleField),
("SUBSTRATE", fields.DBSimpleField),
("PRODUCT", fields.DBSimpleField),
("COMMENT", fields.DBSimpleField),
("REFERENCE", fields.DBReference),
("PATHWAY", fields.DBPathway),
("ORTHOLOGY", fields.DBSimpleField),
("GENES", fields.DBSimpleField),
("DBLINKS", fields.DBDBLinks)
]
MULTIPLE_FIELDS = ["REFERENCE"]
class Enzyme(DBDataBase):
DB = "ec"
ENTRY_TYPE = EnzymeEntry
def __init__(self):
DBDataBase.__init__(self)
self._keys = [d.entry_id for d in self.api.list("ec")]
@entry.entry_decorate
class OrthologyEntry(entry.DBEntry):
FIELDS = [
("ENTRY", fields.DBEntryField),
("NAME", fields.DBNameField),
("CLASS", fields.DBSimpleField),
("DBLINKS", fields.DBDBLinks),
("GENES", fields.DBSimpleField),
]
class Orthology(DBDataBase):
DB = "ko"
ENTRY_TYPE = OrthologyEntry
def __init__(self):
DBDataBase.__init__(self)
self._keys = [d.entry_id for d in self.api.list("ko")]
@entry.entry_decorate
class PathwayEntry(entry.DBEntry):
FIELDS = [
("ENTRY", fields.DBEntryField),
("NAME", fields.DBNameField),
("DESCRIPTION", fields.DBSimpleField),
("CLASS", fields.DBSimpleField),
("PATHWAY_MAP", fields.DBPathwayMapField),
("MODULE", fields.DBSimpleField),
("DISEASE", fields.DBSimpleField),
("DRUG", fields.DBSimpleField),
("DBLINKS", fields.DBDBLinks),
("ORGANISM", fields.DBSimpleField),
("GENE", fields.DBGeneField),
("ENZYME", fields.DBEnzymeField),
("COMPOUND", fields.DBCompoundField),
("REFERENCE", fields.DBReference),
("REL_PATHWAY", fields.DBSimpleField),
("KO_PATHWAY", fields.DBSimpleField),
]
MULTIPLE_FIELDS = ["REFERENCE"]
@property
def gene(self):
if hasattr(self, "GENE"):
genes = self.GENE._convert()
else:
return None
org = self.organism
org_prefix = ""
if org:
match = re.findall(r"\[GN:([a-z]+)\]", org)
if match:
org_prefix = match[0] + ":"
genes = [org_prefix + g for g in genes]
return genes
class Pathway(DBDataBase):
"""
KEGG Pathway database
:param str prefix:
KEGG Organism code ('hsa', ...) or 'map', 'ko', 'ec' or 'rn'
"""
DB = "path"
ENTRY_TYPE = PathwayEntry
def __init__(self, prefix="map"):
DBDataBase.__init__(self)
self.prefix = prefix
valid = [d.org_code for d in self.api.list_organisms()] + \
["map", "ko", "ec", "rn"]
if prefix not in valid:
raise ValueError("Invalid prefix %r" % prefix)
self._keys = [d.entry_id for d in self.api.list("pathway/" + prefix)]
from collections import defaultdict
from itertools import product
from bluesky.run_engine import RunEngine
# path to various states
idle = ['panicked', 'idle']
running = idle + ['running']
soft_pausing = running + ['soft_pausing']
hard_pausing = running + ['hard_pausing']
paused = soft_pausing + ['paused']
aborting = paused + ['aborting']
panicked = ['panicked']
state_paths_dict = {
'idle': idle,
'running': running,
'aborting': aborting,
'soft_pausing': soft_pausing,
'hard_pausing': hard_pausing,
'paused': paused,
'panicked': panicked}
def goto_state(state_machine, desired_state):
print("\nNavigating state machine to state [[%s]]" % desired_state)
for state in state_paths_dict[desired_state]:
print('current state = [[%s]]' % state_machine.state)
print('attempting to go to state [[%s]]' % state)
state_machine.set_(state)
def tautologically_define_state_machine_transitions(state_machine):
"""Create a mapping of all transitions in ``state_machine``
Parameters
----------
state_machine : super_state_machine.machines.StateMachine
The state machine you want a complete map of
Returns
-------
dict
Dictionary of all transitions in ``state_machine``
Structured as
{from_state1: [(to_state, allowed), ...],
from_state2: [(to_state, allowed), ...],
}
where
- ``allowed`` is a boolean
- ``from_stateN`` is a string
- ``to_state`` is a string
"""
transitions_as_enum = state_machine.__class__._meta['transitions']
transitions_as_names = {
to_state.value: [from_state.value for from_state in from_states]
for to_state, from_states in transitions_as_enum.items()}
transition_map = defaultdict(list)
all_states = set(state_machine.States.states())
for to_state, from_states in transitions_as_names.items():
for from_state in all_states:
allowed = True
if from_state not in from_states:
allowed = False
transition_map[to_state].append((from_state, allowed))
return transition_map
def define_state_machine_transitions_from_class(state_machine):
all_states = set(state_machine.States.states())
transitions = {from_state: set(to_states) for from_state, to_states
in state_machine.Meta.transitions.items()}
for transition in state_machine.Meta.named_transitions:
try:
transition_name, to_state, from_states = transition
except ValueError:
transition_name, to_state = transition
from_states = all_states
for from_state in from_states:
transitions[from_state].add(to_state)
transition_map = defaultdict(list)
for from_state, to_states in transitions.items():
for to_state in all_states:
allowed = True
if to_state not in to_states:
allowed = False
transition_map[from_state].append((to_state, allowed))
return transition_map
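# Illustrative sketch (not in the original file): both helpers above return a
# mapping of {state: [(other_state, allowed), ...]}; this prints only the
# allowed transitions, which makes the map easier to eyeball in test output.
def print_allowed_transitions(transition_map):
    for state, pairs in sorted(transition_map.items()):
        allowed = sorted(other for other, ok in pairs if ok)
        print('%s -> %s' % (state, ', '.join(allowed)))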
def setup_test_run_engine():
RE = RunEngine()
RE.md['owner'] = 'test_owner'
RE.md['group'] = 'Grant No. 12345'
RE.md['config'] = {'detector_model': 'XYZ', 'pixel_size': 10}
RE.md['beamline_id'] = 'test_beamline'
return RE
if __name__ == "__main__":
from bluesky import RunEngineStateMachine
sm = RunEngineStateMachine()
transition_map = dict(tautologically_define_state_machine_transitions(sm))
from pprint import pprint
pprint(transition_map)
#!/usr/bin/env python
import os
import sys
import re
from distutils.dep_util import newer_group, newer
from glob import glob
from os.path import join
def needs_veclib_wrapper(info):
"""Returns true if needs special veclib wrapper."""
import re
r_accel = re.compile("Accelerate")
r_vec = re.compile("vecLib")
res = False
try:
tmpstr = info['extra_link_args']
for i in tmpstr:
if r_accel.search(i) or r_vec.search(i):
res = True
except KeyError:
pass
return res
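# Illustrative sketch (not part of the original build script): the helper above
# only inspects the 'extra_link_args' entry of a numpy.distutils info dict,
# such as the one returned by get_info('lapack_opt').
def _demo_needs_veclib_wrapper():
    assert needs_veclib_wrapper(
        {'extra_link_args': ['-Wl,-framework', '-Wl,Accelerate']})
    assert not needs_veclib_wrapper({'libraries': ['openblas']})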
def configuration(parent_package='',top_path=None):
from numpy.distutils.system_info import get_info, NotFoundError
from numpy.distutils.misc_util import Configuration
config = Configuration('isolve',parent_package,top_path)
lapack_opt = get_info('lapack_opt')
if not lapack_opt:
raise NotFoundError('no lapack/blas resources found')
# iterative methods
methods = ['BiCGREVCOM.f.src',
'BiCGSTABREVCOM.f.src',
'CGREVCOM.f.src',
'CGSREVCOM.f.src',
# 'ChebyREVCOM.f.src',
'GMRESREVCOM.f.src',
# 'JacobiREVCOM.f.src',
'QMRREVCOM.f.src',
# 'SORREVCOM.f.src'
]
if needs_veclib_wrapper(lapack_opt):
methods += [join('FWRAPPERS', 'veclib_cabi_f.f'),
join('FWRAPPERS', 'veclib_cabi_c.c')]
else:
methods += [join('FWRAPPERS', 'dummy.f')]
Util = ['STOPTEST2.f.src','getbreak.f.src']
sources = Util + methods + ['_iterative.pyf.src']
config.add_extension('_iterative',
sources=[join('iterative', x) for x in sources],
extra_info=lapack_opt,
depends=[join('iterative', 'FWRAPPERS', x) for x in
['veclib_cabi_f.f', 'veclib_cabi_c.c', 'dummy.f']]
)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aws_direct_connect_link_aggregation_group
short_description: Manage Direct Connect LAG bundles.
description:
- Create, delete, or modify a Direct Connect link aggregation group.
version_added: "2.4"
author: "Sloane Hertel (@s-hertel)"
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
- botocore
options:
state:
description:
- The state of the Direct Connect link aggregation group.
choices:
- present
- absent
name:
description:
- The name of the Direct Connect link aggregation group.
link_aggregation_group_id:
description:
- The ID of the Direct Connect link aggregation group.
num_connections:
description:
- The number of connections with which to initialize the link aggregation group.
min_links:
description:
- The minimum number of physical connections that must be operational for the LAG itself to be operational.
location:
description:
- The location of the link aggregation group.
bandwidth:
description:
- The bandwidth of the link aggregation group.
force_delete:
description:
- This allows the minimum number of links to be set to 0, any hosted connections disassociated,
and any virtual interfaces associated to the LAG deleted.
type: bool
connection_id:
description:
- A connection ID to link with the link aggregation group upon creation.
delete_with_disassociation:
description:
- To be used with I(state=absent) to delete connections after disassociating them with the LAG.
type: bool
wait:
description:
- Whether or not to wait for the operation to complete. May be useful when waiting for virtual interfaces
to be deleted. May modify the time of waiting with C(wait_timeout).
type: bool
wait_timeout:
description:
- The duration in seconds to wait if I(wait) is True.
default: 120
"""
EXAMPLES = """
# create a Direct Connect connection
- aws_direct_connect_link_aggregation_group:
state: present
location: EqDC2
lag_id: dxlag-xxxxxxxx
bandwidth: 1Gbps
"""
RETURN = """
changed:
  type: bool
description: Whether or not the LAG has changed.
returned: always
aws_device:
type: str
description: The AWS Direct Connection endpoint that hosts the LAG.
sample: "EqSe2-1bwfvazist2k0"
returned: when I(state=present)
connections:
type: list
description: A list of connections bundled by this LAG.
sample:
"connections": [
{
"aws_device": "EqSe2-1bwfvazist2k0",
"bandwidth": "1Gbps",
"connection_id": "dxcon-fgzjah5a",
"connection_name": "Requested Connection 1 for Lag dxlag-fgtoh97h",
"connection_state": "down",
"lag_id": "dxlag-fgnsp4rq",
"location": "EqSe2",
"owner_account": "448830907657",
"region": "us-west-2"
}
]
returned: when I(state=present)
connections_bandwidth:
type: str
description: The individual bandwidth of the physical connections bundled by the LAG.
sample: "1Gbps"
returned: when I(state=present)
lag_id:
type: str
description: Unique identifier for the link aggregation group.
sample: "dxlag-fgnsp4rq"
returned: when I(state=present)
lag_name:
type: str
description: User-provided name for the link aggregation group.
returned: when I(state=present)
lag_state:
type: str
description: State of the LAG.
sample: "pending"
returned: when I(state=present)
location:
type: str
description: Where the connection is located.
sample: "EqSe2"
returned: when I(state=present)
minimum_links:
type: int
description: The minimum number of physical connections that must be operational for the LAG itself to be operational.
returned: when I(state=present)
number_of_connections:
type: int
description: The number of physical connections bundled by the LAG.
returned: when I(state=present)
owner_account:
type: str
description: Owner account ID of the LAG.
returned: when I(state=present)
region:
type: str
description: The region in which the LAG exists.
returned: when I(state=present)
"""
from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, ec2_argument_spec, HAS_BOTO3,
get_aws_connection_info, boto3_conn, AWSRetry)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aws.direct_connect import (DirectConnectError,
delete_connection,
delete_virtual_interface,
disassociate_connection_and_lag)
import traceback
import time
try:
import botocore
except ImportError:
pass
# handled by imported HAS_BOTO3
def lag_status(client, lag_id):
return lag_exists(client, lag_id=lag_id, lag_name=None, verify=False)
def lag_exists(client, lag_id=None, lag_name=None, verify=True):
""" If verify=True, returns the LAG ID or None
If verify=False, returns the LAG's data (or an empty dict)
"""
try:
if lag_id:
response = client.describe_lags(lagId=lag_id)
else:
response = client.describe_lags()
except botocore.exceptions.ClientError as e:
if lag_id and verify:
return False
elif lag_id:
return {}
else:
failed_op = "Failed to describe DirectConnect link aggregation groups."
raise DirectConnectError(msg=failed_op,
last_traceback=traceback.format_exc(),
exception=e)
match = [] # List of LAG IDs that are exact matches
lag = [] # List of LAG data that are exact matches
# look for matching connections
if len(response.get('lags', [])) == 1 and lag_id:
if response['lags'][0]['lagState'] != 'deleted':
match.append(response['lags'][0]['lagId'])
lag.append(response['lags'][0])
else:
for each in response.get('lags', []):
if each['lagState'] != 'deleted':
if not lag_id:
if lag_name == each['lagName']:
match.append(each['lagId'])
else:
match.append(each['lagId'])
# verifying if the connections exists; if true, return connection identifier, otherwise return False
if verify and len(match) == 1:
return match[0]
elif verify:
return False
# not verifying if the connection exists; just return current connection info
else:
if len(lag) == 1:
return lag[0]
else:
return {}
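# Hedged sketch (added for illustration, not part of the original module): the
# typical lookup pattern. With verify=True, lag_exists returns a LAG ID (or
# False); with verify=False it returns the LAG's raw data. `client` is assumed
# to be a boto3 'directconnect' client.
def _demo_lag_lookup(client, name):
    lag_id = lag_exists(client, lag_name=name, verify=True)
    if lag_id:
        return lag_status(client, lag_id)  # full LAG description dict
    return {}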
def create_lag(client, num_connections, location, bandwidth, name, connection_id):
if not name:
raise DirectConnectError(msg="Failed to create a Direct Connect link aggregation group: name required.",
last_traceback=None,
exception="")
parameters = dict(numberOfConnections=num_connections,
location=location,
connectionsBandwidth=bandwidth,
lagName=name)
if connection_id:
parameters.update(connectionId=connection_id)
try:
lag = client.create_lag(**parameters)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to create DirectConnect link aggregation group {0}".format(name),
last_traceback=traceback.format_exc(),
exception=e)
return lag['lagId']
def delete_lag(client, lag_id):
try:
client.delete_lag(lagId=lag_id)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to delete Direct Connect link aggregation group {0}.".format(lag_id),
last_traceback=traceback.format_exc(),
exception=e)
@AWSRetry.backoff(tries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException'])
def _update_lag(client, lag_id, lag_name, min_links):
params = {}
if min_links:
params.update(minimumLinks=min_links)
if lag_name:
params.update(lagName=lag_name)
client.update_lag(lagId=lag_id, **params)
def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout):
start = time.time()
if min_links and min_links > num_connections:
raise DirectConnectError(
msg="The number of connections {0} must be greater than the minimum number of links "
"{1} to update the LAG {2}".format(num_connections, min_links, lag_id),
last_traceback=None,
exception=None
)
while True:
try:
_update_lag(client, lag_id, lag_name, min_links)
except botocore.exceptions.ClientError as e:
if wait and time.time() - start <= wait_timeout:
continue
msg = "Failed to update Direct Connect link aggregation group {0}.".format(lag_id)
if "MinimumLinks cannot be set higher than the number of connections" in e.response['Error']['Message']:
msg += "Unable to set the min number of links to {0} while the LAG connections are being requested".format(min_links)
raise DirectConnectError(msg=msg,
last_traceback=traceback.format_exc(),
exception=e)
else:
break
def lag_changed(current_status, name, min_links):
""" Determines if a modifiable link aggregation group attribute has been modified. """
return (name and name != current_status['lagName']) or (min_links and min_links != current_status['minimumLinks'])
def ensure_present(client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout):
exists = lag_exists(client, lag_id, lag_name)
if not exists and lag_id:
raise DirectConnectError(msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id),
last_traceback=None,
exception="")
# the connection is found; get the latest state and see if it needs to be updated
if exists:
lag_id = exists
latest_state = lag_status(client, lag_id)
if lag_changed(latest_state, lag_name, min_links):
update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout)
return True, lag_id
return False, lag_id
# no connection found; create a new one
else:
lag_id = create_lag(client, num_connections, location, bandwidth, lag_name, connection_id)
update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout)
return True, lag_id
def describe_virtual_interfaces(client, lag_id):
try:
response = client.describe_virtual_interfaces(connectionId=lag_id)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id),
last_traceback=traceback.format_exc(),
exception=e)
return response.get('virtualInterfaces', [])
def get_connections_and_virtual_interfaces(client, lag_id):
virtual_interfaces = describe_virtual_interfaces(client, lag_id)
connections = lag_status(client, lag_id=lag_id).get('connections', [])
return virtual_interfaces, connections
def disassociate_vis(client, lag_id, virtual_interfaces):
for vi in virtual_interfaces:
delete_virtual_interface(client, vi['virtualInterfaceId'])
try:
response = client.delete_virtual_interface(virtualInterfaceId=vi['virtualInterfaceId'])
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Could not delete virtual interface {0} to delete link aggregation group {1}.".format(vi, lag_id),
last_traceback=traceback.format_exc(),
exception=e)
def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassociation, wait, wait_timeout):
lag_id = lag_exists(client, lag_id, lag_name)
if not lag_id:
return False
latest_status = lag_status(client, lag_id)
# determine the associated connections and virtual interfaces to disassociate
virtual_interfaces, connections = get_connections_and_virtual_interfaces(client, lag_id)
# If min_links is not 0, there are associated connections, or if there are virtual interfaces, ask for force_delete
if any((latest_status['minimumLinks'], virtual_interfaces, connections)) and not force_delete:
raise DirectConnectError(msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. "
"To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). "
"Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True "
"and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id),
last_traceback=None,
exception=None)
# update min_links to be 0 so we can remove the LAG
update_lag(client, lag_id, None, 0, len(connections), wait, wait_timeout)
# if virtual_interfaces and not delete_vi_with_disassociation: Raise failure; can't delete while vi attached
for connection in connections:
disassociate_connection_and_lag(client, connection['connectionId'], lag_id)
if delete_with_disassociation:
delete_connection(client, connection['connectionId'])
for vi in virtual_interfaces:
delete_virtual_interface(client, vi['virtualInterfaceId'])
start_time = time.time()
while True:
try:
delete_lag(client, lag_id)
except DirectConnectError as e:
if ('until its Virtual Interfaces are deleted' in e.exception) and (time.time() - start_time < wait_timeout) and wait:
continue
else:
return True
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(),
link_aggregation_group_id=dict(),
num_connections=dict(type='int'),
min_links=dict(type='int'),
location=dict(),
bandwidth=dict(),
connection_id=dict(),
delete_with_disassociation=dict(type='bool', default=False),
force_delete=dict(type='bool', default=False),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=120),
))
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=[('link_aggregation_group_id', 'name')],
required_if=[('state', 'present', ('location', 'bandwidth'))])
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")
connection = boto3_conn(module, conn_type='client',
resource='directconnect', region=region,
endpoint=ec2_url, **aws_connect_kwargs)
state = module.params.get('state')
response = {}
try:
if state == 'present':
changed, lag_id = ensure_present(connection,
num_connections=module.params.get("num_connections"),
lag_id=module.params.get("link_aggregation_group_id"),
lag_name=module.params.get("name"),
location=module.params.get("location"),
bandwidth=module.params.get("bandwidth"),
connection_id=module.params.get("connection_id"),
min_links=module.params.get("min_links"),
wait=module.params.get("wait"),
wait_timeout=module.params.get("wait_timeout"))
response = lag_status(connection, lag_id)
elif state == "absent":
changed = ensure_absent(connection,
lag_id=module.params.get("link_aggregation_group_id"),
lag_name=module.params.get("name"),
force_delete=module.params.get("force_delete"),
delete_with_disassociation=module.params.get("delete_with_disassociation"),
wait=module.params.get('wait'),
wait_timeout=module.params.get('wait_timeout'))
except DirectConnectError as e:
if e.last_traceback:
module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception))
else:
module.fail_json(msg=e.msg)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
if __name__ == '__main__':
main()
#!/usr/bin/python
#
# Copyright 2012-2013 Software freedom conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from selenium.webdriver.remote.command import Command
from selenium.webdriver.remote.remote_connection import RemoteConnection
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.phantomjs.service import Service
class WebDriver(RemoteWebDriver):
"""
Wrapper to communicate with PhantomJS through Ghostdriver.
You will need to follow all the directions here:
https://github.com/detro/ghostdriver
"""
def __init__(self, executable_path="phantomjs",
port=0, desired_capabilities=DesiredCapabilities.PHANTOMJS,
service_args=None, service_log_path=None):
"""
Creates a new instance of the PhantomJS / Ghostdriver.
Starts the service and then creates new instance of the driver.
:Args:
- executable_path - path to the executable. If the default is used it assumes the executable is in the $PATH
- port - port you would like the service to run, if left as 0, a free port will be found.
- desired_capabilities: Dictionary object with non-browser specific
capabilities only, such as "proxy" or "loggingPref".
- service_args : A List of command line arguments to pass to PhantomJS
- service_log_path: Path for phantomjs service to log to.
"""
self.service = Service(executable_path, port=port,
service_args=service_args, log_path=service_log_path)
self.service.start()
command_executor = self.service.service_url
try:
RemoteWebDriver.__init__(self,
command_executor=command_executor,
desired_capabilities=desired_capabilities)
except:
self.quit()
raise
self._is_remote = False
# Patch to support Native PhantomJS script
self.command_executor = RemoteConnection(command_executor, keep_alive=False)
Command.EXECUTE_PHANTOM_SCRIPT = "executePhantomScript"
self.command_executor._commands[Command.EXECUTE_PHANTOM_SCRIPT] = ("POST", "/session/$sessionId/phantom/execute")
def quit(self):
"""
Closes the browser and shuts down the PhantomJS executable
that is started when starting the PhantomJS
"""
try:
RemoteWebDriver.quit(self)
except:
# We don't care about the message because something probably has gone wrong
pass
finally:
self.service.stop()
def execute_phantomjs(self, script, *args):
"""
Synchronously Executes JavaScript in the PhantomJS context.
This allows access to advanced features like clipRect.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_phantomjs('')
"""
#script = script.replace("\"", "\\\"")
converted_args = list(args)
return self.execute(Command.EXECUTE_PHANTOM_SCRIPT,
{'script': script, 'args':converted_args})['value']
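# Hedged usage sketch (illustrative, not part of the original driver): inside
# the PhantomJS context the page object is commonly bound to `this`, so short
# scripts can tweak page properties such as clipRect before a screenshot. The
# property names here belong to PhantomJS's page API, not to Selenium.
def _demo_clip_rect(driver):
    script = "this.clipRect = {top: 0, left: 0, width: 200, height: 100};"
    return driver.execute_phantomjs(script)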
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Author:
# mail:
# Copyright:
# Contributions:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Croatia - vat special",
"description" : """
Croatian localisation.
======================
Author:
Contributions:
Description:
PDV obrazac , Knjiga ura/ira
""",
"version" : "1",
"author" : "Croatian community",
"category" : "Localisation/Croatia",
"website": "https://launchpad.net/openobject-croatia",
'depends': [
'account_tax_payment',
'base_vat',
'base_iban',
#'account_chart',
'l10n_hr_base',
],
'init_xml': [],
'update_xml': [
'security/ir.model.access.csv',
'account_view.xml',
'pdv_knjiga_view.xml',
'pdv_config_view.xml',
'wizard/wizard_pdv_obrazac_view.xml',
'wizard/wizard_pdv_knjiga_view.xml',
#'data/l10n_hr_pdv.knjiga.csv', #import manualy or new module
#'data/l10n_hr_pdv.report.obrazac.csv', #fails on mc now on Verso HR
#'data/l10n_hr_pdv.report.knjiga.csv', #import manualy or new module
],
"demo_xml" : [],
'test' : [],
"active": False,
"installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.coils import CoilCoolingWater
log = logging.getLogger(__name__)
class TestCoilCoolingWater(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_coilcoolingwater(self):
pyidf.validation_level = ValidationLevel.error
obj = CoilCoolingWater()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_availability_schedule_name = "object-list|Availability Schedule Name"
obj.availability_schedule_name = var_availability_schedule_name
# real
var_design_water_flow_rate = 0.0
obj.design_water_flow_rate = var_design_water_flow_rate
# real
var_design_air_flow_rate = 0.0
obj.design_air_flow_rate = var_design_air_flow_rate
# real
var_design_inlet_water_temperature = 0.0001
obj.design_inlet_water_temperature = var_design_inlet_water_temperature
# real
var_design_inlet_air_temperature = 0.0001
obj.design_inlet_air_temperature = var_design_inlet_air_temperature
# real
var_design_outlet_air_temperature = 0.0001
obj.design_outlet_air_temperature = var_design_outlet_air_temperature
# real
var_design_inlet_air_humidity_ratio = 0.0
obj.design_inlet_air_humidity_ratio = var_design_inlet_air_humidity_ratio
# real
var_design_outlet_air_humidity_ratio = 0.0
obj.design_outlet_air_humidity_ratio = var_design_outlet_air_humidity_ratio
# node
var_water_inlet_node_name = "node|Water Inlet Node Name"
obj.water_inlet_node_name = var_water_inlet_node_name
# node
var_water_outlet_node_name = "node|Water Outlet Node Name"
obj.water_outlet_node_name = var_water_outlet_node_name
# node
var_air_inlet_node_name = "node|Air Inlet Node Name"
obj.air_inlet_node_name = var_air_inlet_node_name
# node
var_air_outlet_node_name = "node|Air Outlet Node Name"
obj.air_outlet_node_name = var_air_outlet_node_name
# alpha
var_type_of_analysis = "SimpleAnalysis"
obj.type_of_analysis = var_type_of_analysis
# alpha
var_heat_exchanger_configuration = "CrossFlow"
obj.heat_exchanger_configuration = var_heat_exchanger_configuration
# object-list
var_condensate_collection_water_storage_tank_name = "object-list|Condensate Collection Water Storage Tank Name"
obj.condensate_collection_water_storage_tank_name = var_condensate_collection_water_storage_tank_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.coilcoolingwaters[0].name, var_name)
self.assertEqual(idf2.coilcoolingwaters[0].availability_schedule_name, var_availability_schedule_name)
self.assertAlmostEqual(idf2.coilcoolingwaters[0].design_water_flow_rate, var_design_water_flow_rate)
self.assertAlmostEqual(idf2.coilcoolingwaters[0].design_air_flow_rate, var_design_air_flow_rate)
self.assertAlmostEqual(idf2.coilcoolingwaters[0].design_inlet_water_temperature, var_design_inlet_water_temperature)
self.assertAlmostEqual(idf2.coilcoolingwaters[0].design_inlet_air_temperature, var_design_inlet_air_temperature)
self.assertAlmostEqual(idf2.coilcoolingwaters[0].design_outlet_air_temperature, var_design_outlet_air_temperature)
self.assertAlmostEqual(idf2.coilcoolingwaters[0].design_inlet_air_humidity_ratio, var_design_inlet_air_humidity_ratio)
self.assertAlmostEqual(idf2.coilcoolingwaters[0].design_outlet_air_humidity_ratio, var_design_outlet_air_humidity_ratio)
self.assertEqual(idf2.coilcoolingwaters[0].water_inlet_node_name, var_water_inlet_node_name)
self.assertEqual(idf2.coilcoolingwaters[0].water_outlet_node_name, var_water_outlet_node_name)
self.assertEqual(idf2.coilcoolingwaters[0].air_inlet_node_name, var_air_inlet_node_name)
self.assertEqual(idf2.coilcoolingwaters[0].air_outlet_node_name, var_air_outlet_node_name)
self.assertEqual(idf2.coilcoolingwaters[0].type_of_analysis, var_type_of_analysis)
self.assertEqual(idf2.coilcoolingwaters[0].heat_exchanger_configuration, var_heat_exchanger_configuration)
        self.assertEqual(idf2.coilcoolingwaters[0].condensate_collection_water_storage_tank_name, var_condensate_collection_water_storage_tank_name)
# Copyright 2012 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# @author: Somik Behera, Nicira Networks, Inc.
import httplib # basic HTTP library for HTTPS connections
import logging
from quantum.plugins.nicira.nicira_nvp_plugin.api_client import (
client_eventlet, request_eventlet)
LOG = logging.getLogger("NVPApiHelper")
LOG.setLevel(logging.INFO)
class NVPApiHelper(client_eventlet.NvpApiClientEventlet):
'''
Helper class to do basic login, cookie management, and provide base
method to send HTTP requests.
Implements new eventlet-based framework derived from the management
console nvp_gevent_client module.
'''
def __init__(self, api_providers, user, password, request_timeout,
http_timeout, retries, redirects, failover_time,
concurrent_connections=3):
'''Constructor.
:param api_providers: a list of tuples in the form:
(host, port, is_ssl=True). Passed on to NvpClientEventlet.
:param user: the login username.
:param password: the login password.
:param concurrent_connections: the number of concurrent connections.
:param request_timeout: all operations (including retries, redirects
from unresponsive controllers, etc) should finish within this
timeout.
:param http_timeout: how long to wait before aborting an
unresponsive controller (and allow for retries to another
controller in the cluster)
        :param retries: the number of times a request will be retried.
        :param redirects: the maximum number of redirects to follow.
:param failover_time: minimum time between controller failover and new
connections allowed.
'''
client_eventlet.NvpApiClientEventlet.__init__(
self, api_providers, user, password, concurrent_connections,
failover_time=failover_time)
self._request_timeout = request_timeout
self._http_timeout = http_timeout
self._retries = retries
self._redirects = redirects
def login(self, user=None, password=None):
'''Login to NVP controller.
Assumes same password is used for all controllers.
:param user: NVP controller user (usually admin). Provided for
            backwards compatibility. In the normal mode of operation
this should be None.
:param password: NVP controller password. Provided for backwards
            compatibility. In the normal mode of operation this should
be None.
:returns: Does not return a value.
'''
if user:
self._user = user
if password:
self._password = password
return client_eventlet.NvpApiClientEventlet.login(self)
def request(self, method, url, body="", content_type="application/json"):
'''Issues request to controller.'''
g = request_eventlet.NvpGenericRequestEventlet(
self, method, url, body, content_type, auto_login=True,
request_timeout=self._request_timeout,
http_timeout=self._http_timeout,
retries=self._retries, redirects=self._redirects)
g.start()
response = g.join()
LOG.debug('NVPApiHelper.request() returns "%s"' % response)
# response is a modified HTTPResponse object or None.
# response.read() will not work on response as the underlying library
# request_eventlet.NvpApiRequestEventlet has already called this
# method in order to extract the body and headers for processing.
# NvpApiRequestEventlet derived classes call .read() and
# .getheaders() on the HTTPResponse objects and store the results in
# the response object's .body and .headers data members for future
# access.
if response is None:
# Timeout.
LOG.error('Request timed out: %s to %s' % (method, url))
raise RequestTimeout()
status = response.status
if status == httplib.UNAUTHORIZED:
raise UnAuthorizedRequest()
# Fail-fast: Check for exception conditions and raise the
# appropriate exceptions for known error codes.
if status in self.error_codes:
LOG.error("Received error code: %s" % status)
LOG.error("Server Error Message: %s" % response.body)
self.error_codes[status](self)
# Continue processing for non-error condition.
if (status != httplib.OK and status != httplib.CREATED
and status != httplib.NO_CONTENT):
LOG.error("%s to %s, unexpected response code: %d (content = '%s')"
% (method, url, response.status, response.body))
return None
return response.body
def fourZeroFour(self):
raise ResourceNotFound()
def fourZeroNine(self):
raise Conflict()
def fiveZeroThree(self):
raise ServiceUnavailable()
def fourZeroThree(self):
raise Forbidden()
def zero(self):
raise NvpApiException()
# TODO(del): ensure error_codes are handled/raised appropriately
# in api_client.
    error_codes = {404: fourZeroFour,
                   409: fourZeroNine,
                   503: fiveZeroThree,
                   403: fourZeroThree,
                   301: zero,
                   307: zero,
                   400: zero,
                   500: zero}
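# Hedged usage sketch (illustrative; the URL below is only a placeholder, not
# a documented NVP endpoint). request() handles login, retries and redirects
# internally and returns the response body, or None on unexpected status
# codes; known error statuses raise one of the exceptions defined below.
def _demo_request(api_helper):
    return api_helper.request("GET", "/ws.v1/some-resource")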
class NvpApiException(Exception):
'''
Base NvpApiClient Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
'''
message = "An unknown exception occurred."
def __init__(self, **kwargs):
try:
self._error_string = self.message % kwargs
except Exception:
# at least get the core message out if something happened
self._error_string = self.message
def __str__(self):
return self._error_string
class UnAuthorizedRequest(NvpApiException):
message = "Server denied session's authentication credentials."
class ResourceNotFound(NvpApiException):
message = "An entity referenced in the request was not found."
class Conflict(NvpApiException):
message = "Request conflicts with configuration on a different entity."
class ServiceUnavailable(NvpApiException):
message = ("Request could not completed because the associated "
"resource could not be reached.")
class Forbidden(NvpApiException):
message = ("The request is forbidden from accessing the "
"referenced resource.")
class RequestTimeout(NvpApiException):
message = "The request has timed out."
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Deepin, Inc.
# 2013 Zhai Xiang
#
# Author: Zhai Xiang <[email protected]>
# Maintainer: Zhai Xiang <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from dtk.ui.init_skin import init_skin
from deepin_utils.file import get_parent_dir, get_current_dir
import os
app_theme = init_skin(
"deepin-ui-demo",
"1.0",
"01",
os.path.join(get_parent_dir(__file__), "skin"),
os.path.join(get_parent_dir(__file__), "app_theme"),
)
import deepin_lunar
from dtk.ui.application import Application
from dtk.ui.constant import DEFAULT_WINDOW_WIDTH, DEFAULT_WINDOW_HEIGHT
if __name__ == "__main__":
# Init application.
application = Application()
# Set application default size.
application.set_default_size(DEFAULT_WINDOW_WIDTH, DEFAULT_WINDOW_HEIGHT)
# Set application icon.
application.set_icon(os.path.join(get_current_dir(__file__), "icon.ico"))
# Set application preview pixbuf.
application.set_skin_preview(os.path.join(get_current_dir(__file__), "frame.png"))
# Add titlebar.
application.add_titlebar(
["theme", "max", "min", "close"],
os.path.join(get_current_dir(__file__), "logo.png"),
"TimeZone demo",
"TimeZone demo",
)
deepin_lunar_obj = deepin_lunar.new()
deepin_lunar_obj.mark_day(11)
application.main_box.pack_start(deepin_lunar_obj.get_handle())
application.run()
"""
Support for the demo speech service.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/demo/
"""
import os
import voluptuous as vol
from homeassistant.components.tts import Provider, PLATFORM_SCHEMA, CONF_LANG
SUPPORT_LANGUAGES = [
'en', 'de'
]
DEFAULT_LANG = 'en'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
})
def get_engine(hass, config):
"""Setup Demo speech component."""
return DemoProvider(config[CONF_LANG])
class DemoProvider(Provider):
"""Demo speech api provider."""
def __init__(self, lang):
"""Initialize demo provider."""
self._lang = lang
self.name = 'Demo'
@property
def default_language(self):
"""Default language."""
return self._lang
@property
def supported_languages(self):
"""List of supported languages."""
return SUPPORT_LANGUAGES
@property
def supported_options(self):
"""List of supported options like voice, emotionen."""
return ['voice', 'age']
def get_tts_audio(self, message, language, options=None):
"""Load TTS from demo."""
filename = os.path.join(os.path.dirname(__file__), "demo.mp3")
try:
with open(filename, 'rb') as voice:
data = voice.read()
except OSError:
return (None, None)
return ("mp3", data)
"""
raven.events
~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import logging
import sys
from raven.utils import varmap
from raven.utils.encoding import shorten, to_unicode
from raven.utils.stacks import get_stack_info, iter_traceback_frames, \
get_culprit
__all__ = ('BaseEvent', 'Exception', 'Message', 'Query')
class BaseEvent(object):
def __init__(self, client):
self.client = client
self.logger = logging.getLogger(__name__)
def to_string(self, data):
raise NotImplementedError
def capture(self, **kwargs):
return {
}
class Exception(BaseEvent):
"""
Exceptions store the following metadata:
- value: 'My exception value'
- type: 'ClassName'
- module '__builtin__' (i.e. __builtin__.TypeError)
- frames: a list of serialized frames (see _get_traceback_frames)
"""
def to_string(self, data):
exc = data['sentry.interfaces.Exception']
if exc['value']:
return '%s: %s' % (exc['type'], exc['value'])
return exc['type']
def get_hash(self, data):
exc = data['sentry.interfaces.Exception']
output = [exc['type']]
for frame in data['sentry.interfaces.Stacktrace']['frames']:
output.append(frame['module'])
output.append(frame['function'])
return output
def capture(self, exc_info=None, **kwargs):
new_exc_info = False
if not exc_info or exc_info is True:
new_exc_info = True
exc_info = sys.exc_info()
if not exc_info:
raise ValueError('No exception found')
try:
exc_type, exc_value, exc_traceback = exc_info
frames = varmap(lambda k, v: shorten(v,
string_length=self.client.string_max_length, list_length=self.client.list_max_length),
get_stack_info(iter_traceback_frames(exc_traceback)))
culprit = get_culprit(frames, self.client.include_paths, self.client.exclude_paths)
exc_module = getattr(exc_type, '__module__', None)
exc_type = getattr(exc_type, '__name__', '<unknown>')
finally:
if new_exc_info:
try:
del exc_info
del exc_traceback
except Exception, e:
self.logger.exception(e)
return {
'level': logging.ERROR,
'culprit': culprit,
'sentry.interfaces.Exception': {
'value': to_unicode(exc_value),
'type': str(exc_type),
'module': str(exc_module),
},
'sentry.interfaces.Stacktrace': {
'frames': frames
},
}
class Message(BaseEvent):
"""
Messages store the following metadata:
- message: 'My message from %s about %s'
- params: ('foo', 'bar')
"""
def to_string(self, data):
msg = data['sentry.interfaces.Message']
if msg.get('params'):
return msg['message'] % msg['params']
return msg['message']
def get_hash(self, data):
msg = data['sentry.interfaces.Message']
return [msg['message']]
def capture(self, message, params=(), **kwargs):
data = {
'sentry.interfaces.Message': {
'message': message,
'params': params,
}
}
return data
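# Illustrative sketch (not part of the original module): how an event class is
# exercised. `client` is assumed to be a configured raven.Client; the dict
# returned by capture() is merged into the outgoing Sentry event.
def _demo_message_event(client):
    event = Message(client)
    data = event.capture(message='User %s logged in from %s',
                         params=('alice', '10.0.0.1'))
    return event.to_string(data)  # -> 'User alice logged in from 10.0.0.1'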
class Query(BaseEvent):
"""
    Queries store the following metadata:
    - query: 'SELECT * FROM table'
    - engine: 'postgresql_psycopg2'
"""
def to_string(self, data):
sql = data['sentry.interfaces.Query']
return sql['query']
def get_hash(self, data):
sql = data['sentry.interfaces.Query']
return [sql['query'], sql['engine']]
def capture(self, query, engine, **kwargs):
return {
'sentry.interfaces.Query': {
'query': query,
'engine': engine,
}
}
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import logging
import shareabouts
import sys
logging.basicConfig(level='DEBUG')
def main(api_key):
# api = shareabouts.ShareaboutsApi('http://shareaboutsapi-civicworks.dotcloud.com/api/v1/')
api = shareabouts.ShareaboutsApi('http://localhost:8000/api/v1/')
api.authenticate_with_key(api_key)
dataset = 'biketotransit'
owner = 'biketotransit'
user_responses_uri = api.build_uri('all_submissions', username=owner,
dataset_slug=dataset, type='surveys') + '?visible=all'
user_responses = api.get(user_responses_uri)
for user_response in user_responses:
changed = False
if not user_response['visible']:
user_response['visible'] = True
changed = True
if changed:
# These are special fields. This needs to be handled more gracefully.
user_response.pop('created_datetime')
user_response.pop('updated_datetime')
user_response.pop('dataset')
user_response.pop('id')
response = api.send('PUT', url=user_response.pop('url'), data=user_response)
if __name__ == '__main__':
api_key = sys.argv[1]
sys.exit(main(api_key))
#!/usr/bin/env python
import struct
import os
def xor_file(input_file, output_file, xorkey):
number_added = 0
while True:
some_bytes = input_file.read(4)
if len(some_bytes) == 0:
break
if len(some_bytes) % 4 != 0:
number_added = 4 - len(some_bytes)
some_bytes = some_bytes + "\x00" * (number_added)
writable_bytes = struct.pack("<I", (struct.unpack("<I", some_bytes)[0]) ^ xorkey)
output_file.write(writable_bytes)
if number_added != 0:
number_added = 0 - number_added
output_file.seek(number_added, os.SEEK_END)
output_file.truncate()
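# Editor's note: a small, hypothetical usage sketch. xor_file() expects file
# objects opened in binary mode; it XORs the input four bytes at a time with a
# 32-bit little-endian key, padding the tail with NULs and truncating that
# padding afterwards. The file names and key below are illustrative only.
def _example_xor_usage():
    with open("payload.bin", "rb") as src, open("payload.xor", "w+b") as dst:
        xor_file(src, dst, 0xDEADBEEF)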
def write_rsrc(f, oldrva, newRva):
'''
This parses a .rsrc section and will adjust the RVA attributes
for patching on to the OnionDuke Stub
'''
rsrc_structure = {}
def parse_header(f):
return {"Characteristics": struct.unpack("<I", f.read(4))[0],
"TimeDataStamp": struct.unpack("<I", f.read(4))[0],
"MajorVersion": struct.unpack("<H", f.read(2))[0],
"MinorVersion": struct.unpack("<H", f.read(2))[0],
"NumberOfNamedEntries": struct.unpack("<H", f.read(2))[0],
"NumberofIDEntries": struct.unpack("<H", f.read(2))[0],
}
def merge_two_dicts(x, y):
'''Given two dicts, merge them into a new dict as a shallow copy.'''
z = x.copy()
z.update(y)
return z
def parse_data_entry(f):
return {"WriteME": f.tell(),
"RVA of Data": struct.unpack("<I", f.read(4))[0],
"Size": struct.unpack("<I", f.read(4))[0],
"CodePage": struct.unpack("<I", f.read(4))[0],
"Reserved": struct.unpack("<I", f.read(4))[0]
}
def parse_ID(f, number):
temp = {}
for i in range(0, number):
_tempid = struct.unpack("<I", f.read(4))[0]
temp[_tempid] = struct.unpack("<I", f.read(4))[0]
return temp
#parse initial header
rsrc_structure['Typeheader'] = parse_header(f)
rsrc_structure['Typeheader']['NameEntries'] = {}
rsrc_structure['Typeheader']["IDentries"] = {}
if rsrc_structure['Typeheader']["NumberofIDEntries"]:
rsrc_structure['Typeheader']["IDentries"] = parse_ID(f, rsrc_structure['Typeheader']["NumberofIDEntries"])
if rsrc_structure['Typeheader']["NumberOfNamedEntries"]:
rsrc_structure['Typeheader']['NameEntries'] = parse_ID(f, rsrc_structure['Typeheader']['NumberOfNamedEntries'])
#merge, flatten
rsrc_structure['Typeheader']['Entries'] = merge_two_dicts(rsrc_structure['Typeheader']["IDentries"],
rsrc_structure['Typeheader']['NameEntries'])
for entry, value in rsrc_structure['Typeheader']["Entries"].iteritems():
#jump to location in PE adjusted for RVA
f.seek((value & 0xffffff), 0)
rsrc_structure[entry] = parse_header(f)
rsrc_structure[entry]["IDs"] = {}
rsrc_structure[entry]["Names"] = {}
if rsrc_structure[entry]["NumberofIDEntries"]:
rsrc_structure[entry]["IDs"] = parse_ID(f, rsrc_structure[entry]["NumberofIDEntries"])
if rsrc_structure[entry]["NumberOfNamedEntries"]:
rsrc_structure[entry]["Names"] = parse_ID(f, rsrc_structure[entry]["NumberOfNamedEntries"])
rsrc_structure[entry]["NameIDs"] = merge_two_dicts(rsrc_structure[entry]["IDs"],
rsrc_structure[entry]["Names"])
#Now get language
for name_id, offset in rsrc_structure[entry]["NameIDs"].iteritems():
f.seek((offset & 0xffffff), 0)
rsrc_structure[name_id] = parse_header(f)
rsrc_structure[name_id]["IDs"] = {}
rsrc_structure[name_id]["Names"] = {}
if rsrc_structure[name_id]["NumberofIDEntries"]:
rsrc_structure[name_id]["IDs"] = parse_ID(f, rsrc_structure[name_id]["NumberofIDEntries"])
if rsrc_structure[name_id]["NumberOfNamedEntries"]:
rsrc_structure[name_id]["Names"] = parse_ID(f, rsrc_structure[name_id]["NumberOfNamedEntries"])
rsrc_structure[name_id]["language"] = merge_two_dicts(rsrc_structure[name_id]["IDs"],
rsrc_structure[name_id]["Names"])
#now get Data Entry Details and write
for lanID, offsetDataEntry in rsrc_structure[name_id]["language"].iteritems():
f.seek((offsetDataEntry & 0xffffff), 0)
rsrc_structure[lanID] = parse_data_entry(f)
#write to location
f.seek(rsrc_structure[lanID]["WriteME"], 0)
f.write(struct.pack("<I", rsrc_structure[lanID]["RVA of Data"] - oldrva + newRva))
| 0.004294 |
"""
============
pysillywalks
============
This is an example project for learning about git and python.
**It is very silly**
Installation
============
Use the standard ``python setup.py install``
Quick Usage
===========
The constructor expects the following keyword arguments:
- **walk**: A string of the walk type or genre
- **silly**: An integer rating of the silliness level
Example initialization:
from pysillywalks import Silly
s = Silly(walk="pirate", silly=2)
if s.is_very_silly:
s.action()
else:
print "Not silly enough, sorry!"
License
=======
Copyright (c) 2012 Mark Allen
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from distutils.core import setup
setup(author='Mark Allen',
author_email='[email protected]',
description='A very silly library',
long_description=__doc__,
fullname='pysillywalks',
name='pysillywalks',
url='https://github.com/mrallen1/pysillywalks',
download_url='https://github.com/mrallen1/pysillywalks',
version='1.0.0',
license='MIT',
platforms=['Linux','Windows'],
packages=['pysillywalks'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
| 0.005843 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Locking related utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
class GroupLock(object):
"""A lock to allow many members of a group to access a resource exclusively.
This lock provides a way to allow access to a resource by multiple threads
belonging to a logical group at the same time, while restricting access to
threads from all other groups. You can think of this as an extension of a
reader-writer lock, where you allow multiple writers at the same time. We
made it generic to support multiple groups instead of just two - readers and
writers.
Simple usage example with two groups accessing the same resource:
```python
lock = GroupLock(num_groups=2)
# In a member of group 0:
with lock.group(0):
# do stuff, access the resource
# ...
# In a member of group 1:
with lock.group(1):
# do stuff, access the resource
# ...
```
Using as a context manager with `.group(group_id)` is the easiest way. You
can also use the `acquire` and `release` method directly.
"""
def __init__(self, num_groups=2):
"""Initialize a group lock.
Args:
num_groups: The number of groups that will be accessing the resource under
consideration. Should be a positive number.
Returns:
A group lock that can then be used to synchronize code.
Raises:
ValueError: If num_groups is less than 1.
"""
if num_groups < 1:
raise ValueError("num_groups must be a positive integer, got {}".format(
num_groups))
self._ready = threading.Condition(threading.Lock())
self._num_groups = num_groups
self._group_member_counts = [0] * self._num_groups
def group(self, group_id):
"""Enter a context where the lock is with group `group_id`.
Args:
group_id: The group for which to acquire and release the lock.
Returns:
A context manager which will acquire the lock for `group_id`.
"""
self._validate_group_id(group_id)
return self._Context(self, group_id)
def acquire(self, group_id):
"""Acquire the group lock for a specific group `group_id`."""
self._validate_group_id(group_id)
self._ready.acquire()
while self._another_group_active(group_id):
self._ready.wait()
self._group_member_counts[group_id] += 1
self._ready.release()
def release(self, group_id):
"""Release the group lock for a specific group `group_id`."""
self._validate_group_id(group_id)
self._ready.acquire()
self._group_member_counts[group_id] -= 1
if self._group_member_counts[group_id] == 0:
self._ready.notifyAll()
self._ready.release()
def _another_group_active(self, group_id):
return any(
c > 0 for g, c in enumerate(self._group_member_counts) if g != group_id)
def _validate_group_id(self, group_id):
if group_id < 0 or group_id >= self._num_groups:
raise ValueError(
"group_id={} should be between 0 and num_groups={}".format(
group_id, self._num_groups))
class _Context(object):
"""Context manager helper for `GroupLock`."""
def __init__(self, lock, group_id):
self._lock = lock
self._group_id = group_id
def __enter__(self):
self._lock.acquire(self._group_id)
def __exit__(self, type_arg, value_arg, traceback_arg):
del type_arg, value_arg, traceback_arg
self._lock.release(self._group_id)
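# Editor's note: a short, hypothetical sketch of calling acquire()/release()
# directly instead of the group() context manager mentioned in the class
# docstring; the try/finally mirrors what the _Context helper does for you.
def _example_direct_usage(shared_resource):
  lock = GroupLock(num_groups=2)
  lock.acquire(0)
  try:
    shared_resource.append("written by a member of group 0")
  finally:
    lock.release(0)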
| 0.004564 |
"""Test Home Assistant volume utility functions."""
import pytest
from homeassistant.const import (
VOLUME_FLUID_OUNCE,
VOLUME_GALLONS,
VOLUME_LITERS,
VOLUME_MILLILITERS,
)
import homeassistant.util.volume as volume_util
INVALID_SYMBOL = "bob"
VALID_SYMBOL = VOLUME_LITERS
def test_convert_same_unit():
"""Test conversion from any unit to same unit."""
assert volume_util.convert(2, VOLUME_LITERS, VOLUME_LITERS) == 2
assert volume_util.convert(3, VOLUME_MILLILITERS, VOLUME_MILLILITERS) == 3
assert volume_util.convert(4, VOLUME_GALLONS, VOLUME_GALLONS) == 4
assert volume_util.convert(5, VOLUME_FLUID_OUNCE, VOLUME_FLUID_OUNCE) == 5
def test_convert_invalid_unit():
"""Test exception is thrown for invalid units."""
with pytest.raises(ValueError):
volume_util.convert(5, INVALID_SYMBOL, VALID_SYMBOL)
with pytest.raises(ValueError):
volume_util.convert(5, VALID_SYMBOL, INVALID_SYMBOL)
def test_convert_nonnumeric_value():
"""Test exception is thrown for nonnumeric type."""
with pytest.raises(TypeError):
volume_util.convert("a", VOLUME_GALLONS, VOLUME_LITERS)
def test_convert_from_liters():
"""Test conversion from liters to other units."""
liters = 5
assert volume_util.convert(liters, VOLUME_LITERS, VOLUME_GALLONS) == 1.321
def test_convert_from_gallons():
"""Test conversion from gallons to other units."""
gallons = 5
assert volume_util.convert(gallons, VOLUME_GALLONS, VOLUME_LITERS) == 18.925
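# Editor's note: a hedged illustration of the arithmetic behind the expected
# values above, assuming a rounded factor of roughly 3.785 liters per US
# gallon; the exact constants live in homeassistant.util.volume.
def _example_expected_values():
    liters_per_gallon = 3.785
    assert round(5 / liters_per_gallon, 3) == 1.321
    assert round(5 * liters_per_gallon, 3) == 18.925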
| 0.000657 |
import warnings
from urllib.parse import urlparse, urlunparse
from django.conf import settings
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import (
REDIRECT_FIELD_NAME, get_user_model, login as auth_login,
logout as auth_logout, update_session_auth_hash,
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm,
)
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import resolve_url
from django.template.response import TemplateResponse
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.utils.translation import gettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
UserModel = get_user_model()
class SuccessURLAllowedHostsMixin:
success_url_allowed_hosts = set()
def get_success_url_allowed_hosts(self):
allowed_hosts = {self.request.get_host()}
allowed_hosts.update(self.success_url_allowed_hosts)
return allowed_hosts
class LoginView(SuccessURLAllowedHostsMixin, FormView):
"""
Display the login form and handle the login action.
"""
form_class = AuthenticationForm
authentication_form = None
redirect_field_name = REDIRECT_FIELD_NAME
template_name = 'registration/login.html'
redirect_authenticated_user = False
extra_context = None
@method_decorator(sensitive_post_parameters())
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
if self.redirect_authenticated_user and self.request.user.is_authenticated:
redirect_to = self.get_success_url()
if redirect_to == self.request.path:
raise ValueError(
"Redirection loop for authenticated user detected. Check that "
"your LOGIN_REDIRECT_URL doesn't point to a login page."
)
return HttpResponseRedirect(redirect_to)
return super().dispatch(request, *args, **kwargs)
def get_success_url(self):
url = self.get_redirect_url()
return url or resolve_url(settings.LOGIN_REDIRECT_URL)
def get_redirect_url(self):
"""Return the user-originating redirect URL if it's safe."""
redirect_to = self.request.POST.get(
self.redirect_field_name,
self.request.GET.get(self.redirect_field_name, '')
)
url_is_safe = is_safe_url(
url=redirect_to,
allowed_hosts=self.get_success_url_allowed_hosts(),
require_https=self.request.is_secure(),
)
return redirect_to if url_is_safe else ''
def get_form_class(self):
return self.authentication_form or self.form_class
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def form_valid(self, form):
"""Security check complete. Log the user in."""
auth_login(self.request, form.get_user())
return HttpResponseRedirect(self.get_success_url())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
current_site = get_current_site(self.request)
context.update({
self.redirect_field_name: self.get_redirect_url(),
'site': current_site,
'site_name': current_site.name,
})
if self.extra_context is not None:
context.update(self.extra_context)
return context
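# Editor's note: a hypothetical URLconf sketch (it belongs in a project's
# urls.py rather than in this module) showing the usual way to wire up
# LoginView; the 'accounts/' prefix and options are illustrative only.
#
#     from django.urls import path
#     from django.contrib.auth import views as auth_views
#
#     urlpatterns = [
#         path('accounts/login/',
#              auth_views.LoginView.as_view(redirect_authenticated_user=True),
#              name='login'),
#     ]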
def login(request, *args, **kwargs):
warnings.warn(
'The login() view is superseded by the class-based LoginView().',
RemovedInDjango21Warning, stacklevel=2
)
return LoginView.as_view(**kwargs)(request, *args, **kwargs)
class LogoutView(SuccessURLAllowedHostsMixin, TemplateView):
"""
Log out the user and display the 'You are logged out' message.
"""
next_page = None
redirect_field_name = REDIRECT_FIELD_NAME
template_name = 'registration/logged_out.html'
extra_context = None
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
auth_logout(request)
next_page = self.get_next_page()
if next_page:
# Redirect to this page until the session has been cleared.
return HttpResponseRedirect(next_page)
return super().dispatch(request, *args, **kwargs)
def get_next_page(self):
if self.next_page is not None:
next_page = resolve_url(self.next_page)
elif settings.LOGOUT_REDIRECT_URL:
next_page = resolve_url(settings.LOGOUT_REDIRECT_URL)
else:
next_page = self.next_page
if (self.redirect_field_name in self.request.POST or
self.redirect_field_name in self.request.GET):
next_page = self.request.POST.get(
self.redirect_field_name,
self.request.GET.get(self.redirect_field_name)
)
url_is_safe = is_safe_url(
url=next_page,
allowed_hosts=self.get_success_url_allowed_hosts(),
require_https=self.request.is_secure(),
)
# Security check -- Ensure the user-originating redirection URL is
# safe.
if not url_is_safe:
next_page = self.request.path
return next_page
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
current_site = get_current_site(self.request)
context.update({
'site': current_site,
'site_name': current_site.name,
'title': _('Logged out'),
})
if self.extra_context is not None:
context.update(self.extra_context)
return context
def logout(request, *args, **kwargs):
warnings.warn(
'The logout() view is superseded by the class-based LogoutView().',
RemovedInDjango21Warning, stacklevel=2
)
return LogoutView.as_view(**kwargs)(request, *args, **kwargs)
_sentinel = object()
def logout_then_login(request, login_url=None, extra_context=_sentinel):
"""
Log out the user if they are logged in. Then redirect to the login page.
"""
if extra_context is not _sentinel:
warnings.warn(
"The unused `extra_context` parameter to `logout_then_login` "
"is deprecated.", RemovedInDjango21Warning
)
if not login_url:
login_url = settings.LOGIN_URL
login_url = resolve_url(login_url)
return LogoutView.as_view(next_page=login_url)(request)
def redirect_to_login(next, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Redirect the user to the login page, passing the given 'next' page.
"""
resolved_url = resolve_url(login_url or settings.LOGIN_URL)
login_url_parts = list(urlparse(resolved_url))
if redirect_field_name:
querystring = QueryDict(login_url_parts[4], mutable=True)
querystring[redirect_field_name] = next
login_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urlunparse(login_url_parts))
# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
# prompts for a new password
# - password_reset_complete shows a success message for the above
@csrf_protect
def password_reset(request,
template_name='registration/password_reset_form.html',
email_template_name='registration/password_reset_email.html',
subject_template_name='registration/password_reset_subject.txt',
password_reset_form=PasswordResetForm,
token_generator=default_token_generator,
post_reset_redirect=None,
from_email=None,
extra_context=None,
html_email_template_name=None,
extra_email_context=None):
warnings.warn("The password_reset() view is superseded by the "
"class-based PasswordResetView().",
RemovedInDjango21Warning, stacklevel=2)
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_done')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': token_generator,
'from_email': from_email,
'email_template_name': email_template_name,
'subject_template_name': subject_template_name,
'request': request,
'html_email_template_name': html_email_template_name,
'extra_email_context': extra_email_context,
}
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
context = {
'form': form,
'title': _('Password reset'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
def password_reset_done(request,
template_name='registration/password_reset_done.html',
extra_context=None):
warnings.warn("The password_reset_done() view is superseded by the "
"class-based PasswordResetDoneView().",
RemovedInDjango21Warning, stacklevel=2)
context = {
'title': _('Password reset sent'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
# Doesn't need csrf_protect since no-one can guess the URL
@sensitive_post_parameters()
@never_cache
def password_reset_confirm(request, uidb64=None, token=None,
template_name='registration/password_reset_confirm.html',
token_generator=default_token_generator,
set_password_form=SetPasswordForm,
post_reset_redirect=None,
extra_context=None):
"""
Check the hash in a password reset link and present a form for entering a
new password.
"""
warnings.warn("The password_reset_confirm() view is superseded by the "
"class-based PasswordResetConfirmView().",
RemovedInDjango21Warning, stacklevel=2)
assert uidb64 is not None and token is not None # checked by URLconf
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_complete')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
try:
# urlsafe_base64_decode() decodes to bytestring
uid = urlsafe_base64_decode(uidb64).decode()
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
validlink = True
title = _('Enter new password')
if request.method == 'POST':
form = set_password_form(user, request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(post_reset_redirect)
else:
form = set_password_form(user)
else:
validlink = False
form = None
title = _('Password reset unsuccessful')
context = {
'form': form,
'title': title,
'validlink': validlink,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
def password_reset_complete(request,
template_name='registration/password_reset_complete.html',
extra_context=None):
warnings.warn("The password_reset_complete() view is superseded by the "
"class-based PasswordResetCompleteView().",
RemovedInDjango21Warning, stacklevel=2)
context = {
'login_url': resolve_url(settings.LOGIN_URL),
'title': _('Password reset complete'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
# Class-based password reset views
# - PasswordResetView sends the mail
# - PasswordResetDoneView shows a success message for the above
# - PasswordResetConfirmView checks the link the user clicked and
# prompts for a new password
# - PasswordResetCompleteView shows a success message for the above
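# Editor's note: a hypothetical URLconf sketch tying the four class-based reset
# views together. The URL names match the reverse_lazy() defaults used below;
# the 'accounts/' prefix and path() usage are illustrative only.
#
#     urlpatterns = [
#         path('accounts/password_reset/',
#              PasswordResetView.as_view(), name='password_reset'),
#         path('accounts/password_reset/done/',
#              PasswordResetDoneView.as_view(), name='password_reset_done'),
#         path('accounts/reset/<uidb64>/<token>/',
#              PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
#         path('accounts/reset/done/',
#              PasswordResetCompleteView.as_view(), name='password_reset_complete'),
#     ]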
class PasswordContextMixin:
extra_context = None
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['title'] = self.title
if self.extra_context is not None:
context.update(self.extra_context)
return context
class PasswordResetView(PasswordContextMixin, FormView):
email_template_name = 'registration/password_reset_email.html'
extra_email_context = None
form_class = PasswordResetForm
from_email = None
html_email_template_name = None
subject_template_name = 'registration/password_reset_subject.txt'
success_url = reverse_lazy('password_reset_done')
template_name = 'registration/password_reset_form.html'
title = _('Password reset')
token_generator = default_token_generator
@method_decorator(csrf_protect)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def form_valid(self, form):
opts = {
'use_https': self.request.is_secure(),
'token_generator': self.token_generator,
'from_email': self.from_email,
'email_template_name': self.email_template_name,
'subject_template_name': self.subject_template_name,
'request': self.request,
'html_email_template_name': self.html_email_template_name,
'extra_email_context': self.extra_email_context,
}
form.save(**opts)
return super().form_valid(form)
INTERNAL_RESET_URL_TOKEN = 'set-password'
INTERNAL_RESET_SESSION_TOKEN = '_password_reset_token'
class PasswordResetDoneView(PasswordContextMixin, TemplateView):
template_name = 'registration/password_reset_done.html'
title = _('Password reset sent')
class PasswordResetConfirmView(PasswordContextMixin, FormView):
form_class = SetPasswordForm
post_reset_login = False
post_reset_login_backend = None
success_url = reverse_lazy('password_reset_complete')
template_name = 'registration/password_reset_confirm.html'
title = _('Enter new password')
token_generator = default_token_generator
@method_decorator(sensitive_post_parameters())
@method_decorator(never_cache)
def dispatch(self, *args, **kwargs):
assert 'uidb64' in kwargs and 'token' in kwargs
self.validlink = False
self.user = self.get_user(kwargs['uidb64'])
if self.user is not None:
token = kwargs['token']
if token == INTERNAL_RESET_URL_TOKEN:
session_token = self.request.session.get(INTERNAL_RESET_SESSION_TOKEN)
if self.token_generator.check_token(self.user, session_token):
# If the token is valid, display the password reset form.
self.validlink = True
return super().dispatch(*args, **kwargs)
else:
if self.token_generator.check_token(self.user, token):
# Store the token in the session and redirect to the
# password reset form at a URL without the token. That
# avoids the possibility of leaking the token in the
# HTTP Referer header.
self.request.session[INTERNAL_RESET_SESSION_TOKEN] = token
redirect_url = self.request.path.replace(token, INTERNAL_RESET_URL_TOKEN)
return HttpResponseRedirect(redirect_url)
# Display the "Password reset unsuccessful" page.
return self.render_to_response(self.get_context_data())
def get_user(self, uidb64):
try:
# urlsafe_base64_decode() decodes to bytestring
uid = urlsafe_base64_decode(uidb64).decode()
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
user = None
return user
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.user
return kwargs
def form_valid(self, form):
user = form.save()
del self.request.session[INTERNAL_RESET_SESSION_TOKEN]
if self.post_reset_login:
auth_login(self.request, user, self.post_reset_login_backend)
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.validlink:
context['validlink'] = True
else:
context.update({
'form': None,
'title': _('Password reset unsuccessful'),
'validlink': False,
})
return context
class PasswordResetCompleteView(PasswordContextMixin, TemplateView):
template_name = 'registration/password_reset_complete.html'
title = _('Password reset complete')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['login_url'] = resolve_url(settings.LOGIN_URL)
return context
@sensitive_post_parameters()
@csrf_protect
@login_required
def password_change(request,
template_name='registration/password_change_form.html',
post_change_redirect=None,
password_change_form=PasswordChangeForm,
extra_context=None):
warnings.warn("The password_change() view is superseded by the "
"class-based PasswordChangeView().",
RemovedInDjango21Warning, stacklevel=2)
if post_change_redirect is None:
post_change_redirect = reverse('password_change_done')
else:
post_change_redirect = resolve_url(post_change_redirect)
if request.method == "POST":
form = password_change_form(user=request.user, data=request.POST)
if form.is_valid():
form.save()
# Updating the password logs out all other sessions for the user
# except the current one.
update_session_auth_hash(request, form.user)
return HttpResponseRedirect(post_change_redirect)
else:
form = password_change_form(user=request.user)
context = {
'form': form,
'title': _('Password change'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@login_required
def password_change_done(request,
template_name='registration/password_change_done.html',
extra_context=None):
warnings.warn("The password_change_done() view is superseded by the "
"class-based PasswordChangeDoneView().",
RemovedInDjango21Warning, stacklevel=2)
context = {
'title': _('Password change successful'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
class PasswordChangeView(PasswordContextMixin, FormView):
form_class = PasswordChangeForm
success_url = reverse_lazy('password_change_done')
template_name = 'registration/password_change_form.html'
title = _('Password change')
@method_decorator(sensitive_post_parameters())
@method_decorator(csrf_protect)
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def form_valid(self, form):
form.save()
# Updating the password logs out all other sessions for the user
# except the current one.
update_session_auth_hash(self.request, form.user)
return super().form_valid(form)
class PasswordChangeDoneView(PasswordContextMixin, TemplateView):
template_name = 'registration/password_change_done.html'
title = _('Password change successful')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
| 0.000462 |
bl_info = {
    "name": "Workplane",
    "version": (0, 2),
    "blender": (2, 80, 0),
    "category": "3D View",
    "author": "Benjamin Sauder",
    "description": "Allows for quicker workflow using move/rotate/scale on a user defined workplane",
    "location": "View3D > Tool Shelf",
}
if "bpy" in locals():
import importlib
importlib.reload(main)
importlib.reload(data)
importlib.reload(draw)
importlib.reload(operator)
importlib.reload(ui)
importlib.reload(update)
importlib.reload(util)
else:
from . import (
main,
data,
draw,
operator,
ui,
update,
util
)
import bpy
classes = [
data.WP_OT_WorkplaneProperties,
update.WP_OT_Updater,
operator.WP_OT_SetWorkPlane,
operator.WP_OT_WorkplaneTranslate,
operator.WP_OT_WorkplaneRotate,
operator.WP_OT_WorkplaneScale,
operator.WP_OT_WorkplaneExtrude,
ui.VIEW3D_PT_WORKINGPLANE,
# ui.VIEW3D_PT_WorkplanePanelTransform,
# ui.VIEW3D_PT_WorkplanePanelMeshEdit,
]
def register():
for c in classes:
bpy.utils.register_class(c)
bpy.types.Scene.workplane = bpy.props.PointerProperty(type=data.WP_OT_WorkplaneProperties)
def unregister():
update.WP_OT_Updater.Running = False
main.draw.disable()
del bpy.types.Scene.workplane
for c in classes:
bpy.utils.unregister_class(c)
| 0.009517 |
import pytest
import functools
from urllib import parse
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.core.path import WaterButlerPathPart
class EncodedPathPart:
DECODE = parse.unquote
ENCODE = functools.partial(parse.quote, safe='')
class EncodedPath:
PART_CLASS = EncodedPathPart
class TestPathPart:
pass
class TestPath:
def test_name(self):
path = WaterButlerPath('/this/is/a/long/path')
assert path.name == 'path'
def test_parent(self):
path = WaterButlerPath('/this/is/a/long/path')
assert path.parent.name == 'long'
assert path.parent == WaterButlerPath('/this/is/a/long/')
def test_ending_slash_is_folder(self):
assert WaterButlerPath('/this/is/folder/').is_dir is True
assert WaterButlerPath('/this/is/folder/').is_file is False
def test_no_ending_slash_is_file(self):
assert WaterButlerPath('/this/is/file').is_dir is False
assert WaterButlerPath('/this/is/file').is_file is True
def test_is_root(self):
assert WaterButlerPath('/').is_root is True
assert WaterButlerPath('/this/is/folder/').is_root is False
def test_child(self):
path = WaterButlerPath('/this/is/a/long/')
assert path.name == 'long'
assert path.child('path').name == 'path'
def test_rename(self):
path = WaterButlerPath('/this/is/a/long/path')
assert path.name == 'path'
path.rename('journey')
assert path.name == 'journey'
class TestValidation:
def test_double_slash_is_invalid(self):
with pytest.raises(exceptions.InvalidPathError):
WaterButlerPath('/this//is/a/path')
def test_must_start_with_slash(self):
with pytest.raises(exceptions.InvalidPathError):
WaterButlerPath('this/is/a/path')
def test_cant_be_empty(self):
with pytest.raises(exceptions.InvalidPathError):
WaterButlerPath('')
def test_cant_have_dotdot(self):
with pytest.raises(exceptions.InvalidPathError):
WaterButlerPath('/etc/nginx/../')
| 0.001393 |
# Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc.
from abc import abstractmethod
from neutron.api import extensions
from neutron.api.v2 import base
from neutron import manager
from neutron import quota
# Attribute Map
RESOURCE_ATTRIBUTE_MAP = {
'net_partitions': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'name': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': '',
'validate': {'type:name_not_default': None}},
'description': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': '',
'validate': {'type:string_or_none': None}},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'is_visible': True},
},
}
class Netpartition(object):
"""Extension class supporting net_partition.
"""
@classmethod
def get_name(cls):
return "NetPartition"
@classmethod
def get_alias(cls):
return "net-partition"
@classmethod
def get_description(cls):
return "NetPartition"
@classmethod
def get_namespace(cls):
return "http://nuagenetworks.net/ext/net_partition/api/v1.0"
@classmethod
def get_updated(cls):
return "2014-01-01T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
plugin = manager.NeutronManager.get_plugin()
resource_name = 'net_partition'
collection_name = resource_name.replace('_', '-') + "s"
params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict())
quota.QUOTAS.register_resource_by_name(resource_name)
controller = base.create_resource(collection_name,
resource_name,
plugin, params, allow_bulk=True)
ex = extensions.ResourceExtension(collection_name,
controller)
exts.append(ex)
return exts
class NetPartitionPluginBase(object):
@abstractmethod
def create_net_partition(self, context, router):
pass
@abstractmethod
def update_net_partition(self, context, id, router):
pass
@abstractmethod
def get_net_partition(self, context, id, fields=None):
pass
@abstractmethod
def delete_net_partition(self, context, id):
pass
@abstractmethod
def get_net_partitions(self, context, filters=None, fields=None):
pass
| 0 |
custom_attributes_dict = {
'global': {
'max_conn': {
'type': int,
'limits': [1, 65535],
'cmd': 'maxconn %d'
},
'max_conn_rate': {
'type': int,
'limits': [1, 65535],
'cmd': 'maxconnrate %d'
},
'max_sess_rate': {
'type': int,
'limits': [1, 65535],
'cmd': 'maxsessrate %d'
},
'max_ssl_conn': {
'type': int,
'limits': [1, 65535],
'cmd': 'maxsslconn %d'
},
'max_ssl_rate': {
'type': int,
'limits': [1, 65535],
'cmd': 'maxsslrate %d'
},
'ssl_ciphers': {
'type': str,
'limits': [1, 100],
'cmd': 'ssl-default-bind-ciphers %s'
},
'tune_http_max_header': {
'type': int,
'limits': [1, 128],
'cmd': 'tune.http.maxhdr %d'
},
'tune_ssl_max_record': {
'type': int,
'limits': [1, 16384],
'cmd': 'tune.ssl.maxrecord %d'
}
},
'default': {
'server_timeout': {
'type': int,
'limits': [1, 5000000],
'cmd': 'timeout server %d'
},
'client_timeout': {
'type': int,
'limits': [1, 5000000],
'cmd': 'timeout client %d'
},
'connect_timeout': {
'type': int,
'limits': [1, 5000000],
'cmd': 'timeout connect %d'
}
},
'vip': {
'http_server_close': {
'type': bool,
'limits': ['True', 'False'],
'cmd': '%soption http-server-close'
},
'rate_limit_sessions': {
'type': int,
'limits': [1, 65535],
'cmd': 'rate-limit sessions %d'
}
},
'pool': {},
}
def validate_custom_attributes(config, section):
section_dict = {}
if 'custom-attributes' in config and section in custom_attributes_dict:
custom_attributes = config['custom-attributes']
for key, value in custom_attributes.iteritems():
if key in custom_attributes_dict[section]:
#Sanitize the value
try:
type_attr = custom_attributes_dict[section][key]['type']
limits = custom_attributes_dict[section][key]['limits']
if type_attr == int:
value = type_attr(value)
if value in range(limits[0], limits[1]):
section_dict.update({key:value})
elif type_attr == str:
if len(value) in range(limits[0], limits[1]):
section_dict.update({key:value})
elif type_attr == bool:
if value in limits:
if value == 'True':
value = ''
elif value == 'False':
value = 'no '
section_dict.update({key:value})
except Exception as e:
print "Skipping key: %s, value: %s due to validation failure" \
% (key, value)
continue
return section_dict
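# Editor's note: a small, hypothetical usage sketch. Keys are looked up in
# custom_attributes_dict for the requested section, coerced to the declared
# type and range-checked; unknown keys are ignored and values that fail
# validation are skipped with a warning.
def _example_validate():
    config = {
        'custom-attributes': {
            'max_conn': '5000',          # valid int within [1, 65535]
            'client_timeout': '600000',  # belongs to 'default', ignored for 'global'
            'bogus_key': '42',           # unknown key, ignored
        }
    }
    # Expected result under these assumptions: {'max_conn': 5000}
    return validate_custom_attributes(config, 'global')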
| 0.002068 |
"""
Courseware views functions
"""
import logging
import urllib
import json
import cgi
from datetime import datetime
from collections import defaultdict
from django.utils import translation
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.utils.timezone import UTC
from django.views.decorators.http import require_GET
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from edxmako.shortcuts import render_to_response, render_to_string, marketing_link
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from functools import wraps
from markupsafe import escape
from courseware import grades
from courseware.access import has_access, _adjust_start_date_for_beta_testers
from courseware.courses import get_courses, get_course, get_studio_url, get_course_with_access, sort_by_announcement
from courseware.courses import sort_by_start_date
from courseware.masquerade import setup_masquerade
from courseware.model_data import FieldDataCache
from .module_render import toc_for_course, get_module_for_descriptor, get_module
from courseware.models import StudentModule, StudentModuleHistory, LTIComponent
from course_modes.models import CourseMode
from lms.djangoapps.lms_xblock.models import XBlockAsidesConfig
from open_ended_grading import open_ended_notifications
from student.models import UserTestGroup, CourseEnrollment
from student.views import single_course_reverification_info, is_course_blocked
from util.cache import cache, cache_if_anonymous
from xblock.fragment import Fragment
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.modulestore.search import path_to_location
from xmodule.tabs import CourseTabList, StaffGradingTab, PeerGradingTab, OpenEndedGradingTab
from xmodule.x_module import STUDENT_VIEW
import shoppingcart
from shoppingcart.models import CourseRegistrationCode
from shoppingcart.utils import is_shopping_cart_enabled
from opaque_keys import InvalidKeyError
from microsite_configuration import microsite
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from instructor.enrollment import uses_shib
from util.db import commit_on_success_with_read_committed
import survey.utils
import survey.views
from util.views import ensure_valid_course_key
#START DEKKER
from django.contrib.auth import authenticate, logout, login
from django.views.decorators.csrf import csrf_exempt
from oauthlib.oauth1.rfc5849 import signature
from collections import OrderedDict
import urllib
from student.models import LTIUserAuth
#END DEKKER
log = logging.getLogger("edx.courseware")
template_imports = {'urllib': urllib}
CONTENT_DEPTH = 2
def user_groups(user):
"""
TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
"""
if not user.is_authenticated():
return []
# TODO: Rewrite in Django
key = 'user_group_names_{user.id}'.format(user=user)
cache_expiration = 60 * 60 # one hour
# Kill caching on dev machines -- we switch groups a lot
group_names = cache.get(key)
if settings.DEBUG:
group_names = None
if group_names is None:
group_names = [u.name for u in UserTestGroup.objects.filter(users=user)]
cache.set(key, group_names, cache_expiration)
return group_names
@ensure_csrf_cookie
@cache_if_anonymous()
def courses(request):
"""
Render "find courses" page. The course selection work is done in courseware.courses.
"""
courses = get_courses(request.user, request.META.get('HTTP_HOST'))
if microsite.get_value("ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
return render_to_response("courseware/courses.html", {'courses': courses})
def render_accordion(request, course, chapter, section, field_data_cache):
"""
Draws navigation bar. Takes current position in accordion as
parameter.
If chapter and section are '' or None, renders a default accordion.
course, chapter, and section are the url_names.
Returns the html string
"""
# grab the table of contents
toc = toc_for_course(request, course, chapter, section, field_data_cache)
context = dict([
('toc', toc),
('course_id', course.id.to_deprecated_string()),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format)
] + template_imports.items())
return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule, min_depth=None):
"""
Get the xmodule.position's display item of an xmodule that has a position and
children. If xmodule has no position or is out of bounds, return the first
child with children extending down to content_depth.
For example, if chapter_one has no position set, with two child sections,
section-A having no children and section-B having a discussion unit,
`get_current_child(chapter, min_depth=1)` will return section-B.
Returns None only if there are no children at all.
"""
def _get_default_child_module(child_modules):
"""Returns the first child of xmodule, subject to min_depth."""
if not child_modules:
default_child = None
elif not min_depth > 0:
default_child = child_modules[0]
else:
content_children = [child for child in child_modules if
child.has_children_at_depth(min_depth - 1) and child.get_display_items()]
default_child = content_children[0] if content_children else None
return default_child
if not hasattr(xmodule, 'position'):
return None
if xmodule.position is None:
return _get_default_child_module(xmodule.get_display_items())
else:
# position is 1-indexed.
pos = xmodule.position - 1
children = xmodule.get_display_items()
if 0 <= pos < len(children):
child = children[pos]
elif len(children) > 0:
# module has a set position, but the position is out of range.
# return default child.
child = _get_default_child_module(children)
else:
child = None
return child
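# Editor's note: a tiny, hypothetical illustration of the position handling
# above (kept as a comment so nothing runs at import time). position is
# 1-indexed, and an unset or out-of-range position falls back to the first
# suitable child.
#
#     class _FakeSeq(object):
#         def __init__(self, children, position=None):
#             self.position = position
#             self._children = children
#         def get_display_items(self):
#             return self._children
#
#     get_current_child(_FakeSeq(['chap_a', 'chap_b'], position=2))  # -> 'chap_b'
#     get_current_child(_FakeSeq([]))                                # -> None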
def redirect_to_course_position(course_module, content_depth):
"""
Return a redirect to the user's current place in the course.
If this is the user's first time, redirects to COURSE/CHAPTER/SECTION.
If this isn't the users's first time, redirects to COURSE/CHAPTER,
and the view will find the current section and display a message
about reusing the stored position.
If there is no current position in the course or chapter, then selects
the first child.
"""
urlargs = {'course_id': course_module.id.to_deprecated_string()}
chapter = get_current_child(course_module, min_depth=content_depth)
if chapter is None:
# oops. Something bad has happened.
raise Http404("No chapter found when loading current position in course")
urlargs['chapter'] = chapter.url_name
if course_module.position is not None:
return redirect(reverse('courseware_chapter', kwargs=urlargs))
# Relying on default of returning first child
section = get_current_child(chapter, min_depth=content_depth - 1)
if section is None:
raise Http404("No section found when loading current position in course")
urlargs['section'] = section.url_name
return redirect(reverse('courseware_section', kwargs=urlargs))
def save_child_position(seq_module, child_name):
"""
child_name: url_name of the child
"""
for position, c in enumerate(seq_module.get_display_items(), start=1):
if c.location.name == child_name:
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
def save_positions_recursively_up(user, request, field_data_cache, xmodule):
"""
Recurses up the course tree starting from a leaf
Saving the position property based on the previous node as it goes
"""
current_module = xmodule
while current_module:
parent_location = modulestore().get_parent_location(current_module.location)
parent = None
if parent_location:
parent_descriptor = modulestore().get_item(parent_location)
parent = get_module_for_descriptor(user, request, parent_descriptor, field_data_cache, current_module.location.course_key)
if parent and hasattr(parent, 'position'):
save_child_position(parent, current_module.location.name)
current_module = parent
def chat_settings(course, user):
"""
Returns a dict containing the settings required to connect to a
Jabber chat server and room.
"""
domain = getattr(settings, "JABBER_DOMAIN", None)
if domain is None:
log.warning('You must set JABBER_DOMAIN in the settings to '
'enable the chat widget')
return None
return {
'domain': domain,
# Jabber doesn't like slashes, so replace with dashes
'room': "{ID}_class".format(ID=course.id.replace('/', '-')),
'username': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
# TODO: clearly this needs to be something other than the username
# should also be something that's not necessarily tied to a
# particular course
'password': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
}
@login_required
@ensure_csrf_cookie
@csrf_exempt
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@ensure_valid_course_key
@commit_on_success_with_read_committed
def index(request, course_id, chapter=None, section=None,
position=None):
"""
Displays courseware accordion and associated content. If course, chapter,
and section are all specified, renders the page, or returns an error if they
are invalid.
If section is not specified, displays the accordion opened to the right chapter.
If neither chapter or section are specified, redirects to user's most recent
chapter, or the first chapter if this is the user's first visit.
Arguments:
- request : HTTP request
- course_id : course id (str: ORG/course/URL_NAME)
- chapter : chapter url_name (str)
- section : section url_name (str)
- position : position in module, eg of <sequential> module (str)
Returns:
- HTTPresponse
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
user = User.objects.prefetch_related("groups").get(id=request.user.id)
redeemed_registration_codes = CourseRegistrationCode.objects.filter(
course_id=course_key,
registrationcoderedemption__redeemed_by=request.user
)
# Redirect to dashboard if the course is blocked due to non-payment.
if is_course_blocked(request, redeemed_registration_codes, course_key):
# registration codes may be generated via Bulk Purchase Scenario
# we have to check only for the invoice generated registration codes
# that their invoice is valid or not
log.warning(
u'User %s cannot access the course %s because payment has not yet been received',
user,
course_key.to_deprecated_string()
)
return redirect(reverse('dashboard'))
request.user = user # keep just one instance of User
with modulestore().bulk_operations(course_key):
return _index_bulk_op(request, course_key, chapter, section, position)
# pylint: disable=too-many-statements
def _index_bulk_op(request, course_key, chapter, section, position):
"""
Render the index page for the specified course.
"""
user = request.user
course = get_course_with_access(user, 'load', course_key, depth=2)
staff_access = has_access(user, 'staff', course)
registered = registered_for_course(course, user)
if not registered:
# TODO (vshnayder): do course instructors need to be registered to see course?
log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.to_deprecated_string())
return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))
# check to see if there is a required survey that must be taken before
# the user can access the course.
if survey.utils.must_answer_survey(course, user):
return redirect(reverse('course_survey', args=[unicode(course.id)]))
masq = setup_masquerade(request, staff_access)
try:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_key, user, course, depth=2)
course_module = get_module_for_descriptor(user, request, course, field_data_cache, course_key)
if course_module is None:
log.warning(u'If you see this, something went wrong: if we got this'
u' far, should have gotten a course module for this user')
return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))
studio_url = get_studio_url(course, 'course')
context = {
'csrf': csrf(request)['csrf_token'],
'accordion': render_accordion(request, course, chapter, section, field_data_cache),
'COURSE_TITLE': course.display_name_with_default,
'course': course,
'init': '',
'fragment': Fragment(),
'staff_access': staff_access,
'studio_url': studio_url,
'masquerade': masq,
'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:[email protected]/xqa'),
'reverifications': fetch_reverify_banner_info(request, course_key),
}
now = datetime.now(UTC())
effective_start = _adjust_start_date_for_beta_testers(user, course, course_key)
if staff_access and now < effective_start:
# Disable student view button if user is staff and
# course is not yet visible to students.
context['disable_student_access'] = True
has_content = course.has_children_at_depth(CONTENT_DEPTH)
if not has_content:
# Show empty courseware for a course with no units
return render_to_response('courseware/courseware.html', context)
elif chapter is None:
# passing CONTENT_DEPTH avoids returning 404 for a course with an
# empty first section and a second section with content
return redirect_to_course_position(course_module, CONTENT_DEPTH)
# Only show the chat if it's enabled by the course and in the
# settings.
show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
if show_chat:
context['chat'] = chat_settings(course, user)
# If we couldn't load the chat settings, then don't show
# the widget in the courseware.
if context['chat'] is None:
show_chat = False
context['show_chat'] = show_chat
chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
if chapter_descriptor is not None:
save_child_position(course_module, chapter)
else:
raise Http404('No chapter descriptor found with name {}'.format(chapter))
chapter_module = course_module.get_child_by(lambda m: m.location.name == chapter)
if chapter_module is None:
# User may be trying to access a chapter that isn't live yet
if masq == 'student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masq as student: no chapter %s' % chapter)
return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
raise Http404
if section is not None:
section_descriptor = chapter_descriptor.get_child_by(lambda m: m.location.name == section)
if section_descriptor is None:
# Specifically asked-for section doesn't exist
if masq == 'student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masq as student: no section %s' % section)
return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
raise Http404
## Allow chromeless operation
if section_descriptor.chrome:
chrome = [s.strip() for s in section_descriptor.chrome.lower().split(",")]
if 'accordion' not in chrome:
context['disable_accordion'] = True
if 'tabs' not in chrome:
context['disable_tabs'] = True
if section_descriptor.default_tab:
context['default_tab'] = section_descriptor.default_tab
# cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
# which will prefetch the children more efficiently than doing a recursive load
section_descriptor = modulestore().get_item(section_descriptor.location, depth=None)
# Load all descendants of the section, because we're going to display its
# html, which in general will need all of its children
section_field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_key, user, section_descriptor, depth=None, asides=XBlockAsidesConfig.possible_asides()
)
            # Verify that the position (passed in as a string) is in fact an int
if position is not None:
try:
int(position)
except ValueError:
raise Http404("Position {} is not an integer!".format(position))
section_module = get_module_for_descriptor(
request.user,
request,
section_descriptor,
section_field_data_cache,
course_key,
position
)
if section_module is None:
# User may be trying to be clever and access something
# they don't have access to.
raise Http404
### DEKKER
if request.session.get('lti_view'):
request.session['lti_view'] = False
if course.lti_enabled and section_module.lti_enabled:
lti = request.session.get('lti_vars')
context['disable_accordion'] = True
context['disable_tabs'] = True
context['suppress_module_navigation'] = True
else:
raise PermissionDenied
### END DEKKER
# Save where we are in the chapter
save_child_position(chapter_module, section)
context['fragment'] = section_module.render(STUDENT_VIEW)
context['section_title'] = section_descriptor.display_name_with_default
else:
# section is none, so display a message
studio_url = get_studio_url(course, 'course')
prev_section = get_current_child(chapter_module)
if prev_section is None:
# Something went wrong -- perhaps this chapter has no sections visible to the user.
# Clearing out the last-visited state and showing "first-time" view by redirecting
# to courseware.
course_module.position = None
course_module.save()
return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
prev_section_url = reverse('courseware_section', kwargs={
'course_id': course_key.to_deprecated_string(),
'chapter': chapter_descriptor.url_name,
'section': prev_section.url_name
})
context['fragment'] = Fragment(content=render_to_string(
'courseware/welcome-back.html',
{
'course': course,
'studio_url': studio_url,
'chapter_module': chapter_module,
'prev_section': prev_section,
'prev_section_url': prev_section_url
}
))
result = render_to_response('courseware/courseware.html', context)
except Exception as e:
# Doesn't bar Unicode characters from URL, but if Unicode characters do
# cause an error it is a graceful failure.
if isinstance(e, UnicodeEncodeError):
raise Http404("URL contains Unicode characters")
if isinstance(e, Http404):
# let it propagate
raise
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
else:
log.exception(
u"Error in index view: user={user}, course={course}, chapter={chapter}"
u" section={section} position={position}".format(
user=user,
course=course,
chapter=chapter,
section=section,
position=position
))
try:
result = render_to_response('courseware/courseware-error.html', {
'staff_access': staff_access,
'course': course
})
except:
                # Let the exception propagate, relying on global config to at
                # least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
return result
@ensure_csrf_cookie
@ensure_valid_course_key
def jump_to_id(request, course_id, module_id):
"""
This entry point allows for a shorter version of a jump to where just the id of the element is
passed in. This assumes that id is unique within the course_id namespace
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
items = modulestore().get_items(course_key, qualifiers={'name': module_id})
if len(items) == 0:
raise Http404(
u"Could not find id: {0} in course_id: {1}. Referer: {2}".format(
module_id, course_id, request.META.get("HTTP_REFERER", "")
))
if len(items) > 1:
log.warning(
u"Multiple items found with id: {0} in course_id: {1}. Referer: {2}. Using first: {3}".format(
module_id, course_id, request.META.get("HTTP_REFERER", ""), items[0].location.to_deprecated_string()
))
return jump_to(request, course_id, items[0].location.to_deprecated_string())
@ensure_csrf_cookie
def jump_to(request, course_id, location):
"""
Show the page that contains a specific location.
If the location is invalid or not in any class, return a 404.
Otherwise, delegates to the index view to figure out whether this user
has access, and what they should see.
"""
try:
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
usage_key = course_key.make_usage_key_from_deprecated_string(location)
except InvalidKeyError:
raise Http404(u"Invalid course_key or usage_key")
try:
(course_key, chapter, section, position) = path_to_location(modulestore(), usage_key)
except ItemNotFoundError:
raise Http404(u"No data at this location: {0}".format(usage_key))
except NoPathToItem:
raise Http404(u"This location is not in any class: {0}".format(usage_key))
# choose the appropriate view (and provide the necessary args) based on the
# args provided by the redirect.
# Rely on index to do all error handling and access control.
if chapter is None:
return redirect('courseware', course_id=course_key.to_deprecated_string())
elif section is None:
return redirect('courseware_chapter', course_id=course_key.to_deprecated_string(), chapter=chapter)
elif position is None:
return redirect('courseware_section', course_id=course_key.to_deprecated_string(), chapter=chapter, section=section)
else:
return redirect('courseware_position', course_id=course_key.to_deprecated_string(), chapter=chapter, section=section, position=position)
@ensure_csrf_cookie
@csrf_exempt
def jump_to_lti(request, course_id, location):
#START DEKKER
"""
Show the page that contains a specific location.
If the location is invalid or not in any class, return a 404.
Otherwise, delegates to the index view to figure out whether this user
has access, and what they should see.
"""
try:
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
usage_key = course_key.make_usage_key_from_deprecated_string(location)
except InvalidKeyError:
raise Http404(u"Invalid course_key or usage_key")
try:
(course_key, chapter, section, position) = path_to_location(modulestore(), usage_key)
except ItemNotFoundError:
raise Http404(u"No data at this location: {0}".format(usage_key))
except NoPathToItem:
raise Http404(u"This location is not in any class: {0}".format(usage_key))
if 'oauth_consumer_key' in request.POST:
lti_details = LTIComponent.objects.filter(course_id=course_key, module_id=usage_key, key=request.POST['oauth_consumer_key'])
lti_user = LTIUserAuth.objects.filter(lti_user_id=request.POST['user_id'])
if 'oauth_consumer_key' in request.POST and len(lti_details) > 0:
lti_consumer_secret = lti_details[0].secret
#Request Method
lti_request_http_method = unicode('POST')
#Request URL
lti_request_uri = unicode(urllib.unquote(request.build_absolute_uri()))
uri = signature.normalize_base_string_uri(lti_request_uri)
#Request parameters
resp = dict(request.POST.dict())
orderedresp = OrderedDict(sorted(resp.items(), key=lambda t: t[0]))
query_string = urllib.urlencode(orderedresp)
oauth_headers = dict(signature.collect_parameters(query_string))
lti_request_params = signature.normalize_parameters(oauth_headers.items())
base_string = signature.construct_base_string(lti_request_http_method, uri, lti_request_params)
check_sig = signature.sign_hmac_sha1(base_string, unicode(lti_consumer_secret), None)
if check_sig == request.POST['oauth_signature']:
valid_user = False
if len(lti_user) > 0:
user = lti_user[0].user
user.backend = 'django.contrib.auth.backends.ModelBackend'
user.save()
login(request, user)
valid_user = True
pass
else:
if request.user.is_authenticated():
#Assume for the moment the logged in user is the attached user
print "NOT ATTACHED TO LTI"
new_lti_user = LTIUserAuth(
user=request.user,
roles=request.POST['roles'],
institution="Unknown",
lti_user_id=request.POST['user_id'],
lti_data=json.dumps(request.POST, sort_keys=True),
lti_email="Unknown"
)
new_lti_user.save()
valid_user = True
else:
request.session['lti_login'] = 'true'
request.session['lti_details'] = dict(request.POST.dict())
redir = str(redirect('courseware_section', course_id=course_key.to_deprecated_string(), chapter=chapter, section=section))
redir = redir[redir.rfind('Location:')+10:]
redir = redir.split("\n")[0]
request.session['lti_redirect'] = redir
return redirect(settings.LOGIN_URL)
pass
if valid_user:
request.session['lti_view'] = 'true'
request.session['lti_vars'] = dict(request.POST.dict())
else:
raise PermissionDenied
else:
raise PermissionDenied
else:
raise PermissionDenied
# choose the appropriate view (and provide the necessary args) based on the
# args provided by the redirect.
# Rely on index to do all error handling and access control.
if chapter is None:
return redirect('courseware', course_id=course_key.to_deprecated_string())
elif section is None:
return redirect('courseware_chapter', course_id=course_key.to_deprecated_string(), chapter=chapter)
elif position is None:
return redirect('courseware_section', course_id=course_key.to_deprecated_string(), chapter=chapter, section=section)
else:
return redirect('courseware_position', course_id=course_key.to_deprecated_string(), chapter=chapter, section=section, position=position)
#END DEKKER
@ensure_csrf_cookie
@ensure_valid_course_key
def course_info(request, course_id):
"""
Display the course's info.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
with modulestore().bulk_operations(course_key):
course = get_course_with_access(request.user, 'load', course_key)
# check to see if there is a required survey that must be taken before
# the user can access the course.
if request.user.is_authenticated() and survey.utils.must_answer_survey(course, request.user):
return redirect(reverse('course_survey', args=[unicode(course.id)]))
staff_access = has_access(request.user, 'staff', course)
masq = setup_masquerade(request, staff_access) # allow staff to toggle masquerade on info page
reverifications = fetch_reverify_banner_info(request, course_key)
studio_url = get_studio_url(course, 'course_info')
# link to where the student should go to enroll in the course:
        # about page if there is no marketing site, SITE_NAME if there is
url_to_enroll = reverse(course_about, args=[course_id])
if settings.FEATURES.get('ENABLE_MKTG_SITE'):
url_to_enroll = marketing_link('COURSES')
show_enroll_banner = request.user.is_authenticated() and not CourseEnrollment.is_enrolled(request.user, course.id)
context = {
'request': request,
'course_id': course_key.to_deprecated_string(),
'cache': None,
'course': course,
'staff_access': staff_access,
'masquerade': masq,
'studio_url': studio_url,
'reverifications': reverifications,
'show_enroll_banner': show_enroll_banner,
'url_to_enroll': url_to_enroll,
}
now = datetime.now(UTC())
effective_start = _adjust_start_date_for_beta_testers(request.user, course, course_key)
if staff_access and now < effective_start:
# Disable student view button if user is staff and
# course is not yet visible to students.
context['disable_student_access'] = True
return render_to_response('courseware/info.html', context)
@ensure_csrf_cookie
@ensure_valid_course_key
def static_tab(request, course_id, tab_slug):
"""
Display the courses tab with the given name.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
tab = CourseTabList.get_tab_by_slug(course.tabs, tab_slug)
if tab is None:
raise Http404
contents = get_static_tab_contents(
request,
course,
tab
)
if contents is None:
raise Http404
return render_to_response('courseware/static_tab.html', {
'course': course,
'tab': tab,
'tab_contents': contents,
})
# TODO arjun: remove when custom tabs in place, see courseware/syllabus.py
@ensure_csrf_cookie
@ensure_valid_course_key
def syllabus(request, course_id):
"""
Display the course's syllabus.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
staff_access = has_access(request.user, 'staff', course)
return render_to_response('courseware/syllabus.html', {
'course': course,
'staff_access': staff_access,
})
def registered_for_course(course, user):
"""
Return True if user is registered for course, else False
"""
if user is None:
return False
if user.is_authenticated():
return CourseEnrollment.is_enrolled(user, course.id)
else:
return False
@ensure_csrf_cookie
@cache_if_anonymous()
def course_about(request, course_id):
"""
Display the course's about page.
Assumes the course_id is in a valid format.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
permission_name = microsite.get_value(
'COURSE_ABOUT_VISIBILITY_PERMISSION',
settings.COURSE_ABOUT_VISIBILITY_PERMISSION
)
course = get_course_with_access(request.user, permission_name, course_key)
if microsite.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
):
return redirect(reverse('info', args=[course.id.to_deprecated_string()]))
registered = registered_for_course(course, request.user)
staff_access = has_access(request.user, 'staff', course)
studio_url = get_studio_url(course, 'settings/details')
if has_access(request.user, 'load', course):
course_target = reverse('info', args=[course.id.to_deprecated_string()])
else:
course_target = reverse('about_course', args=[course.id.to_deprecated_string()])
show_courseware_link = (has_access(request.user, 'load', course) or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
# Note: this is a flow for payment for course registration, not the Verified Certificate flow.
registration_price = 0
in_cart = False
reg_then_add_to_cart_link = ""
_is_shopping_cart_enabled = is_shopping_cart_enabled()
if (_is_shopping_cart_enabled):
registration_price = CourseMode.min_course_price_for_currency(course_key,
settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
if request.user.is_authenticated():
cart = shoppingcart.models.Order.get_cart_for_user(request.user)
in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_key) or \
shoppingcart.models.CourseRegCodeItem.contained_in_order(cart, course_key)
reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
reg_url=reverse('register_user'), course_id=course.id.to_deprecated_string())
# Used to provide context to message to student if enrollment not allowed
can_enroll = has_access(request.user, 'enroll', course)
invitation_only = course.invitation_only
is_course_full = CourseEnrollment.is_course_full(course)
# Register button should be disabled if one of the following is true:
# - Student is already registered for course
# - Course is already full
# - Student cannot enroll in course
active_reg_button = not(registered or is_course_full or not can_enroll)
is_shib_course = uses_shib(course)
return render_to_response('courseware/course_about.html', {
'course': course,
'staff_access': staff_access,
'studio_url': studio_url,
'registered': registered,
'course_target': course_target,
'registration_price': registration_price,
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'in_cart': in_cart,
'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
'show_courseware_link': show_courseware_link,
'is_course_full': is_course_full,
'can_enroll': can_enroll,
'invitation_only': invitation_only,
'active_reg_button': active_reg_button,
'is_shib_course': is_shib_course,
# We do not want to display the internal courseware header, which is used when the course is found in the
        # context. This value is therefore explicitly set to render the appropriate header.
'disable_courseware_header': True,
'is_shopping_cart_enabled': _is_shopping_cart_enabled,
'cart_link': reverse('shoppingcart.views.show_cart'),
})
@ensure_csrf_cookie
@cache_if_anonymous('org')
@ensure_valid_course_key
def mktg_course_about(request, course_id):
"""This is the button that gets put into an iframe on the Drupal site."""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
try:
permission_name = microsite.get_value(
'COURSE_ABOUT_VISIBILITY_PERMISSION',
settings.COURSE_ABOUT_VISIBILITY_PERMISSION
)
course = get_course_with_access(request.user, permission_name, course_key)
except (ValueError, Http404):
# If a course does not exist yet, display a "Coming Soon" button
return render_to_response(
'courseware/mktg_coming_soon.html', {'course_id': course_key.to_deprecated_string()}
)
registered = registered_for_course(course, request.user)
if has_access(request.user, 'load', course):
course_target = reverse('info', args=[course.id.to_deprecated_string()])
else:
course_target = reverse('about_course', args=[course.id.to_deprecated_string()])
allow_registration = has_access(request.user, 'enroll', course)
show_courseware_link = (has_access(request.user, 'load', course) or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
course_modes = CourseMode.modes_for_course_dict(course.id)
context = {
'course': course,
'registered': registered,
'allow_registration': allow_registration,
'course_target': course_target,
'show_courseware_link': show_courseware_link,
'course_modes': course_modes,
}
if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
# Drupal will pass organization names using a GET parameter, as follows:
# ?org=Harvard
# ?org=Harvard,MIT
# If no full names are provided, the marketing iframe won't show the
# email opt-in checkbox.
org = request.GET.get('org')
if org:
org_list = org.split(',')
# HTML-escape the provided organization names
org_list = [cgi.escape(org) for org in org_list]
if len(org_list) > 1:
if len(org_list) > 2:
# Translators: The join of three or more institution names (e.g., Harvard, MIT, and Dartmouth).
org_name_string = _("{first_institutions}, and {last_institution}").format(
first_institutions=u", ".join(org_list[:-1]),
last_institution=org_list[-1]
)
else:
# Translators: The join of two institution names (e.g., Harvard and MIT).
org_name_string = _("{first_institution} and {second_institution}").format(
first_institution=org_list[0],
second_institution=org_list[1]
)
else:
org_name_string = org_list[0]
context['checkbox_label'] = ungettext(
"I would like to receive email from {institution_series} and learn about its other programs.",
"I would like to receive email from {institution_series} and learn about their other programs.",
len(org_list)
).format(institution_series=org_name_string)
# The edx.org marketing site currently displays only in English.
# To avoid displaying a different language in the register / access button,
# we force the language to English.
# However, OpenEdX installations with a different marketing front-end
# may want to respect the language specified by the user or the site settings.
force_english = settings.FEATURES.get('IS_EDX_DOMAIN', False)
if force_english:
translation.activate('en-us')
try:
return render_to_response('courseware/mktg_course_about.html', context)
finally:
# Just to be safe, reset the language if we forced it to be English.
if force_english:
translation.deactivate()
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
@ensure_valid_course_key
def progress(request, course_id, student_id=None):
"""
Wraps "_progress" with the manual_transaction context manager just in case
there are unanticipated errors.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
with modulestore().bulk_operations(course_key):
with grades.manual_transaction():
return _progress(request, course_key, student_id)
def _progress(request, course_key, student_id):
"""
Unwrapped version of "progress".
User progress. We show the grade bar and every problem score.
Course staff are allowed to see the progress of students in their class.
"""
course = get_course_with_access(request.user, 'load', course_key, depth=None, check_if_enrolled=True)
# check to see if there is a required survey that must be taken before
# the user can access the course.
if survey.utils.must_answer_survey(course, request.user):
return redirect(reverse('course_survey', args=[unicode(course.id)]))
staff_access = has_access(request.user, 'staff', course)
if student_id is None or student_id == request.user.id:
# always allowed to see your own profile
student = request.user
else:
# Requesting access to a different student's profile
if not staff_access:
raise Http404
student = User.objects.get(id=int(student_id))
# NOTE: To make sure impersonation by instructor works, use
# student instead of request.user in the rest of the function.
# The pre-fetching of groups is done to make auth checks not require an
# additional DB lookup (this kills the Progress page in particular).
student = User.objects.prefetch_related("groups").get(id=student.id)
courseware_summary = grades.progress_summary(student, request, course)
studio_url = get_studio_url(course, 'settings/grading')
grade_summary = grades.grade(student, request, course)
if courseware_summary is None:
        # The student whose progress was requested does not have access to this course.
raise Http404
context = {
'course': course,
'courseware_summary': courseware_summary,
'studio_url': studio_url,
'grade_summary': grade_summary,
'staff_access': staff_access,
'student': student,
'reverifications': fetch_reverify_banner_info(request, course_key)
}
with grades.manual_transaction():
response = render_to_response('courseware/progress.html', context)
return response
def fetch_reverify_banner_info(request, course_key):
"""
    Fetches the context variables needed to display the reverification banner in courseware
"""
reverifications = defaultdict(list)
user = request.user
if not user.id:
return reverifications
enrollment = CourseEnrollment.get_or_create_enrollment(request.user, course_key)
course = modulestore().get_course(course_key)
info = single_course_reverification_info(user, course, enrollment)
if info:
reverifications[info.status].append(info)
return reverifications
@login_required
@ensure_valid_course_key
def submission_history(request, course_id, student_username, location):
"""Render an HTML fragment (meant for inclusion elsewhere) that renders a
history of all state changes made by this user for this problem location.
Right now this only works for problems because that's all
StudentModuleHistory records.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
try:
usage_key = course_key.make_usage_key_from_deprecated_string(location)
except (InvalidKeyError, AssertionError):
return HttpResponse(escape(_(u'Invalid location.')))
course = get_course_with_access(request.user, 'load', course_key)
staff_access = has_access(request.user, 'staff', course)
# Permission Denied if they don't have staff access and are trying to see
# somebody else's submission history.
if (student_username != request.user.username) and (not staff_access):
raise PermissionDenied
try:
student = User.objects.get(username=student_username)
student_module = StudentModule.objects.get(
course_id=course_key,
module_state_key=usage_key,
student_id=student.id
)
except User.DoesNotExist:
return HttpResponse(escape(_(u'User {username} does not exist.').format(username=student_username)))
except StudentModule.DoesNotExist:
return HttpResponse(escape(_(u'User {username} has never accessed problem {location}').format(
username=student_username,
location=location
)))
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
# If no history records exist, let's force a save to get history started.
if not history_entries:
student_module.save()
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
context = {
'history_entries': history_entries,
'username': student.username,
'location': location,
'course_id': course_key.to_deprecated_string()
}
return render_to_response('courseware/submission_history.html', context)
def notification_image_for_tab(course_tab, user, course):
"""
Returns the notification image path for the given course_tab if applicable, otherwise None.
"""
tab_notification_handlers = {
StaffGradingTab.type: open_ended_notifications.staff_grading_notifications,
PeerGradingTab.type: open_ended_notifications.peer_grading_notifications,
OpenEndedGradingTab.type: open_ended_notifications.combined_notifications
}
if course_tab.type in tab_notification_handlers:
notifications = tab_notification_handlers[course_tab.type](course, user)
if notifications and notifications['pending_grading']:
return notifications['img_path']
return None
def get_static_tab_contents(request, course, tab):
"""
Returns the contents for the given static tab
"""
loc = course.id.make_usage_key(
tab.type,
tab.url_slug,
)
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, modulestore().get_item(loc), depth=0
)
tab_module = get_module(
request.user, request, loc, field_data_cache, static_asset_path=course.static_asset_path
)
    logging.debug('tab_module = {0}'.format(tab_module))
html = ''
if tab_module is not None:
try:
html = tab_module.render(STUDENT_VIEW).content
except Exception: # pylint: disable=broad-except
html = render_to_string('courseware/error-message.html', None)
log.exception(
u"Error rendering course={course}, tab={tab_url}".format(course=course, tab_url=tab['url_slug'])
)
return html
@require_GET
@ensure_valid_course_key
def get_course_lti_endpoints(request, course_id):
"""
    View that, given a course_id, returns a JSON object that enumerates all of the LTI endpoints for that course.
The LTI 2.0 result service spec at
http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html
says "This specification document does not prescribe a method for discovering the endpoint URLs." This view
function implements one way of discovering these endpoints, returning a JSON array when accessed.
Arguments:
request (django request object): the HTTP request object that triggered this view function
course_id (unicode): id associated with the course
Returns:
(django response object): HTTP response. 404 if course is not found, otherwise 200 with JSON body.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
try:
course = get_course(course_key, depth=2)
except ValueError:
return HttpResponse(status=404)
anonymous_user = AnonymousUser()
anonymous_user.known = False # make these "noauth" requests like module_render.handle_xblock_callback_noauth
lti_descriptors = modulestore().get_items(course.id, qualifiers={'category': 'lti'})
lti_noauth_modules = [
get_module_for_descriptor(
anonymous_user,
request,
descriptor,
FieldDataCache.cache_for_descriptor_descendents(
course_key,
anonymous_user,
descriptor
),
course_key
)
for descriptor in lti_descriptors
]
endpoints = [
{
'display_name': module.display_name,
'lti_2_0_result_service_json_endpoint': module.get_outcome_service_url(
service_name='lti_2_0_result_rest_handler') + "/user/{anon_user_id}",
'lti_1_1_result_service_xml_endpoint': module.get_outcome_service_url(
service_name='grade_handler'),
}
for module in lti_noauth_modules
]
return HttpResponse(json.dumps(endpoints), content_type='application/json')
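# Shape of the JSON body this view returns, for illustration only (the names and
# URLs below are hypothetical placeholders, keyed exactly as built above):
#
#     [
#         {
#             "display_name": "LTI Exercise 1",
#             "lti_2_0_result_service_json_endpoint":
#                 "https://lms.example.com/.../lti_2_0_result_rest_handler/user/{anon_user_id}",
#             "lti_1_1_result_service_xml_endpoint":
#                 "https://lms.example.com/.../grade_handler"
#         }
#     ]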
@login_required
def course_survey(request, course_id):
"""
URL endpoint to present a survey that is associated with a course_id
Note that the actual implementation of course survey is handled in the
views.py file in the Survey Djangoapp
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
redirect_url = reverse('info', args=[course_id])
# if there is no Survey associated with this course,
# then redirect to the course instead
if not course.course_survey_name:
return redirect(redirect_url)
return survey.views.view_student_survey(
request.user,
course.course_survey_name,
course=course,
redirect_url=redirect_url,
is_required=course.course_survey_required,
)
| 0.002691 |
import numpy as np
import theano
import theano.tensor as T
import lasagne
from collections import OrderedDict
from settings import CHAR_DIM, C2W_HDIM, WDIM, SCALE, N_BATCH, GRAD_CLIP, REGULARIZATION, LEARNING_RATE, MOMENTUM, GAMMA
NL1 = lasagne.nonlinearities.sigmoid
NL2 = lasagne.nonlinearities.tanh
NL3 = lasagne.nonlinearities.tanh
LR = lasagne.regularization.l2
# margin cost defined in TransE
def margincost(pos_loss, neg_loss, margin):
out = margin + pos_loss - neg_loss
return T.sum(out * (out > 0))
# L2 distance between two Theano tensors, compute L2 distance for every row
def L2dist(left, right):
return T.sqrt(T.sum(T.sqr(left - right), axis=1))
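# For a quick sanity check only: plain-NumPy mirrors of the two helpers above,
# useful for verifying the Theano expressions on small arrays. These *_np names
# are illustrative additions, not existing project code.
def L2dist_np(left, right):
    # row-wise Euclidean distance between two (batch, dim) arrays
    return np.sqrt(np.sum(np.square(left - right), axis=1))
def margincost_np(pos_loss, neg_loss, margin):
    # hinge-style margin ranking loss: only violated pairs contribute
    out = margin + pos_loss - neg_loss
    return np.sum(out * (out > 0))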
class charLM(object):
def __init__(self, n_char, n_lhs, n_rel, n_rhs, emb_dim=WDIM, pretrained=None): # is WDIM the RNN embedding dimension? yes
# params
        if pretrained is None:
self.params = OrderedDict()
self.params = init_params(self.params, n_char, n_rel, n_rhs, emb_dim) # define n_rhs, emb_dim
else:
self.params = load_params_shared(pretrained)
self.n_rhs = n_rhs
# model
in_lhs, in_lmask, in_lhsn, in_lmaskn, emb_lhs, emb_lhsn, l_encoder = char2vec(self.params, n_char)
# TODO maybe concatenate RNN embedding with look up table? Do it later. Use a lasagne layer to compress (linear)
in_rhs, in_rhsn, emb_rhs, emb_rhsn = embedding_rhs(self.params, n_rhs, emb_dim)
in_rel, emb_rel = embedding_rel(self.params, n_rel, emb_dim)
        # N_BATCH for the input size? Or just None, since validation and testing later may use any batch size.
# define loss
pred_rhs = emb_lhs + emb_rel # true lhs + rel
pred_lhs = emb_lhsn + emb_rel # negative lhs + rel
pred_rel = emb_rhs - emb_lhs # predicted relation, rhs - lhs, for visualization
pos_loss = L2dist(pred_rhs, emb_rhs) # positive triple distance
neg_loss_r = L2dist(pred_rhs, emb_rhsn) # negative triple distance with corrupted rhs
neg_loss_l = L2dist(pred_lhs, emb_rhs) # negative triple distance with corrupted lhs
loss_rn = margincost(pos_loss, neg_loss_r, GAMMA) # GAMMA is the margin, GAMMA = 1.0 in TransE
loss_ln = margincost(pos_loss, neg_loss_l, GAMMA)
loss = loss_rn + loss_ln
# do we need loss_ln? Yes, and how do we sample random lhs embedding? build a dict too
self.cost = T.mean(loss) + REGULARIZATION*lasagne.regularization.apply_penalty(lasagne.layers.get_all_params(l_encoder), LR)
# can we only add regularization to the RNN parameters? yes, only pass RNN parameters
cost_only = T.mean(loss)
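        # Reference note matching the expressions above: TransE scores a triple
        # (lhs, rel, rhs) by d(lhs + rel, rhs), with d the row-wise L2 distance, and
        # the objective accumulated here is the margin ranking loss
        #     sum over triples of [ GAMMA + d(lhs + rel, rhs) - d(corrupted triple) ]_+
        # where the corrupted triple replaces either the rhs (loss_rn) or the lhs (loss_ln).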
'''get_output can specify input, so don't need to define another embedding layer'''
# updates
self.lr = LEARNING_RATE
self.mu = MOMENTUM
updates = lasagne.updates.nesterov_momentum(self.cost, self.params.values(), self.lr, momentum=self.mu)
# try different lr, momentum
# theano functions
self.inps = [in_lhs, in_lmask, in_lhsn, in_lmaskn, in_rel, in_rhs, in_rhsn] # inputs for the function
self.cost_fn = theano.function(self.inps,cost_only)
self.encode_fn = theano.function([in_lhs, in_lmask], emb_lhs) # compute RNN embeddings given word (drug name)
self.train_fn = theano.function(self.inps,self.cost,updates=updates)
self.pred_right_fn = theano.function([in_lhs, in_lmask, in_rel], pred_rhs) # compute lhs + rel as predicted rhs
self.emb_right_fn = theano.function([in_rhs], emb_rhs) # compute only rhs embedding
self.pred_rel_fn = theano.function([in_lhs, in_lmask, in_rhs], pred_rel)
def pred_rel(self, in_lhs, in_lmask, in_rhs):
return self.pred_rel_fn(in_lhs, in_lmask, in_rhs)
def train(self, in_lhs, in_lmask, in_lhsn, in_lmaskn, in_rel, in_rhs, in_rhsn):
return self.train_fn(in_lhs, in_lmask, in_lhsn, in_lmaskn, in_rel, in_rhs, in_rhsn)
def validate(self, in_lhs, in_lmask, in_lhsn, in_lmaskn, in_rel, in_rhs, in_rhsn):
return self.cost_fn(in_lhs, in_lmask, in_lhsn, in_lmaskn, in_rel, in_rhs, in_rhsn)
def compute_emb_right_all(self): # compute a (n_rhs * emb_dim) numpy matrix, each row is an embedding for a right hand side entity
        in_rhs_all = np.arange(self.n_rhs).astype('int32')  # indices covering every right-hand-side entity, so all of them are embedded at once
self.emb_right_all = self.emb_right_fn(in_rhs_all)
def encode(self, in_lhs, in_lmask):
return self.encode_fn(in_lhs, in_lmask)
def rank_right(self, in_lhs, in_lmask, in_rel, in_rhs): # return a len(in_lhs) size list, each element is the rank of the true rhs among all the rhs
pred_rhs_batch = self.pred_right_fn(in_lhs, in_lmask, in_rel)
right_ranks = []
for i in range(pred_rhs_batch.shape[0]):
true_idx = in_rhs[i]
distances = np.zeros(self.emb_right_all.shape[0])
for j in range(self.emb_right_all.shape[0]):
distances[j] = np.linalg.norm(pred_rhs_batch[i, :] - self.emb_right_all[j, :], 2)
rank = np.argsort(np.argsort(distances))
right_ranks += [rank[true_idx]]
return right_ranks
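    # Note (illustration only): np.argsort(np.argsort(distances)) converts distances
    # into 0-based ranks, e.g. [0.3, 0.1, 0.2] -> [2, 0, 1], so rank[true_idx] is the
    # position of the true rhs. The inner loop could equivalently be vectorized with
    # plain NumPy broadcasting:
    #     distances = np.linalg.norm(self.emb_right_all - pred_rhs_batch[i], axis=1)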
def update_learningrate(self):
self.lr = max(1e-5,self.lr / 2)
updates = lasagne.updates.nesterov_momentum(self.cost, self.params.values(), self.lr, momentum=self.mu)
self.train_fn = theano.function(self.inps,self.cost,updates=updates)
def save_model(self,save_path):
saveparams = OrderedDict()
for kk,vv in self.params.iteritems():
saveparams[kk] = vv.get_value()
np.savez(save_path,**saveparams)
def print_params(self):
for kk,vv in self.params.iteritems():
print("Param {} Max {} Min {}".format(kk, np.max(vv.get_value()), np.min(vv.get_value())))
def init_params(params, n_char, n_rel, n_rhs, emb_dim):
np.random.seed(0)
# lookup table # TODO when using float 32, there will be an error in theano
# "An update must have the same type as the original shared variable", why is that
params['Wc'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(n_char,CHAR_DIM)).astype('float64'), name='Wc')
# f-GRU
params['W_c2w_f_r'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(CHAR_DIM,C2W_HDIM)).astype('float64'), name='W_c2w_f_r')
params['W_c2w_f_z'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(CHAR_DIM,C2W_HDIM)).astype('float64'), name='W_c2w_f_z')
params['W_c2w_f_h'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(CHAR_DIM,C2W_HDIM)).astype('float64'), name='W_c2w_f_h')
params['b_c2w_f_r'] = theano.shared(np.zeros((C2W_HDIM)).astype('float64'), name='b_c2w_f_r')
params['b_c2w_f_z'] = theano.shared(np.zeros((C2W_HDIM)).astype('float64'), name='b_c2w_f_z')
params['b_c2w_f_h'] = theano.shared(np.zeros((C2W_HDIM)).astype('float64'), name='b_c2w_f_h')
params['U_c2w_f_r'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(C2W_HDIM,C2W_HDIM)).astype('float64'), name='U_c2w_f_r')
params['U_c2w_f_z'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(C2W_HDIM,C2W_HDIM)).astype('float64'), name='U_c2w_f_z')
params['U_c2w_f_h'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(C2W_HDIM,C2W_HDIM)).astype('float64'), name='U_c2w_f_h')
# b-GRU
params['W_c2w_b_r'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(CHAR_DIM,C2W_HDIM)).astype('float64'), name='W_c2w_b_r')
params['W_c2w_b_z'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(CHAR_DIM,C2W_HDIM)).astype('float64'), name='W_c2w_b_z')
params['W_c2w_b_h'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(CHAR_DIM,C2W_HDIM)).astype('float64'), name='W_c2w_b_h')
params['b_c2w_b_r'] = theano.shared(np.zeros((C2W_HDIM)).astype('float64'), name='b_c2w_b_r')
params['b_c2w_b_z'] = theano.shared(np.zeros((C2W_HDIM)).astype('float64'), name='b_c2w_b_z')
params['b_c2w_b_h'] = theano.shared(np.zeros((C2W_HDIM)).astype('float64'), name='b_c2w_b_h')
params['U_c2w_b_r'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(C2W_HDIM,C2W_HDIM)).astype('float64'), name='U_c2w_b_r')
params['U_c2w_b_z'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(C2W_HDIM,C2W_HDIM)).astype('float64'), name='U_c2w_b_z')
params['U_c2w_b_h'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(C2W_HDIM,C2W_HDIM)).astype('float64'), name='U_c2w_b_h')
# dense
params['W_c2w'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(2*C2W_HDIM,WDIM)).astype('float64'), name='W_c2w_df')
params['b_c2w'] = theano.shared(np.zeros((WDIM)).astype('float64'), name='b_c2w_df')
# Initialize parameters for rhs entity embedding
params['W_emb_rhs'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(n_rhs, emb_dim)).astype('float64'), name='W_emb_rhs')
# Initialize parameters for relation embedding
params['W_emb_rel'] = theano.shared(np.random.normal(loc=0., scale=SCALE, size=(n_rel, emb_dim)).astype('float64'), name='W_emb_rel')
return params
def char2vec(params,n_char,bias=True):
'''
Bi-GRU for encoding input
'''
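    # Reference only (standard GRU equations as documented for lasagne's GRULayer;
    # the W_* matrices act on the character embedding x_t, the U_* matrices on the
    # previous hidden state h_{t-1}):
    #     r_t = sigmoid(x_t W_r + h_{t-1} U_r + b_r)         reset gate (NL1)
    #     z_t = sigmoid(x_t W_z + h_{t-1} U_z + b_z)         update gate (NL1)
    #     c_t = tanh(x_t W_h + r_t * (h_{t-1} U_h) + b_h)    candidate state (NL2)
    #     h_t = (1 - z_t) * h_{t-1} + z_t * c_t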
# Variables for positive lhs
word = T.imatrix() # B x N # input
mask = T.fmatrix() # B x N # input
# Variables for negative lhs
wordn = T.imatrix() # B x N # input
maskn = T.fmatrix() # B x N # input
# Input layer over characters
l_in_source = lasagne.layers.InputLayer(shape=(N_BATCH,None), name='input')
# Mask layer for variable length sequences
l_mask = lasagne.layers.InputLayer(shape=(N_BATCH,None), name='mask')
# lookup
l_clookup_source = lasagne.layers.EmbeddingLayer(l_in_source, input_size=n_char, output_size=CHAR_DIM, W=params['Wc'])
# f-GRU
c2w_f_reset = lasagne.layers.Gate(W_in=params['W_c2w_f_r'], W_hid=params['U_c2w_f_r'], W_cell=None, b=params['b_c2w_f_r'], nonlinearity=NL1)
c2w_f_update = lasagne.layers.Gate(W_in=params['W_c2w_f_z'], W_hid=params['U_c2w_f_z'], W_cell=None, b=params['b_c2w_f_z'], nonlinearity=NL1)
c2w_f_hidden = lasagne.layers.Gate(W_in=params['W_c2w_f_h'], W_hid=params['U_c2w_f_h'], W_cell=None, b=params['b_c2w_f_h'], nonlinearity=NL2)
l_fgru_source = lasagne.layers.GRULayer(l_clookup_source, C2W_HDIM, resetgate=c2w_f_reset, updategate=c2w_f_update, hidden_update=c2w_f_hidden, hid_init=lasagne.init.Constant(0.), backwards=False, learn_init=True, gradient_steps=-1, grad_clipping=GRAD_CLIP, unroll_scan=False, precompute_input=True, mask_input=l_mask)
# b-GRU
c2w_b_reset = lasagne.layers.Gate(W_in=params['W_c2w_b_r'], W_hid=params['U_c2w_b_r'], W_cell=None, b=params['b_c2w_b_r'], nonlinearity=NL1)
c2w_b_update = lasagne.layers.Gate(W_in=params['W_c2w_b_z'], W_hid=params['U_c2w_b_z'], W_cell=None, b=params['b_c2w_b_z'], nonlinearity=NL1)
c2w_b_hidden = lasagne.layers.Gate(W_in=params['W_c2w_b_h'], W_hid=params['U_c2w_b_h'], W_cell=None, b=params['b_c2w_b_h'], nonlinearity=NL2)
l_bgru_source = lasagne.layers.GRULayer(l_clookup_source, C2W_HDIM, resetgate=c2w_b_reset, updategate=c2w_b_update, hidden_update=c2w_b_hidden, hid_init=lasagne.init.Constant(0.), backwards=True, learn_init=True, gradient_steps=-1, grad_clipping=GRAD_CLIP, unroll_scan=False, precompute_input=True, mask_input=l_mask)
# Slice final states
l_f_source = lasagne.layers.SliceLayer(l_fgru_source, -1, 1)
l_b_source = lasagne.layers.SliceLayer(l_bgru_source, 0, 1)
# Dense
l_concat = lasagne.layers.ConcatLayer((l_f_source,l_b_source),axis=1)
if bias:
l_c2w_source = lasagne.layers.DenseLayer(l_concat, WDIM, W=params['W_c2w'], b=params['b_c2w'], nonlinearity=NL3)
else:
l_c2w_source = lasagne.layers.DenseLayer(l_concat, WDIM, W=params['W_c2w'], b=None, nonlinearity=NL3)
emb_lhs = lasagne.layers.get_output(l_c2w_source, inputs={l_in_source: word, l_mask: mask})
emb_lhsn = lasagne.layers.get_output(l_c2w_source, inputs={l_in_source: wordn, l_mask: maskn})
return word, mask, wordn, maskn, emb_lhs, emb_lhsn, l_c2w_source
#return word, mask, l_c2w_source # return input variables and output variables
# by Yuxing Zhang
def embedding_rhs(params, n_rhs, emb_dim):
'''
Embedding part for right hand side entity embedding and right hand side negative entity embedding
:param params: dict to store parameters
'''
# input variables that is right hand side entity
emb_in_rhs = T.ivector() # B * 1 vector, where each row is a number between 0 and (n_rhs - 1) as the index
emb_in_rhsn = T.ivector() # B * 1 vector, where each row is a number between 0 and (n_rhs - 1) as the index
# Input layer over entity
l_in_rhs = lasagne.layers.InputLayer(shape=(N_BATCH, ), name = 'rhs_input') # removing input_var to reuse it for negative rhs
    # Embedding layer for the rhs entity; emb_dim should equal the embedding dimension from the RNN model.
l_emb_rhs = lasagne.layers.EmbeddingLayer(l_in_rhs, input_size=n_rhs, output_size=emb_dim, W=params['W_emb_rhs'])
return emb_in_rhs, emb_in_rhsn, lasagne.layers.get_output(l_emb_rhs, emb_in_rhs), lasagne.layers.get_output(l_emb_rhs, emb_in_rhsn)
# by Yuxing Zhang
def embedding_rel(params, n_rel, emb_dim):
'''
Embedding part for right hand side entity embedding
:param params: dict to store parameters
'''
# input variables that is the relation index
emb_in_rel = T.ivector() # B * 1 vector, where each row is a number between 0 and (n_rel - 1) as the index
# Input layer over relation
l_in_rel = lasagne.layers.InputLayer(shape=(N_BATCH, ), input_var=emb_in_rel, name = 'rel_input')
    # Embedding layer for the relation; emb_dim should equal the embedding dimension from the RNN model.
l_emb_rel = lasagne.layers.EmbeddingLayer(l_in_rel, input_size=n_rel, output_size=emb_dim, W=params['W_emb_rel'])
return emb_in_rel, lasagne.layers.get_output(l_emb_rel)
def load_params(path):
"""
Load previously saved model
"""
params = OrderedDict()
    with open(path, 'rb') as f:
npzfile = np.load(f)
for kk, vv in npzfile.iteritems():
params[kk] = vv
return params
def load_params_shared(path):
"""
Load previously saved model
"""
params = OrderedDict()
    with open(path, 'rb') as f:
npzfile = np.load(f)
for kk, vv in npzfile.iteritems():
params[kk] = theano.shared(vv, name=kk)
return params
| 0.010622 |
"""A file interface for handling local and remote data files.
The goal of datasource is to abstract some of the file system operations when
dealing with data files so the researcher doesn't have to know all the
low-level details. Through datasource, a researcher can obtain and use a
file with one function call, regardless of location of the file.
DataSource is meant to augment standard python libraries, not replace them.
It should work seamlessly with standard file IO operations and the os module.
DataSource files can originate locally or remotely:
- local files : '/home/guido/src/local/data.txt'
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
DataSource files can also be compressed or uncompressed. Currently only gzip
and bz2 are supported.
Example::
>>> # Create a DataSource, use os.curdir (default) for local storage.
>>> ds = datasource.DataSource()
>>>
>>> # Open a remote file.
>>> # DataSource downloads the file, stores it locally in:
>>> # './www.google.com/index.html'
>>> # opens the file and returns a file object.
>>> fp = ds.open('http://www.google.com/index.html')
>>>
>>> # Use the file as you normally would
>>> fp.read()
>>> fp.close()
"""
__docformat__ = "restructuredtext en"
import os
from shutil import rmtree, copyfile, copyfileobj
_open = open
# Using a class instead of a module-level dictionary
# to reduce the initial 'import numpy' overhead by
# deferring the import of bz2 and gzip until needed
# TODO: .zip support, .tar support?
class _FileOpeners(object):
"""
Container for different methods to open (un-)compressed files.
`_FileOpeners` contains a dictionary that holds one method for each
supported file format. Attribute lookup is implemented in such a way that
an instance of `_FileOpeners` itself can be indexed with the keys of that
dictionary. Currently uncompressed files as well as files
compressed with ``gzip`` or ``bz2`` compression are supported.
Notes
-----
`_file_openers`, an instance of `_FileOpeners`, is made available for
use in the `_datasource` module.
Examples
--------
>>> np.lib._datasource._file_openers.keys()
[None, '.bz2', '.gz']
>>> np.lib._datasource._file_openers['.gz'] is gzip.open
True
"""
def __init__(self):
self._loaded = False
self._file_openers = {None: open}
def _load(self):
if self._loaded:
return
try:
import bz2
self._file_openers[".bz2"] = bz2.BZ2File
except ImportError:
pass
try:
import gzip
self._file_openers[".gz"] = gzip.open
except ImportError:
pass
self._loaded = True
def keys(self):
"""
Return the keys of currently supported file openers.
Parameters
----------
None
Returns
-------
keys : list
The keys are None for uncompressed files and the file extension
strings (i.e. ``'.gz'``, ``'.bz2'``) for supported compression
methods.
"""
self._load()
return self._file_openers.keys()
def __getitem__(self, key):
self._load()
return self._file_openers[key]
_file_openers = _FileOpeners()
def open(path, mode='r', destpath=os.curdir):
"""
Open `path` with `mode` and return the file object.
If ``path`` is an URL, it will be downloaded, stored in the `DataSource`
`destpath` directory and opened from there.
Parameters
----------
path : str
Local file path or URL to open.
mode : str, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
append. Available modes depend on the type of object specified by path.
Default is 'r'.
destpath : str, optional
Path to the directory where the source file gets downloaded to for use.
If `destpath` is None, a temporary directory will be created. The
default path is the current directory.
Returns
-------
out : file object
The opened file.
Notes
-----
This is a convenience function that instantiates a `DataSource` and
returns the file object from ``DataSource.open(path)``.
"""
ds = DataSource(destpath)
return ds.open(path, mode)
class DataSource (object):
"""
DataSource(destpath='.')
A generic data source file (file, http, ftp, ...).
DataSources can be local files or remote files/URLs. The files may
also be compressed or uncompressed. DataSource hides some of the low-level
details of downloading the file, allowing you to simply pass in a valid
file path (or URL) and obtain a file object.
Parameters
----------
destpath : str or None, optional
Path to the directory where the source file gets downloaded to for use.
If `destpath` is None, a temporary directory will be created.
The default path is the current directory.
Notes
-----
URLs require a scheme string (``http://``) to be used, without it they
will fail::
>>> repos = DataSource()
>>> repos.exists('www.google.com/index.html')
False
>>> repos.exists('http://www.google.com/index.html')
True
Temporary directories are deleted when the DataSource is deleted.
Examples
--------
::
>>> ds = DataSource('/home/guido')
>>> urlname = 'http://www.google.com/index.html'
>>> gfile = ds.open('http://www.google.com/index.html') # remote file
>>> ds.abspath(urlname)
'/home/guido/www.google.com/site/index.html'
>>> ds = DataSource(None) # use with temporary file
>>> ds.open('/home/guido/foobar.txt')
    <open file '/home/guido/foobar.txt', mode 'r' at 0x91d4430>
>>> ds.abspath('/home/guido/foobar.txt')
'/tmp/tmpy4pgsP/home/guido/foobar.txt'
"""
def __init__(self, destpath=os.curdir):
"""Create a DataSource with a local path at destpath."""
if destpath:
self._destpath = os.path.abspath(destpath)
self._istmpdest = False
else:
import tempfile # deferring import to improve startup time
self._destpath = tempfile.mkdtemp()
self._istmpdest = True
def __del__(self):
# Remove temp directories
if self._istmpdest:
rmtree(self._destpath)
def _iszip(self, filename):
"""Test if the filename is a zip file by looking at the file extension.
"""
fname, ext = os.path.splitext(filename)
return ext in _file_openers.keys()
def _iswritemode(self, mode):
"""Test if the given mode will open a file for writing."""
# Currently only used to test the bz2 files.
_writemodes = ("w", "+")
for c in mode:
if c in _writemodes:
return True
return False
def _splitzipext(self, filename):
"""Split zip extension from filename and return filename.
*Returns*:
base, zip_ext : {tuple}
"""
if self._iszip(filename):
return os.path.splitext(filename)
else:
return filename, None
def _possible_names(self, filename):
"""Return a tuple containing compressed filename variations."""
names = [filename]
if not self._iszip(filename):
for zipext in _file_openers.keys():
if zipext:
names.append(filename+zipext)
return names
def _isurl(self, path):
"""Test if path is a net location. Tests the scheme and netloc."""
# We do this here to reduce the 'import numpy' initial import time.
from urlparse import urlparse
# BUG : URLs require a scheme string ('http://') to be used.
# www.google.com will fail.
# Should we prepend the scheme for those that don't have it and
        # test that also? Similar to the way we append .gz and test for
        # compressed versions of files.
scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
return bool(scheme and netloc)
def _cache(self, path):
"""Cache the file specified by path.
Creates a copy of the file in the datasource cache.
"""
# We import these here because importing urllib2 is slow and
# a significant fraction of numpy's total import time.
from urllib2 import urlopen
from urllib2 import URLError
upath = self.abspath(path)
# ensure directory exists
if not os.path.exists(os.path.dirname(upath)):
os.makedirs(os.path.dirname(upath))
# TODO: Doesn't handle compressed files!
if self._isurl(path):
try:
openedurl = urlopen(path)
f = _open(upath, 'wb')
try:
copyfileobj(openedurl, f)
finally:
f.close()
except URLError:
raise URLError("URL not found: %s" % path)
else:
            copyfile(path, upath)
return upath
def _findfile(self, path):
"""Searches for ``path`` and returns full path if found.
If path is an URL, _findfile will cache a local copy and return
the path to the cached file.
If path is a local file, _findfile will return a path to that local
file.
The search will include possible compressed versions of the file and
        return the first occurrence found.
"""
# Build list of possible local file paths
if not self._isurl(path):
# Valid local paths
filelist = self._possible_names(path)
# Paths in self._destpath
filelist += self._possible_names(self.abspath(path))
else:
# Cached URLs in self._destpath
filelist = self._possible_names(self.abspath(path))
# Remote URLs
filelist = filelist + self._possible_names(path)
for name in filelist:
if self.exists(name):
if self._isurl(name):
name = self._cache(name)
return name
return None
def abspath(self, path):
"""
Return absolute path of file in the DataSource directory.
If `path` is an URL, then `abspath` will return either the location
the file exists locally or the location it would exist when opened
using the `open` method.
Parameters
----------
path : str
Can be a local file or a remote URL.
Returns
-------
out : str
Complete path, including the `DataSource` destination directory.
Notes
-----
The functionality is based on `os.path.abspath`.
"""
# We do this here to reduce the 'import numpy' initial import time.
from urlparse import urlparse
# TODO: This should be more robust. Handles case where path includes
# the destpath, but not other sub-paths. Failing case:
# path = /home/guido/datafile.txt
# destpath = /home/alex/
# upath = self.abspath(path)
# upath == '/home/alex/home/guido/datafile.txt'
# handle case where path includes self._destpath
splitpath = path.split(self._destpath, 2)
if len(splitpath) > 1:
path = splitpath[1]
scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
netloc = self._sanitize_relative_path(netloc)
upath = self._sanitize_relative_path(upath)
return os.path.join(self._destpath, netloc, upath)
def _sanitize_relative_path(self, path):
"""Return a sanitised relative path for which
os.path.abspath(os.path.join(base, path)).startswith(base)
"""
last = None
path = os.path.normpath(path)
while path != last:
last = path
# Note: os.path.join treats '/' as os.sep on Windows
path = path.lstrip(os.sep).lstrip('/')
path = path.lstrip(os.pardir).lstrip('..')
drive, path = os.path.splitdrive(path) # for Windows
return path
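    # Illustrative behaviour on a POSIX system (hypothetical inputs):
    #     _sanitize_relative_path('../../etc/passwd')  -> 'etc/passwd'
    #     _sanitize_relative_path('/var/log/app.log')  -> 'var/log/app.log'
    # so os.path.join(self._destpath, netloc, upath) in abspath cannot escape
    # the destination directory.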
def exists(self, path):
"""
Test if path exists.
Test if `path` exists as (and in this order):
- a local file.
- a remote URL that has been downloaded and stored locally in the
`DataSource` directory.
- a remote URL that has not been downloaded, but is valid and accessible.
Parameters
----------
path : str
Can be a local file or a remote URL.
Returns
-------
out : bool
True if `path` exists.
Notes
-----
When `path` is an URL, `exists` will return True if it's either stored
locally in the `DataSource` directory, or is a valid remote URL.
        `DataSource` does not discriminate between the two; the file is accessible
if it exists in either location.
"""
# We import this here because importing urllib2 is slow and
# a significant fraction of numpy's total import time.
from urllib2 import urlopen
from urllib2 import URLError
# Test local path
if os.path.exists(path):
return True
# Test cached url
upath = self.abspath(path)
if os.path.exists(upath):
return True
# Test remote url
if self._isurl(path):
try:
netfile = urlopen(path)
del(netfile)
return True
except URLError:
return False
return False
def open(self, path, mode='r'):
"""
Open and return file-like object.
If `path` is an URL, it will be downloaded, stored in the `DataSource`
directory and opened from there.
Parameters
----------
path : str
Local file path or URL to open.
mode : {'r', 'w', 'a'}, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
append. Available modes depend on the type of object specified by
`path`. Default is 'r'.
Returns
-------
out : file object
File object.
"""
# TODO: There is no support for opening a file for writing which
# doesn't exist yet (creating a file). Should there be?
# TODO: Add a ``subdir`` parameter for specifying the subdirectory
# used to store URLs in self._destpath.
if self._isurl(path) and self._iswritemode(mode):
raise ValueError("URLs are not writeable")
# NOTE: _findfile will fail on a new file opened for writing.
found = self._findfile(path)
if found:
_fname, ext = self._splitzipext(found)
if ext == 'bz2':
mode.replace("+", "")
return _file_openers[ext](found, mode=mode)
else:
raise IOError("%s not found." % path)
class Repository (DataSource):
"""
Repository(baseurl, destpath='.')
A data repository where multiple DataSource's share a base URL/directory.
`Repository` extends `DataSource` by prepending a base URL (or directory)
to all the files it handles. Use `Repository` when you will be working
with multiple files from one base URL. Initialize `Repository` with the
base URL, then refer to each file by its filename only.
Parameters
----------
baseurl : str
Path to the local directory or remote location that contains the
data files.
destpath : str or None, optional
Path to the directory where the source file gets downloaded to for use.
If `destpath` is None, a temporary directory will be created.
The default path is the current directory.
Examples
--------
To analyze all files in the repository, do something like this
(note: this is not self-contained code)::
>>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
>>> for filename in filelist:
... fp = repos.open(filename)
... fp.analyze()
... fp.close()
Similarly you could use a URL for a repository::
>>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')
"""
def __init__(self, baseurl, destpath=os.curdir):
"""Create a Repository with a shared url or directory of baseurl."""
DataSource.__init__(self, destpath=destpath)
self._baseurl = baseurl
def __del__(self):
DataSource.__del__(self)
def _fullpath(self, path):
"""Return complete path for path. Prepends baseurl if necessary."""
splitpath = path.split(self._baseurl, 2)
if len(splitpath) == 1:
result = os.path.join(self._baseurl, path)
else:
result = path # path contains baseurl already
return result
def _findfile(self, path):
"""Extend DataSource method to prepend baseurl to ``path``."""
return DataSource._findfile(self, self._fullpath(path))
def abspath(self, path):
"""
Return absolute path of file in the Repository directory.
If `path` is an URL, then `abspath` will return either the location
the file exists locally or the location it would exist when opened
using the `open` method.
Parameters
----------
path : str
Can be a local file or a remote URL. This may, but does not have
to, include the `baseurl` with which the `Repository` was initialized.
Returns
-------
out : str
Complete path, including the `DataSource` destination directory.
"""
return DataSource.abspath(self, self._fullpath(path))
def exists(self, path):
"""
Test if path exists prepending Repository base URL to path.
Test if `path` exists as (and in this order):
- a local file.
- a remote URL that has been downloaded and stored locally in the
`DataSource` directory.
- a remote URL that has not been downloaded, but is valid and
accessible.
Parameters
----------
path : str
Can be a local file or a remote URL. This may, but does not have
to, include the `baseurl` with which the `Repository` was initialized.
Returns
-------
out : bool
True if `path` exists.
Notes
-----
When `path` is an URL, `exists` will return True if it's either stored
locally in the `DataSource` directory, or is a valid remote URL.
        `DataSource` does not discriminate between the two; the file is accessible
if it exists in either location.
"""
return DataSource.exists(self, self._fullpath(path))
def open(self, path, mode='r'):
"""
Open and return file-like object prepending Repository base URL.
If `path` is an URL, it will be downloaded, stored in the DataSource
directory and opened from there.
Parameters
----------
path : str
Local file path or URL to open. This may, but does not have to,
include the `baseurl` with which the `Repository` was initialized.
mode : {'r', 'w', 'a'}, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
append. Available modes depend on the type of object specified by
`path`. Default is 'r'.
Returns
-------
out : file object
File object.
"""
return DataSource.open(self, self._fullpath(path), mode)
def listdir(self):
"""
List files in the source Repository.
Returns
-------
files : list of str
List of file names (not containing a directory part).
Notes
-----
Does not currently work for remote repositories.
"""
if self._isurl(self._baseurl):
            raise NotImplementedError("Directory listing of URLs is not supported yet.")
else:
return os.listdir(self._baseurl)
| 0.000679 |
import json
from threading import Lock
import struct
def _to_bin(trace):
source = trace['source']
destination = trace['destination']
oneDimension = []
for hop in trace['hops']:
count, ip, time = hop
oneDimension.extend([ip, count, time])
format_str = 'QQ' + 'QII' * len(trace['hops'])
format_length = struct.calcsize(format_str)
return struct.pack('<Q' + format_str, format_length, source, destination, *oneDimension)
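def _read_one_record(buf, offset=0):
    # Illustrative decoder (a hypothetical helper, not used by Writer): unpack one
    # record produced by _to_bin. Assumes the stored length prefix equals the size
    # of the little-endian '<QQ' + 'QII' * n payload, which holds for these Q/I
    # fields: 16 bytes for source/destination plus 16 bytes per hop.
    (length,) = struct.unpack_from('<Q', buf, offset)
    n_hops = (length - 16) // 16
    fields = struct.unpack_from('<QQ' + 'QII' * n_hops, buf, offset + 8)
    source, destination, rest = fields[0], fields[1], fields[2:]
    hops = [(rest[i + 1], rest[i], rest[i + 2])  # back to the original (count, ip, time)
            for i in range(0, len(rest), 3)]
    return source, destination, hops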
class Writer(object):
def __init__(self, filename):
self.filename = filename
self.next_to_write = 0
self.waiting = {}
self.lock = Lock()
def _queue(self, i, trace):
self.waiting[i] = trace
def _write_queue(self):
with self.lock:
with open(self.filename, "ab") as f:
                while self.next_to_write in self.waiting:
trace = self.waiting[self.next_to_write]
data = _to_bin(trace)
f.write(data)
del self.waiting[self.next_to_write]
self.next_to_write += 1
def write(self, order, trace):
if not trace:
return False
self._queue(order, trace.__dict__)
self._write_queue()
return True
    def count(self):
        try:
            with open(self.filename, "rb") as f:
                records, header = 0, f.read(8)
                while len(header) == 8:  # walk the length-prefixed binary records
                    f.seek(struct.unpack('<Q', header)[0], 1)
                    records, header = records + 1, f.read(8)
            return records
        except IOError:
            return 0
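# Minimal usage sketch (the trace objects and filename are hypothetical):
# Writer buffers out-of-order results and flushes them in submission order.
#
#     w = Writer("traces.bin")
#     w.write(1, trace_b)   # queued; order 0 has not arrived yet
#     w.write(0, trace_a)   # both records are flushed now, order 0 then 1
#     print w.count()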
| 0.004141 |
"""
Israeli-specific form helpers
"""
from __future__ import unicode_literals
import re
from django.core.exceptions import ValidationError
from django.core.validators import EMPTY_VALUES
from django.forms.fields import RegexField, Field
from django.utils.checksums import luhn
from django.utils.translation import ugettext_lazy as _
# Israeli ID numbers consist of up to 8 digits followed by a checksum digit.
# Numbers which are shorter than 8 digits are effectively left-zero-padded.
# The checksum digit is occasionally separated from the number by a hyphen,
# and is calculated using the luhn algorithm.
#
# Relevant references:
#
# (hebrew) http://he.wikipedia.org/wiki/%D7%9E%D7%A1%D7%A4%D7%A8_%D7%96%D7%94%D7%95%D7%AA_(%D7%99%D7%A9%D7%A8%D7%90%D7%9C)
# (hebrew) http://he.wikipedia.org/wiki/%D7%A1%D7%A4%D7%A8%D7%AA_%D7%91%D7%99%D7%A7%D7%95%D7%A8%D7%AA
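# For illustration only (the fields below rely on django.utils.checksums.luhn):
# a minimal pure-Python Luhn check, applied to the full number including the
# check digit. The helper name is hypothetical and unused elsewhere.
def _luhn_valid_example(number):
    digits = [int(d) for d in str(number)]
    odd, even = digits[-1::-2], digits[-2::-2]
    return (sum(odd) + sum(sum(divmod(2 * d, 10)) for d in even)) % 10 == 0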
id_number_re = re.compile(r'^(?P<number>\d{1,8})-?(?P<check>\d)$')
class ILPostalCodeField(RegexField):
"""
A form field that validates its input as an Israeli postal code.
    The valid format is XXXXX, where each X is a digit.
"""
default_error_messages = {
'invalid': _('Enter a postal code in the format XXXXX'),
}
def __init__(self, *args, **kwargs):
super(ILPostalCodeField, self).__init__(r'^\d{5}$', *args, **kwargs)
def clean(self, value):
if value not in EMPTY_VALUES:
value = value.replace(" ", "")
return super(ILPostalCodeField, self).clean(value)
class ILIDNumberField(Field):
"""
A form field that validates its input as an Israeli identification number.
Valid form is per the Israeli ID specification.
"""
default_error_messages = {
'invalid': _('Enter a valid ID number.'),
}
def clean(self, value):
value = super(ILIDNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
match = id_number_re.match(value)
if not match:
raise ValidationError(self.error_messages['invalid'])
value = match.group('number') + match.group('check')
if not luhn(value):
raise ValidationError(self.error_messages['invalid'])
return value
| 0.001346 |
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# @author: Li Li ([email protected]) #
# created on 08/19/2014 #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from nose.tools import assert_equal
from skbeam.core.constants.xrs import HKL, calibration_standards
from skbeam.core.utils import q_to_d, d_to_q
def smoke_test_powder_standard():
name = 'Si'
cal = calibration_standards[name]
assert(name == cal.name)
for d, hkl, q in cal:
assert_array_almost_equal(d_to_q(d), q)
assert_array_almost_equal(q_to_d(q), d)
assert_array_equal(np.linalg.norm(hkl), hkl.length)
assert_equal(str(cal), "Calibration standard: Si")
assert_equal(len(cal), 11)
def test_hkl():
a = HKL(1, 1, 1)
b = HKL('1', '1', '1')
c = HKL(h='1', k='1', l='1')
d = HKL(1.5, 1.5, 1.75)
assert_equal(a, b)
assert_equal(a, c)
assert_equal(a, d)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| 0 |
"""
Created on Wed Jun 24 11:04:10 2015
Learn T1 NMR experiment run on TOPSPIN
T1 inversion recovery model defined in find_T1_model class
includes calls to run TOPSPIN commands (NMR experiment)
@author: Kissan Mistry
"""
# imports and initializations
from __future__ import division
from t1_model import T1Model
from qinfer.distributions import UniformDistribution
#from qinfer.distributions import NormalDistribution
from qinfer.smc import SMCUpdater
from qinfer.resamplers import LiuWestResampler
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import time
import Lorentzian_fit as LF
from qinfer.expdesign import ExperimentDesigner
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
model = T1Model()
prior = UniformDistribution(np.array([0, 100]))
N_particles=100000
updater = SMCUpdater(model, N_particles, prior, resampler=LiuWestResampler(0.98),zero_weight_policy='reset')
designer=ExperimentDesigner(updater,opt_algo=1)
#Set the value of T1 to Learn, pick 1 value from prior
#true_model=prior.sample()
true_model=np.array([6.77], dtype=model.expparams_dtype)
performance_dtype = [
('expparams', 'float'),
('sim_outcome', 'float'),
('est_mean', 'float'),
]
#NMR EXPERIMENT Initialization*******************************
#going to normalize Mo max of 1.
#model.Mo=float(raw_input('Please enter Mo: '))
#dummy=float(raw_input('Waiting for Mo: '))
#Mo_norm=LF.lorentzfit('1_spectrum.txt')
#model.Mo=(Mo_norm/Mo_norm)
#
#to save output data
timestr = time.strftime("%Y%m%d-%H%M%S")
Saver = PdfPages(timestr+'.pdf')
save_exp=open(timestr+'_exp.txt','w')
save_out=open(timestr+'_out.txt','w')
save_mean=open(timestr+'_mean.txt','w')
#iterative process to find T1
trials=20
data = np.zeros((trials, 1), dtype=performance_dtype)
for idx_trials in xrange(trials):
log.info('trial: ' + str(idx_trials))
#CHOOSE EXPERIMENTAL PARAMETER****************************
guess_iter=50
guess_vec=np.zeros((guess_iter,1))
risk_vec=np.zeros((guess_iter,1))
designer.new_exp()
store_risk=100000000
for idx_guess in xrange(guess_iter):
# print 'guess iteration: '+ str(idx_guess)
# guess=np.array([[[0.1+(0.1*idx_guess)]]],dtype=model.expparams_dtype) #sweep guess/incremental increase
guess=np.array([model.particle_guess_heuristic(updater, 10000)], dtype=model.expparams_dtype) #generate guess from PGH
# print 'Your Guess is: '+ str(guess)
#evaluate bayes risk for the guess
current_risk=updater.bayes_risk(guess)
# print 'bayes_risk: ' + str(current_risk)
if current_risk<store_risk:
store_risk=current_risk
expparams=guess
risk_vec[idx_guess]=current_risk
guess_vec[idx_guess]=guess
log.debug('Your Tau is: ' + str(expparams))
#optimize that guess
# expparams=designer.design_expparams_field(guess,0,cost_scale_k=1,disp=False,maxiter=10000,maxfun=10000,store_guess=True,grad_h=1,)
# print 'Your Tau is: ' + str(expparams)
fig = plt.figure()
plt.scatter(guess_vec,risk_vec,s=1)
plt.title('Bayes Risk of Guesses, Best Guess= '+str(expparams))
plt.ylabel('Bayes Risk')
plt.xlabel(r'$\tau$'+' Guess')
Saver.savefig()
#THIS MANUALLY COMPARES THE BAYES RISK OF THE GUESS VALUE AND THE OPTIMIZED VALUE AND PLOTS IT FOR SHOW,
#TO SEE HOW IT IS CHOOSING THE BEST VALUE.
# guess_iter=100
# guess_vec=np.zeros((guess_iter,1))
# grisk_vec=np.zeros((guess_iter,1))
# tau_vec=np.zeros((guess_iter,1))
# trisk_vec=np.zeros((guess_iter,1))
# designer.new_exp()
# for idx_guess in xrange(guess_iter):
# print 'guess iteration: '+ str(idx_guess)
# guess=np.array([model.particle_guess_heuristic(updater,10000)],dtype=model.expparams_dtype )
# guess_risk=updater.bayes_risk(guess)
# print 'Your Guess is: '+ str(guess)
# guess_vec[idx_guess]=guess
# grisk_vec[idx_guess]=guess_risk
# expparams=designer.design_expparams_field(guess,0,cost_scale_k=10,disp=False,maxiter=10000,maxfun=10000,store_guess=False,grad_h=1,)
# tau_risk=updater.bayes_risk(expparams)
# print 'Your Tau is: ' + str(expparams)
# tau_vec[idx_guess]=expparams
# trisk_vec[idx_guess]=tau_risk
# fig1=plt.figure()
# plt.scatter(guess_vec,grisk_vec)
# fig2=plt.figure()
# plt.scatter(tau_vec,trisk_vec)
# expparams=np.array([guess_vec[np.argmin(grisk_vec)]],dtype=model.expparams_dtype)
#Try getting quantity for Fisher Information and Score
# score=model.score()
## expparams=np.array([np.linspace(1, 10, 1000)])
# expparams=model.pgh(updater,10000) #generate guess from PGH
#
# fisher=model.fisher_information(true_model,expparams)
#
#SIMULATE*******************************************************
    #simulate outcomes - based on the true T1 and the chosen initial value
#will be replaced by actual data collection from NMR for Mz values
sim_outcome=model.simulate_experiment(true_model,expparams)
outcome=sim_outcome
#NMR EXPERIMENT*************************************************
    #USE this instead of simulate when doing experiments in NMR
# outcome=np.array([[[float(raw_input('Enter obtained Mz: '))]]])
# dummy=float(raw_input('waiting for Mz'))
# Mz_value=LF.lorentzfit(str(idx_trials+2)+'_spectrum.txt')
# outcome=np.array([[[Mz_value/abs(Mo_norm)]]])
#Run SMC and update the posterior distribution
updater.update(outcome,expparams,check_for_resample=True)
#STORE DATA******************************************
data[idx_trials]['est_mean'] = updater.est_mean()
data[idx_trials]['sim_outcome'] = outcome
data[idx_trials]['expparams'] = expparams
save_exp.writelines(str(expparams)+'\n')
save_mean.write(str(updater.est_mean())+'\n')
save_out.write(str(outcome)+'\n')
# PLOT *******************************************
#plotting particles and weights
particles = updater.particle_locations
weights = updater.particle_weights
if idx_trials==0:
maxw=max(weights)
weights=weights/maxw #normalize the posterior
fig1 = plt.figure()
plt.axvline(updater.est_mean(), linestyle = '--', c = 'blue', linewidth =2,label='Est. Mean')
plt.axvline(true_model, linestyle = '--', c = 'red', linewidth = 2,label='True Model')
plt.scatter(particles,weights,s=0.1)
plt.title('Posterior Distribution T1= '+str(updater.est_mean()))
plt.ylabel('Normalized Weight')
plt.xlabel('Particles')
plt.legend()
Saver.savefig()
#END LOOP***************************************************
Saver.close()
save_exp.close()
save_mean.close()
save_out.close() | 0.018824 |
####################################################################
####################################################################
#### The Awesome Simulator, DF-ISE FileIO Module ####
#### ####
#### Author: Abhejit Rajagopal, [email protected] ####
#### ####
#### (DFISE.py) is part of the AwSimLib software package ####
#### module DFISE.py: /fileIO/DFISE.py ####
#### ####
#### This software is free to use, subject to terms and         ####
#### conditions in the AwSimLib license (LGPLv3). ####
#### AwSimLib license: ../_LICENSE.TXT ####
####################################################################
####################################################################
####################################################################
#### Version History ####
####################################################################
#### 0.1 09/01/2013 - classes, combine by line ####
#### 0.2 12/12/2013 - combine v2.py ####
#### 0.3 04/10/2014 - read.py, unified ####
#### 0.4 05/20/2014 - lumeric, csv, ####
#### 0.5 09/02/2014 - AwSimLib initial release ####
#### ####
#### Part of the AwSimLib software package. ####
#### Copyright (C) 2014 Abhejit Rajagopal ####
####################################################################
####################################################################
####################################################################
#### Helper Functions: --> Debug ####
####################################################################
## system libs
import sys
orig_stdout = sys.stdout
def printHEADER(string):
pass
#string = string
#print (string)
def printINFO(string):
pass
#string = string
#print (string)
def printDATA(string):
pass
#string = string
#print (string)
def printADD(string):
pass
#string = string
#print (string)
####################################################################
####################################################################
####################################################################
#### Class Definitions:--> Make a DFISE_File object ####
####################################################################
class DFISE_DATfile:
## DF-ISE Data ('.dat') File handler
def __init__(self, *filenames):
self.filename = 'FilenameNotSpecified.noext'
self.INFO = Info()
self.DATA = Data()
# optional filename options to pre-load data
if len(filenames)==0:
print ('Empty DFISE_DATFile: no filename specified.')
elif len(filenames)==1:
self.filename = str(filenames[0])
#
## end DF-ISE object class
class Info:
## class to represent header in a '.dat' file
def __init__(self):
self.version = []
self.type = []
self.dimension = []
self.nb_vertices= []
self.nb_edges = []
self.nb_faces = []
self.nb_elements= []
self.nb_regions = []
self.datasets = []
self.functions = []
def setField(self, field, value):
#applies a value to a field
if (field == "version"):
self.version = value
elif (field == "type"):
self.type = value
elif (field == "dimension"):
self.dimension = value
elif (field == "nb_vertices"):
self.nb_vertices = value
elif (field == "nb_edges"):
self.nb_edges = value
elif (field == "nb_faces"):
self.nb_faces = value
elif (field == "nb_elements"):
self.nb_elements = value
elif (field == "nb_regions"):
self.nb_regions = value
elif (field == "datasets"):
self.datasets = value
elif (field == "functions"):
self.functions = value
## end Info class
class Data:
## class to represent data in a '.dat' file
def __init__(self):
self.numDatasets= [] # of datasets
self.datasets = [] #list of datasets
def setNum(self, number):
#sets numDatasets
#makes appropriate number of Dataset variables to store in datasets
self.numDatasets = number
setX = []
for i in range(number) :
setX.append(Dataset())
self.datasets = setX
def setField (self, counter, field, value):
#sets value of field in datasets[counter]
#print "field== " + str(field.strip()) + " value== " + str(value) + " length== " + str(len(self.datasets))
self.datasets[counter].setField(field.strip(),value)
def retData(self,counter):
return self.datasets[counter]
## end Data class
class Dataset:
## class to represent each dataset within data in a '.dat' file
def __init__(self):
self.dataname = []
self.function = []
self.type = []
self.dimension = []
self.location = []
self.validity = []
self.numValues = []
self.Values = []
def setField(self, field, value):
#applies a value to a field
if (field == "dataname"):
self.dataname = value
elif (field == "function"):
self.function = value
elif (field == "type"):
self.type = value
elif (field == "dimension"):
self.dimension = value
elif (field == "location"):
self.location = value
elif (field == "validity"):
self.validity = value
elif (field == "numValues"):
#print "NumVALUES! == " + str(value)
self.numValues = value
elif (field == "Values"):
#self.Values.append(value)
self.Values = value
## end Dataset class
####################################################################
####################################################################
####################################################################
#### Functions: --> File Ops ####
####################################################################
def readDAT(filenames):
#### File Parser for DF-ISE data ('.dat') files
####
#### In: List of filenames, e.g.: filenames=['testgrid1.dat'(, ...)]
####
#### Out: List of DF-ISE objects, e.g.: to_return=[dfise_object1,(, ...)]
####
####
## libraries provided by system
import numpy as np
import sys
import glob
##
to_return = [] #list of DFISE_DATfile
print ('')
print ('~~~~~~~~~~~~~~~~~~~~')
print ('DF-ISE readDAT')
if type(filenames)!=list: #correct if only 1 item provided
filenames = [filenames]
#
i = 0
for filename in filenames:
grabNull = 0
grabHead = 1
grabInfo = 0
grabData = 0
grabDataVALS = 0
print ('~~~~~~~~~~~~~~~~~~~~')
f = open (filename,"r")
print ("processing: " + str(filename))
DFISE_obj = DFISE_DATfile(str(filename))
j = -1 #dataset counter
for line in f:
if grabHead == 1: ## check file type/header on first line ##
split_space = line.split(' ')
#good
if split_space[0] == "DF-ISE":
print ("-->Header OK: DF-ISE text")
grabHead = 0
grabNull = 1
continue
#bad
else:
print ("~~~~")
print (" was expecting a DF-ISE file, check file header")
sys.exit(0)
elif grabNull == 1:
split_space = line.split(' ')
if split_space[0] == 'Info':
print ("-->Info section identified")
grabInfo = 1
grabNull = 0
continue
elif split_space[0] == 'Data':
print ("-->Data section identified")
grabData = 1
grabNull = 0
continue
elif split_space[0].strip() == '':
printHEADER( "..blankline.." )
continue
else:
print ("~~~~")
print ("ERROR SHOULD NOT BE HERE -- grabNull == 1")
sys.exit(0)
elif grabInfo == 1:
split_equ = line.split('=')
field = split_equ[0].strip()
if len(split_equ) > 1:
quantity = split_equ[1]
elif split_equ[0].strip() == '}': #end criteria
print ("--end of Info section.")
grabInfo = 0
grabNull = 1
else:
print ("~~~~")
print ("ERROR SHOULD NOT BE HERE -- grabInfo == 1")
sys.exit(0)
if field == "version":
Info_version = float(quantity.strip()) #float
DFISE_obj.INFO.setField(field, Info_version)
printINFO( "version = " + str(Info_version) )
elif field == "type":
Info_type = quantity.strip() #string
DFISE_obj.INFO.setField(field, Info_type)
printINFO( "type = " + str(Info_type))
elif field == "dimension":
Info_dimension = int(quantity.strip()) #int
DFISE_obj.INFO.setField(field, Info_dimension)
printINFO( "dimension = " + str(Info_dimension) )
elif field == "nb_vertices":
Info_nb_vertices = int(quantity.strip()) #int
DFISE_obj.INFO.setField(field, Info_nb_vertices)
printINFO( "nb_vertices = " + str(Info_nb_vertices) )
elif field == "nb_edges":
Info_nb_edges = int(quantity.strip()) #int
DFISE_obj.INFO.setField(field, Info_nb_edges)
printINFO( "nb_edges = " + str(Info_nb_edges) )
elif field == "nb_faces":
Info_nb_faces = int(quantity.strip()) #int
DFISE_obj.INFO.setField(field, Info_nb_faces)
printINFO( "nb_faces = " + str(Info_nb_faces) )
elif field == "nb_elements":
Info_nb_elements = int(quantity.strip()) #int
DFISE_obj.INFO.setField(field, Info_nb_elements)
printINFO( "nb_elements = " + str(Info_nb_elements) )
elif field == "nb_regions":
Info_nb_regions = int(quantity.strip()) #int
DFISE_obj.INFO.setField(field, Info_nb_regions)
printINFO( "nb_regions = " + str(Info_nb_regions) )
elif field == "datasets":
Info_nb_datasets = quantity.split('[ "')[1].split('" ]')[0].split('" "') #list of str
Info_num_datasets = int(len(Info_nb_datasets)) #int
DFISE_obj.INFO.setField(field, Info_nb_datasets)
#INFO.setField("version", Info_num_datasets)
printINFO( "nb_datasets (" + str(Info_num_datasets) + ") = " + str(Info_nb_datasets) )
elif field == "functions":
Info_nb_functions = quantity.split('[ ')[1].split(' ]')[0].split(' ') #list of str
Info_num_functions = int(len(Info_nb_functions)) #int
DFISE_obj.INFO.setField(field, Info_nb_functions)
#INFO.setField("version", Info_num_functions)
printINFO( "nb_functions (" + str(Info_num_functions) + ") = " + str(Info_nb_functions) )
if Info_num_functions == Info_num_datasets:
print ("number of datasets matches number of functions, ok!")
DFISE_obj.DATA.setNum(Info_num_datasets)
else:
print ("number of datasets does not match number of functions, check file!")
sys.exit(0)
elif grabData == 1:
split_equ = line.split('=')
split_space = line.split(' ')
#print (split_space)
#print (split_equ)
field = None
quantity = None
if grabDataVALS == 0:
for each in split_space:
field = each.strip()
if field == '':
#print ("..blankspace or blankline.."),
continue
elif field == "Dataset":
j = j+1
printDATA( "**NEW DATASET, j = " + str(j) + " **" )
Data_name = str(line.split(' ("')[1].split('") ')[0]) #str
DFISE_obj.DATA.setField(j, "dataname", Data_name)
printDATA( "name = " + Data_name )
elif field == "function":
Data_function = str(split_equ[1].strip()) #str
DFISE_obj.DATA.setField(j, field, Data_function)
printDATA( "function = " + Data_function )
elif field == "type":
Data_type = str(split_equ[1].strip()) #str
DFISE_obj.DATA.setField(j, field, Data_type)
printDATA( "type = " + Data_type )
elif field == "dimension":
Data_dimension = str(int(split_equ[1].strip())) #int
DFISE_obj.DATA.setField(j, field, Data_dimension)
printDATA( "dimension = " + str(Data_dimension) )
elif field == "location":
Data_location = str(split_equ[1].strip()) #str
DFISE_obj.DATA.setField(j, field, Data_location)
printDATA( "location = " + Data_location )
elif field == "validity":
Data_validity = str(split_equ[1].split('[ "')[1].split('" ]')[0]) #str
DFISE_obj.DATA.setField(j, field, Data_validity)
printDATA( "validity = " + Data_validity )
elif field == "Values":
Data_num_values = int(line.split(' (')[1].split(') ')[0]) #int
DFISE_obj.DATA.setField(j, "numValues", Data_num_values)
printDATA( "num_values = " + str(Data_num_values) )
grabDataVALS = 1
datasetvals = [] # 1D list later converted to numpy array
elif grabDataVALS == 1:
                    ## READ VALS BY LINE (DEPRECATED) ###
# if line.strip() == '}':
# #print(datasetvals)
# DFISE_obj.DATA.setField(j, "Values", datasetvals)
# grabDataVALS = 0
# continue
#
# quantities = line.split(' ')
# linevals = []
# for each in quantities:
# if each.strip() == '':
# continue
# else:
# linevals.append(float(each))
#
# linevals = np.array(linevals) #each line is stored as an array
# datasetvals.append(linevals) #inside a list for each dataset
# #print ("length = " + str(len(datasetvals)) + " values = " + str(datasetvals))
## READ VALS BY SECTION (array of dataset values)###
if line.strip() == '}': #ending brace
#print(datasetvals)
datasetvals = np.array(datasetvals) #cast as numpy array (modify for alternate datatype)
#print ("length = " + str(len(datasetvals)) )#+ " values = " + str(datasetvals))
DFISE_obj.DATA.setField(j, "Values", datasetvals)
grabDataVALS = 0
continue
quantities = line.split(' ')
linevals = []
for each in quantities:
if each.strip() == '':
continue
else:
datasetvals.append(float(each))
#
#
# # #
## Done collecting all the data, close file and append to data list
f.close()
to_return.append(DFISE_obj)
i = i+1 #file number counter
# end file
print ("~~~~~~~~~~~~~~~~~~~~")
print ('')
return to_return
## END FUNCTION
def printDAT(dat):
#### Print dataset info from DF-ISE file object
####
    #### In: DF-ISE object, e.g.: dat=DFISE.readDAT(filenames)
    ####
    #### Out: Prints info to console.
    #### Returns 1 if successful
####
print ('')
print ("~~~~~~~~~~~~~~~~~~~~")
print ('DF-ISE printDAT')
print ("~~~~~~~~~~~~~~~~~~~~")
print ('Dataset verification:')
i=0
for dataset in dat.DATA.datasets:
print (' '+ str(i) +' '+ dataset.dataname +' '+ dataset.validity +' '+ dataset.dimension)
i = i+1
#
print ("~~~~~~~~~~~~~~~~~~~~")
print ('')
####
def writeDAT(data, output_filename):
#### File Writer for DF-ISE data ('.dat') files
####
#### In: List of DF-ISE objects, e.g.: data=[DFISE.readDAT(filenames)]
#### Output filename string, e.g.: output_filename=str('PythonDFISEdatOUTPUT.dat')
####
    #### Out: Print '.dat' with specified filename, e.g.: program should exit if not success
#### Return 1 if completed
####
####
## libraries provided by system
import numpy as np
import sys
##
if type(data)!=list: #correct if only 1 item provided
data = [data]
#
if len(data) > 1:
print ("ERROR: You must provide only 1 valid object data=[DFISE_DATfile1(, ...)]")
return 0
#print ("...... using 1st item only...") #feature depricated
print ('')
print ('~~~~~~~~~~~~~~~~~~~~')
print ('DF-ISE writeDAT')
print ('~~~~~~~~~~~~~~~~~~~~')
print ('printing file: ' + output_filename)
FILE0 = data[0] # first data object
orig_stdout = sys.stdout # save the pointer for standard out
to_write = open(output_filename, 'wb')
sys.stdout = to_write # set the standard out pointer to the to_write file
infos = FILE0.INFO
dats = FILE0.DATA
#header
print ("DF-ISE text")
print ("")
#info
print ("Info {")
print (" " + "version = " + str(infos.version))
print (" " + "type = " + str(infos.type))
print (" " + "dimension = " + str(infos.dimension))
print (" " + "nb_vertices = " + str(infos.nb_vertices))
print (" " + "nb_edges = " + str(infos.nb_edges))
print (" " + "nb_faces = " + str(infos.nb_faces))
print (" " + "nb_elements = " + str(infos.nb_elements))
print (" " + "nb_regions = " + str(infos.nb_regions))
print (" " + "datasets = ["),
for each in infos.datasets:
print ('"'+each+'"'),
print ("]")
print (" " + "functions = ["),
for each in infos.functions:
print (each),
print ("]")
print ("}")
print ("")
#data
print ("Data {")
print ("")
for dataset in dats.datasets:
print (' Dataset ("' + dataset.dataname + '") {')
print (' function = ' + dataset.function)
print (' type = ' + dataset.type)
print (' dimension = ' + dataset.dimension)
print (' location = ' + dataset.location)
print (' validity = [ "' + dataset.validity + '" ]')
print (' Values (' + str(dataset.numValues) + ') {')
valNum = 0
for val in np.nditer(dataset.Values):
if valNum%10==0 and valNum!=0: # every ten items
print (' ') #space+newline
#elif valNum%10==0 and valNum==0: # every ten items
#print (' '),
print (''),
print ('%.15e' % float(val)),
print (''),
valNum = valNum+1
#
print(' ')
print (' }')
print (' }')
print ('')
print ('')
print ('}')
sys.stdout = orig_stdout #reset sys standard out pointer
to_write.close()
print ('~~~~~~~~~~~~~~~~~~~~')
return 1
## END FUNCTION
def combineDAT(FILEmaster, FILE0, regions0, FILE1, regions1, field_names, field_dimensions, field_location):
#### Combine DF-ISE datasets
####
#### In: Object to store resulting DF-ISE, e.g.: Filemaster=FILE0
#### File0, e.g.: FILE0=DFISE.readDAT(filename0)
#### Regions to consider in FILE0, regions0=['regA0'(, ...)]
#### File1, e.g.: FILE1=DFISE.readDAT(filename1)
#### Regions to consider in FILE1, e.g.: regions1=['regA1'(, ...)]
#### Fields to combine, e.g.: field_names=[ "PMIUserField0"(, ...)]
#### Dimensions of those fields, e.g.: field_dimensions=[ "1"(, ...)]
#### Location of the points, e.g.: field_location=[ "vertex"(, ...)]
####
    #### Out: Combined datasets stored in FILEmaster (values summed in place)
    #### Returns FILEmaster if completed
####
#### Note: # must verify numValues are the same for two datasets, obviously
#### # must verify dataname, (function, type,) location, validity
#### # ()==warning
####
####
## libraries provided by system
import numpy as np
##
print ('')
print ('~~~~~~~~~~~~~~~~~~~~')
print ('DF-ISE combineDAT')
print ('~~~~~~~~~~~~~~~~~~~~')
if len(regions0)!=len(regions1):
print ('Number of regions in regions0 and regions1 do not match!')
if len(field_names)!=len(field_dimensions) or len(field_names)!=len(field_location):
print ('Number of regions in field_<info> do not match!!')
print('files: '+FILE0.filename+' '+FILE1.filename)
#regionNUM = 0
#for region in regions0:
fieldNUM = 0
for field in field_names:
regionNUM = 0
for region in regions0:
print('--> looking for '+region)
# find dataset indices in File0
dataset0_index = []
indexNUM = 0
for dataset in FILE0.DATA.datasets:
if dataset.validity==regions0[regionNUM] and dataset.dataname==field_names[fieldNUM] and dataset.dimension==field_dimensions[fieldNUM] and dataset.location==field_location[fieldNUM]:
dataset0_index.append(indexNUM)
print(' '+'File0: @'+str(indexNUM)+' found '+dataset.dataname+' in '+dataset.validity)
else:
#print(' '+'File0: @'+str(indexNUM)+' !!!! '+dataset.dataname+' in '+dataset.validity)
pass
#
indexNUM = indexNUM+1
#
# find dataset indices in File1
dataset1_index = []
indexNUM = 0
            for dataset in FILE1.DATA.datasets:
if dataset.validity==regions1[regionNUM] and dataset.dataname==field_names[fieldNUM] and dataset.dimension==field_dimensions[fieldNUM] and dataset.location==field_location[fieldNUM]:
dataset1_index.append(indexNUM)
print(' '+'File1: @'+str(indexNUM)+' found '+dataset.dataname+' in '+dataset.validity)
else:
#print(' '+'File1: @'+str(indexNUM)+' !!!! '+dataset.dataname+' in '+dataset.validity)
pass
#
indexNUM = indexNUM+1
#
## now we have two lists, (hopefully of same length), where each element corresponds to dataset# to compare in DATA.datasets[#] --> in this case add .Values
if len(dataset0_index)!=len(dataset1_index):
print (' ERROR: data files provided have some redundancy in validity/dataname.')
#
if len(dataset0_index)>1:
print (' ERROR: more than 1 dataset found for given region/info')
print(len(dataset0_index))
else:
#print(len(dataset0_index))
pass
indexNUM = 0
for each in dataset0_index:
if FILE0.DATA.datasets[dataset0_index[indexNUM]].function!=FILE1.DATA.datasets[dataset1_index[indexNUM]].function:
print('Warning: the sets being combined do not match in functionname')
print(' --> file0: '+str(FILE0.DATA.datasets[dataset0_index[indexNUM]].function))
print(' --> file1: '+str(FILE1.DATA.datasets[dataset1_index[indexNUM]].function))
pass
if FILE0.DATA.datasets[dataset0_index[indexNUM]].type!=FILE1.DATA.datasets[dataset1_index[indexNUM]].type:
print('Warning: the sets being combined do not match in type')
print(' --> file0: '+str(FILE0.DATA.datasets[dataset0_index[indexNUM]].type))
print(' --> file1: '+str(FILE1.DATA.datasets[dataset1_index[indexNUM]].type))
pass
if FILE0.DATA.datasets[dataset0_index[indexNUM]].numValues!=FILE1.DATA.datasets[dataset1_index[indexNUM]].numValues:
print('ERROR: the sets being combined do not match in numValues')
print(' --> file0: '+str(FILE0.DATA.datasets[dataset0_index[indexNUM]].numValues))
print(' --> file1: '+str(FILE1.DATA.datasets[dataset1_index[indexNUM]].numValues))
continue
#
## identifying info
print(' adding @'+str(each)+' '+FILE0.DATA.datasets[dataset1_index[indexNUM]].validity),
print (FILE0.DATA.datasets[dataset0_index[indexNUM]].dataname +'0 '+ FILE0.DATA.datasets[dataset1_index[indexNUM]].dataname+'1'),
## great, now just add them already!
tmp = np.add(FILE0.DATA.datasets[dataset0_index[indexNUM]].Values, FILE1.DATA.datasets[dataset1_index[indexNUM]].Values)
FILEmaster.DATA.setField (dataset0_index[indexNUM], 'Values', tmp)
if all(tmp == FILEmaster.DATA.datasets[dataset0_index[indexNUM]].Values):
                    print('Success!')
else:
print('hmmph '),
print(type(FILE0.DATA.datasets[dataset0_index[indexNUM]].Values)),
print(' '),
print(type(FILE1.DATA.datasets[dataset1_index[indexNUM]].Values))
print(' '),
print(len(FILE0.DATA.datasets[dataset0_index[indexNUM]].Values)),
print(' '),
print(len(FILE1.DATA.datasets[dataset1_index[indexNUM]].Values))
print(' '),
print((FILE0.DATA.datasets[dataset0_index[indexNUM]].Values)[0]),
print(' '),
print((FILE1.DATA.datasets[dataset1_index[indexNUM]].Values)[0])
print(' '),
print(type(FILE0.DATA.datasets[dataset0_index[indexNUM]].Values[0])),
print(' '),
print(type(FILE1.DATA.datasets[dataset1_index[indexNUM]].Values[0])),
print(np.add(FILE0.DATA.datasets[dataset0_index[indexNUM]].Values[0],FILE1.DATA.datasets[dataset1_index[indexNUM]].Values[0]))
indexNUM = indexNUM+1
# endADD
regionNUM = regionNUM+1
# endField
fieldNUM = fieldNUM+1
#endRegion
print ('~~~~~~~~~~~~~~~~~~~~')
return FILEmaster
## END FUNCTION
def extractDAT2matrix(extract_file, extract_regions, extract_fields, extract_dimensions):
#### Extract datasets from a DF-ISE object to a matrix
####
#### In: DF-ISE file object with data, e.g.: extract_file=DFISE.readDAT(filename)
#### Regions to consider in FILE0, extract_regions=['regA0'(, ...)]
#### Datasets to extract, e.g.: extract_fields=[ "PMIUserField0"(, ...)]
#### Dimensions of those fields, e.g.: extract_dimensions=[ "1"(, ...)]
####
#### Out: A list of matrices, where each matrix is data corresponding to a region
####
#### Note: # datasets are extracted in order, size of matrix may vary...
#### ... depending on available data in file
####
## libraries provided by system
import numpy as np
##
print ('')
print ('~~~~~~~~~~~~~~~~~~~~')
print ('DF-ISE extractDAT')
print ('~~~~~~~~~~~~~~~~~~~~')
print ('processing: ' + extract_file.filename)
# print ('~~~')
print ('')
data = []
for region in extract_regions:
print ("Region: ~" + region + "~")
coords = []
fieldnum = 0
for field in extract_fields:
#print field[fieldnum]
for dataset in extract_file.DATA.datasets:
if dataset.dataname==field and dataset.validity==region and dataset.dimension==extract_dimensions[fieldnum]:
## scalar quantities
if dataset.dimension == "1":
## GRAB BY VALUE ##
coords.append(dataset.Values)
##vector quantities
elif dataset.dimension == "3":
pntsX = []
pntsY = []
pntsZ = []
## GRAB BY VALUE ##
valNUM = 1
for each in dataset.Values:
if valNUM == 1:
pntsX.append(each)
valNUM = 2
elif valNUM == 2:
pntsY.append(each)
valNUM = 3
elif valNUM == 3:
pntsZ.append(each)
valNUM = 1
#
#important!!! for dim=3, append 3 lists
coords.append(pntsX)
coords.append(pntsY)
coords.append(pntsZ)
#endif
print ("---> retrieved: '" + field + "' in: " + region +" as dim"+str(dataset.dimension))
#endmatch
#end
fieldnum = fieldnum+1
#end
#now we have all the coords and datapoints
#break
coords = np.asarray(coords)
coords = np.asmatrix(coords)
coords = coords.transpose()
data.append(coords)
# print ("~~~")
# print ("~~~ ~~~~~~~ ~~~")
print ("")
#
print ("Succesfully grabbed (" + str(len(data)) + ") regions of data with (1-3)d profiles.")
print ('~~~~~~~~~~~~~~~~~~~~')
return data
## END FUNCTION
def write2csv(data, output_prefix, regions):
    #### Print extracted data to CSV
    ####
    #### In: List of data matrices, e.g.: data=extractDAT2matrix(extract_file, extract_regions, extract_fields, extract_dimensions)
    #### Output file prefix, e.g.: output_prefix='_ASL_write2csv'
    #### Regions to print (filenames), e.g.: regions=['Reg0'(, ...)]
    ####
    #### Out: Prints a CSV file for each region with all data in data
    #### Returns 1 if successful
    ####
    #### Note: # Printing will overwrite for any filename collisions
####
## libraries provided by system
import numpy as np
##
if len(data) != len(regions):
print ("~~~~")
print (" length of 'data' and 'regions' not equal; they should be. exiting..")
sys.exit(0)
print ("Printing output files for each region (will overwrite) ...")
i=0
for item in data:
name = output_prefix+'_Reg'+str(i)+'_'+str(regions[i])+'.csv'
print (".... " + name),
np.savetxt( name, item, delimiter=',', fmt="%e")
#d.tofile(name, ",")
print (" OK")
i = i+1
# end
print (" ")
print ("Job completed.")
print (" ")
return 1
#print "Printing output files for each region (will overwrite) ..."
## END FUNCTION
def buildDAT(insert_data, info, insert_filename, insert_regions, insert_dataname, insert_function, insert_type, insert_dimension, insert_location):
#### Build a DF-ISE file object from data and info provided
####
    #### In: List of data matrices (one per region), e.g.: insert_data=extractDAT2matrix(...)
    #### Info object supplying the header fields, e.g.: info=FILE0.INFO
    #### Output filename string, e.g.: insert_filename=str('PythonDFISEdatOUTPUT.dat')
    #### Regions the data belongs to, e.g.: insert_regions=['regA0'(, ...)]
    #### Per-region lists of dataname/function/type/dimension/location for each dataset
    ####
    #### Out: A DFISE_DATfile object populated with the supplied data and info
####
#### Note: # Currently: (fdgi) builds DAT from data and given a DFISE.INFO() object
#### # Currently: capable of building from scalar and vector data
####
## libraries provided by system
import numpy as np
##
print ('')
print ('~~~~~~~~~~~~~~~~~~~~')
print ('DF-ISE buildDAT_fdgi') #from data given info ## from info given data, from info&data given 0, from 0 given info&data
print ('~~~~~~~~~~~~~~~~~~~~')
print ('building file: ' + insert_filename)
print ('')
#### FROM DATA ####
newDAT = DFISE_DATfile(insert_filename)
if len(insert_data)!=len(insert_regions):
print ("ERROR: len of regions should match len of data (list type)")
if len(sum(insert_dataname,[]))!=len(sum(insert_function,[])) or len(sum(insert_function,[]))!=len(sum(insert_type,[])) or len(sum(insert_type,[]))!=len(sum(insert_dimension,[])) or len(sum(insert_dimension,[]))!=len(sum(insert_location,[])):
print ("ERROR: list of lists should have same # of elements (necessary but not sufficient condition)")
numDatasets = len(sum(insert_dataname,[])) #count datasets
#newDAT.DATA.setNum(numDatasets)
numDatasets = 0
i=0
for region in insert_regions:
#when manipulating, easier operate on transpose, whose shape is (#datasets,#rows)
tempRegionData = insert_data[i].transpose()
print ("Region: ~" + region + "~")
datacol = 0
j=0
for dataset in insert_dataname[i]:
tempD = Dataset()
tempD.dataname = insert_dataname[i][j]
tempD.function = insert_function[i][j]
tempD.type = insert_type[i][j]
tempD.dimension = insert_dimension[i][j]
tempD.location = insert_location[i][j]
tempD.validity = region
if int(tempD.dimension)==1:
#values = tempRegionData[datacol:datacol+int(tempD.dimension)].transpose()
values = tempRegionData[datacol].transpose()
#values = []
#for each in tempRegionData[datacol:datacol+int(tempD.dimension)].transpose():
# values.append(each[0,0])
#
elif int(tempD.dimension)==2:
values = []
for each in tempRegionData[datacol:datacol+int(tempD.dimension)].transpose():
values.append(each[0,0])
values.append(each[0,1])
#
elif int(tempD.dimension)==3:
values = []
for each in tempRegionData[datacol:datacol+int(tempD.dimension)].transpose():
values.append(each[0,0])
values.append(each[0,1])
values.append(each[0,2])
#
else:
print ("ERROR: DIMENSION NOT VALID")
#
tempD.Values = np.asarray(values)
tempD.numValues = (tempD.Values).size
newDAT.DATA.datasets.append(tempD)
# print tempD.dataname,
# print tempD.function,
# print tempD.type,
# print tempD.dimension,
# print tempD.location,
# print tempD.validity,
# print tempD.numValues
# print str(datacol), str(datacol+int(tempD.dimension))
# print i, j,
print numDatasets,
print newDAT.DATA.datasets[numDatasets].dataname,
print newDAT.DATA.datasets[numDatasets].function,
print newDAT.DATA.datasets[numDatasets].type,
print newDAT.DATA.datasets[numDatasets].dimension,
print newDAT.DATA.datasets[numDatasets].location,
print newDAT.DATA.datasets[numDatasets].validity,
print newDAT.DATA.datasets[numDatasets].numValues
# print newDAT.DATA.datasets[numDatasets].Values.shape
datacol = datacol+int(insert_dimension[i][j])
j=j+1
numDatasets = numDatasets+1
#
i=i+1
#
newDAT.DATA.numDatasets = numDatasets #not setNum, which makes empty Dataset() objects
print ("")
if newDAT.DATA.numDatasets == len(newDAT.DATA.datasets):
print ("Collected "+str(newDAT.DATA.numDatasets)+" datasets ...ok!")
else:
print ("ERROR: numDatasets and len(datasets) do not match!")
print ('~~~~~~~~~~~~~~~~~~~~')
#### GIVEN INFO ####
#info = info
newDAT.INFO.version = info.version
newDAT.INFO.type = info.type
newDAT.INFO.dimension = info.dimension
newDAT.INFO.nb_vertices = info.nb_vertices
newDAT.INFO.nb_edges = info.nb_edges
newDAT.INFO.nb_faces = info.nb_faces
newDAT.INFO.nb_elements = info.nb_elements
newDAT.INFO.nb_regions = info.nb_regions
newDAT.INFO.setField('datasets', sum(insert_dataname,[]))
newDAT.INFO.setField('functions', sum(insert_function,[]))
return newDAT
## END FUNCTION
####################################################################
####################################################################
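## Usage sketch (hedged; illustration only, not part of the original module).
## Filenames below are hypothetical; region/field names are reused from the
## docstrings above.
if __name__ == '__main__':
    read_files = readDAT(['testgrid1.dat', 'testgrid2.dat'])
    file0, file1 = read_files[0], read_files[1]
    printDAT(file0)
    # sum one scalar vertex field over a pair of matching regions
    combined = combineDAT(file0, file0, ['regA0'], file1, ['regA1'],
                          ['PMIUserField0'], ['1'], ['vertex'])
    writeDAT(combined, 'PythonDFISEdatOUTPUT.dat')
    # pull the same field out as one matrix per region and dump it to CSV
    matrices = extractDAT2matrix(combined, ['regA0'], ['PMIUserField0'], ['1'])
    write2csv(matrices, '_ASL_write2csv', ['regA0'])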
| 0.046958 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to hold the Target plugin."""
import operator
import re
import cr
DEFAULT = cr.Config.From(
CR_DEFAULT_TARGET='chrome',
)
class Target(cr.Config, cr.AutoExport):
"""Base class for implementing cr targets.
A target is something that can be built and run.
"""
# The default base priority
PRIORITY = 0
# The default pattern used to try to detect whether a target is a test and
# should use the test runner.
TEST_PATTERN = re.compile('tests?$')
# The special "test type" that means it's not a test.
NOT_A_TEST = 'no'
# The default choice for the type of test when it can't be determined.
NORMAL_TEST = 'gtest'
# TODO(iancottrell): support the other test types
TEST_TYPES = [NOT_A_TEST, NORMAL_TEST]
def __init__(self, context, target_name):
super(Target, self).__init__(target_name)
self.context = context
test_type = None
if self.TEST_PATTERN.search(target_name):
test_type = self.NORMAL_TEST
config = cr.Config('DEFAULTS').From(
CR_TARGET=target_name,
CR_TARGET_NAME='{CR_TARGET}',
CR_BUILD_TARGET=cr.Config.Optional(
'{CR_TARGET}{CR_TARGET_SUFFIX}', '{CR_TARGET}'),
CR_RUN_ARGUMENTS='',
CR_TEST_TYPE=test_type,
)
self.AddChildren(config, context)
if hasattr(self, 'CONFIG'):
self.AddChild(self.CONFIG)
if not self.valid:
self.Set(CR_TARGET_SUFFIX='')
self.test_type = self.Find('CR_TEST_TYPE')
self.target_name = self.Find('CR_TARGET_NAME')
@property
def build_target(self):
return self.Get('CR_BUILD_TARGET')
@property
def verbose(self):
return self.context.verbose
@property
def dry_run(self):
return self.context.dry_run
@property
def valid(self):
return cr.Builder.IsTarget(self.context, self.build_target)
@property
def is_test(self):
return self.test_type and self.test_type != self.NOT_A_TEST
@classmethod
def AddArguments(cls, command, parser, allow_multiple=False):
nargs = '?'
help_string = 'The target to {0}'
if allow_multiple:
nargs = '*'
help_string = 'The target(s) to {0}'
parser.add_argument(
'_targets', metavar='target',
help=help_string.format(command.name),
nargs=nargs
)
@classmethod
def AllTargets(cls):
yield cls
for child in cls.__subclasses__():
for t in child.AllTargets():
yield t
@classmethod
def CreateTarget(cls, context, target_name):
"""Attempts to build a target by name.
This searches the set of installed targets in priority order to see if any
of them are willing to handle the supplied name.
If a target cannot be found, the program will be aborted.
Args:
context: The context to run in.
target_name: The name of the target we are searching for.
Returns:
The target that matched.
"""
target_clses = sorted(
cls.AllTargets(),
key=operator.attrgetter('PRIORITY'),
reverse=True
)
for handler in target_clses:
target = handler.Build(context, target_name)
if target:
if not target.valid:
print 'Invalid target {0} as {1}'.format(
target_name, target.build_target)
exit(1)
return target
print 'Unknown target {0}'.format(target_name)
exit(1)
@classmethod
def GetTargets(cls, context):
target_names = getattr(context.args, '_targets', None)
if not target_names:
target_names = [context.Get('CR_DEFAULT_TARGET')]
elif hasattr(target_names, 'swapcase'):
# deal with the single target case
target_names = [target_names]
return [cls.CreateTarget(context, target_name)
for target_name in target_names]
@classmethod
def Build(cls, context, target_name):
return cls(context, target_name)
class NamedTarget(Target):
"""A base class for explicit named targets.
Only matches a target if the name is an exact match.
  Up its priority so it comes ahead of general-purpose rule matches.
"""
NAME = None
PRIORITY = Target.PRIORITY + 1
@classmethod
def Build(cls, context, target_name):
try:
if target_name == cls.NAME:
return cls(context, target_name)
except AttributeError:
pass
return None
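# Hedged illustration (hypothetical target name and run arguments; not part of
# the original plugin): a NamedTarget subclass that claims the exact name
# 'my_tool', relying on the PRIORITY bump above to beat generic Target matches.
class MyToolTarget(NamedTarget):
  NAME = 'my_tool'
  CONFIG = cr.Config.From(
      CR_RUN_ARGUMENTS='--verbose',
  )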
| 0.012176 |
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
normalized = unicodedata.normalize('NFKD', s)
if normalized == s:
return s
else:
return ''.join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
operating only inside word boundaries. n-grams at the edges
of words are padded with space."""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
        check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, which are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary : boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
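# Hedged usage sketch (hypothetical mini-corpus; illustration only, shown as a
# doctest-style comment so nothing executes at import time):
# >>> hv = HashingVectorizer(n_features=2 ** 8, norm=None)
# >>> X = hv.transform(["the quick brown fox", "jumped over the lazy dog"])
# >>> X.shape
# (2, 256)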
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.csr_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items of type
        string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
        If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
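    Examples
    --------
    A minimal usage sketch (the corpus below is illustrative)::

        corpus = ['the cat sat on the mat', 'the dog sat on the log']
        vectorizer = CountVectorizer()
        X = vectorizer.fit_transform(corpus)
        # X is a sparse matrix of token counts with one row per document
        # and one column per term in vectorizer.vocabulary_.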
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
vocabulary[term] = new_val
map_index[old_val] = new_val
X.indices = map_index.take(X.indices, mode='clip')
return X
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
        Prune features that are nonzero in more documents than ``high`` or
        in fewer documents than ``low``, modifying the vocabulary and
        restricting it to at most the ``limit`` most frequent features.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = []
indptr = _make_int_array()
values = _make_int_array()
indptr.append(0)
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
j_indices.extend(feature_counter.keys())
values.extend(feature_counter.values())
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = np.asarray(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = frombuffer_empty(values, dtype=np.intc)
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sort_indices()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The formula that is used to compute the tf-idf of term t is
tf-idf(d, t) = tf(t) * idf(d, t), and the idf is computed as
idf(d, t) = log [ n / df(d, t) ] + 1 (if ``smooth_idf=False``),
where n is the total number of documents and df(d, t) is the
document frequency; the document frequency is the number of documents d
that contain term t. The effect of adding "1" to the idf in the equation
above is that terms with zero idf, i.e., terms that occur in all documents
in a training set, will not be entirely ignored.
(Note that the idf formula above differs from the standard
textbook notation that defines the idf as
idf(d, t) = log [ n / (df(d, t) + 1) ]).
If ``smooth_idf=True`` (the default), the constant "1" is added to the
numerator and denominator of the idf as if an extra document was seen
containing every term in the collection exactly once, which prevents
zero divisions: idf(d, t) = log [ (1 + n) / (1 + df(d, t)) ] + 1.
Furthermore, the formulas used to compute tf and idf depend
on parameter settings that correspond to the SMART notation used in IR
as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when
``sublinear_tf=True``.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when ``norm='l2'``, "n" (none)
when ``norm=None``.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schütze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
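    Examples
    --------
    A small worked example of the smoothed idf: with ``n = 4`` documents and
    a term that appears in ``df = 2`` of them,
    ``idf = ln((1 + 4) / (1 + 2)) + 1 = ln(5/3) + 1``, roughly 1.51.
    A minimal usage sketch (the corpus below is illustrative)::

        corpus = ['cat dog', 'cat', 'dog fish', 'cat fish']
        counts = CountVectorizer().fit_transform(corpus)
        tfidf = TfidfTransformer(smooth_idf=True).fit_transform(counts)
        # each row of tfidf is the l2-normalized tf-idf vector of the
        # corresponding document; 'dog' has df = 2 over these n = 4 docs.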
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf, diags=0, m=n_features,
n=n_features, format='csr')
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
# if _idf_diag is not set, this will raise an attribute error,
# which means hasattr(self, "idf_") is False
return np.ravel(self._idf_diag.sum(axis=0))
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items of type
        string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
        If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
        Tokenize the documents, count the occurrences of tokens and return
        them as a sparse matrix.
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
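    Examples
    --------
    A minimal usage sketch (the corpus below is illustrative); the result
    matches chaining ``CountVectorizer`` and ``TfidfTransformer``::

        corpus = ['the cat sat on the mat', 'the dog sat on the log']
        X = TfidfVectorizer().fit_transform(corpus)
        # X is a sparse matrix of l2-normalized tf-idf weights with one
        # row per document and one column per vocabulary term.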
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
| 0.000019 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from setuptools import setup, Extension
import codecs
import os
import sys
readme_note = """\
.. note::
For the latest source, discussion, etc, please visit the
`GitHub repository <https://github.com/spotify/annoy>`_\n\n
.. image:: https://img.shields.io/github/stars/spotify/annoy.svg
:target: https://github.com/spotify/annoy
"""
with codecs.open('README.rst', encoding='utf-8') as fobj:
long_description = readme_note + fobj.read()
setup(name='annoy',
version='1.5.2',
description='Approximate Nearest Neighbors in C++/Python optimized for memory usage and loading/saving to disk.',
packages=['annoy'],
ext_modules=[
Extension(
'annoy.annoylib', ['src/annoymodule.cc'],
depends=['src/annoylib.h'],
extra_compile_args=['-O3', '-march=native', '-ffast-math'],
)
],
long_description=long_description,
author='Erik Bernhardsson',
author_email='[email protected]',
url='https://github.com/spotify/annoy',
license='Apache License 2.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords='nns, approximate nearest neighbor search',
setup_requires=['nose>=1.0']
)
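# Once installed, the compiled extension is importable as ``annoy``. A rough
# usage sketch (dimension and values below are illustrative, not taken from
# this file):
#
#     from annoy import AnnoyIndex
#     index = AnnoyIndex(40)              # 40-dimensional vectors
#     index.add_item(0, [0.5] * 40)
#     index.build(10)                     # build 10 trees
#     print(index.get_nns_by_item(0, 1))  # nearest neighbours of item 0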
| 0.000942 |
# CubETL
# Copyright (c) 2013-2019 Jose Juan Montes
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from cubetl.core import Node, Component
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
class TemplateRendererBase(Node):
def __init__(self, template):
self.template = template
def render(self, ctx, data):
raise NotImplementedError()
def process(self, ctx, m):
#template = ctx.interpolate(self.template, m)
result = self.render(ctx, {'m': m})
m['templated'] = result
yield m
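# A minimal sketch of a concrete renderer built on Python string formatting
# (illustrative only: real CubETL template nodes may delegate to a full
# template engine, and the class name below is not part of this module).
class FormatTemplateRenderer(TemplateRendererBase):
    def render(self, ctx, data):
        # 'data' is the {'m': message} dict built by process(), so a template
        # such as "Hello {m[name]}" interpolates fields of the message.
        return self.template.format(**data)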
| 0.00189 |
import myssl, select, handleHTTP, socket
import sys, struct, os, random, hashlib, time, threading
import json
import logging
#MAXSYN = 2 ** 15
MAXSYNBUFFER = 64
MAXSYN = 1024
#REMOTE_lines = 4
def filepath(f):
return os.path.join(os.path.split(os.path.realpath(__file__))[0], f)
def random_data(len):
d = ''
for i in range(0, len):
d += chr(random.randint(0,255))
return d
def send_all(sock, data):
bytes_sent = 0
con = 0
while 1:
r = sock.send(data[bytes_sent:])
if r < 0:
return r
bytes_sent += r
if bytes_sent == len(data):
return bytes_sent
con = con + 1
if con > 20:
raise Exception('send too many times!')
def read_all(sock):
data_len = sock.recv(2)
con = 0
if len(data_len) <= 0:
raise Exception('read_all zero data!')
data_len = struct.unpack("H",data_len)[0]
if data_len <= 0:
raise Exception('read_all data_len error!')
data = ''
while data_len > 0:
d = sock.recv(data_len)
if len(d) <= 0:
raise Exception('read_all read error!')
data += d
data_len -= len(d)
con += 1
if con > 20:
raise Exception('read too many times!')
return data
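# The helpers above implement a simple length-prefixed framing protocol: each
# frame is a 2-byte native-endian length followed by that many payload bytes
# (for the multiplexed TCP path the payload itself begins with a 2-byte
# sequence number, see tcp_remote below). A small illustration of packing and
# unpacking one frame without any sockets (this helper is illustrative only):
def _frame_example(payload='hello', syn=0):
    body = struct.pack("H", syn) + payload
    frame = struct.pack("H", len(body)) + body
    length = struct.unpack("H", frame[:2])[0]
    seq = struct.unpack("H", frame[2:4])[0]
    return length, seq, frame[4:]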
class zProxyHandle(myssl.zProxyRequestHandler):
def handle_socket5(self, sock, remote):
try:
fdset = [sock, remote]
while 1:
r, w, e = select.select(fdset, [], [])
if sock in r:
data = sock.recv(4096)
if len(data) <= 0:
break
result = send_all(remote, data)
if result < len(data):
raise Exception('failed to send all data')
if remote in r:
data = remote.recv(4096)
if len(data) <= 0:
break
result = send_all(sock, data)
if result < len(data):
raise Exception('failed to send all data')
if not len(r):
break
finally:
sock.close()
remote.close()
def socket5proxy(self):
try:
sock = self.connection
addrtype = ord(sock.recv(1))
if addrtype > 4:
return addrtype
if addrtype == 1:
addr = socket.inet_ntoa(self.rfile.read(4))
elif addrtype == 3:
addr = self.rfile.read(ord(sock.recv(1)))
elif addrtype == 4:
addr = socket.inet_ntop(socket.AF_INET6,self.rfile.read(16))
else:
# not support
logging.warn('addr_type not support')
return
port = struct.unpack('>H', self.rfile.read(2))
try:
logging.info('connecting %s:%d' % (addr, port[0]))
remote = socket.create_connection((addr, port[0]))
except socket.error, e:
logging.warn(e)
return
self.handle_socket5(sock, remote)
except socket.error, e:
logging.warn(e)
return
def handleProxy(self):
addrtype = self.socket5proxy()
if addrtype:
self.tcpproxy(addrtype)
def tcpproxy(self, addrtype):
self.tcpruning = True
try:
sock = self.connection
if addrtype == 8:
self.remote = TCP_CLIENTS.handleproxy(self)
if self.remote:
self.handle_TCP()
return
elif addrtype == 5:
addr = socket.inet_ntoa(self.rfile.read(4))
elif addrtype == 6:
addr = self.rfile.read(ord(sock.recv(1)))
elif addrtype == 7:
addr = socket.inet_ntop(socket.AF_INET6,self.rfile.read(16))
else:
# not support
logging.warn('addr_type not support')
return
port = struct.unpack('>H', self.rfile.read(2))
clientID = hashlib.sha1(str(self.client_address) + random_data(20) + str(time.time())).digest()
self.remote = TCP_CLIENTS.newproxy(clientID, addr, port[0], self)
if self.remote:
self.handle_TCP()
return
except socket.error, e:
logging.warn(e)
return
def handle_TCP(self):
try:
sock = self.connection
fset = [sock]
while self.tcpruning:
r, w, e = select.select(fset, [], [])
if sock in r:
self.remote.send(read_all(sock))
else:
break
except:
print 'handle_TCP'
print sys.exc_info()
finally:
self.destroy()
def destroy(self):
self.tcpruning = False
self.remote.remove(self)
self.connection.close()
def send(self, data):
try:
result = send_all(self.connection, data)
if result < len(data):
raise Exception('failed to send all data')
return True
except:
print 'Hsend'
print sys.exc_info()
self.destroy()
return False
def verify(self):
global PW
if self.data[:20] == PW:
#Going up, as a proxy
self.connection.send(PW + '\x00' * random.randint(30,150))
return True
else:
#Going down, as a HTTP
return False
def log_message(self, format, *args):
s = ("%s - - [%s] %s\n" %
(self.client_address[0],
self.log_date_time_string(),
format%args))
l = open(HTTPLOG,'a+')
l.write(s)
l.close()
sys.stderr.write(s)
version_string = handleHTTP.version_string
do_HEAD = handleHTTP.send404
do_PUT = handleHTTP.send404
do_POST = handleHTTP.send404
do_DELETE = handleHTTP.send404
do_CONNECT = handleHTTP.send404
do_GET = handleHTTP.do_GET
class tcpproxyhandle:
def __init__(self):
self.clientlist = {}
def newproxy(self, clientID, addr, port, client):
try:
remote = socket.create_connection((addr, port))
client.connection.send(clientID + '\x00' * random.randint(10,80))
reID = client.connection.recv(65535)
if reID[:20] == clientID:
t = tcp_remote(remote, clientID)
t.Load(client)
t.start()
self.clientlist[clientID] = t
return t
except:
print sys.exc_info()
def handleproxy(self, client):
try:
ID = client.connection.recv(65535)[:20]
if ID in self.clientlist:
client.connection.send(ID + '\x00' * random.randint(10, 80))
t = self.clientlist[ID]
t.Load(client)
return t
except:
print sys.exc_info()
def removeID(self, ID):
if ID in self.clientlist:
del self.clientlist[ID]
class tcp_remote(threading.Thread):
def __init__(self, sock, clientID):
threading.Thread.__init__(self)
self.sock = sock
self.ID = clientID
self.clients = []
self.mutex = threading.Lock()
self.SendSYN = 0
self.RecvSYN = 0
self.SYNbuffer = {}
def run(self):
sock = self.sock
fset = [sock]
try:
while len(self.clients):
r, w, e = select.select(fset, [], [])
if sock in r:
data = sock.recv(1020)
if len(data) <= 0:
break
data = struct.pack("H",self.SendSYN) + data
self.SendSYN = (self.SendSYN + 1) % MAXSYN
data = struct.pack("H",len(data)) + data
while len(self.clients):
if random.choice(self.clients[-4:]).send(data):
break
else:
break
except:
print 'tcp_remote'
print sys.exc_info()
finally:
self.destroy()
def Load(self, client):
self.clients.append(client)
def remove(self, client):
if client in self.clients:
self.clients.remove(client)
if not len(self.clients):
self.destroy()
def send(self, data):
def _send(self, data):
result = send_all(self.sock, data)
if result < len(data):
raise Exception('failed to send all data')
self.RecvSYN = (self.RecvSYN + 1) % MAXSYN
try:
self.mutex.acquire()
syn = struct.unpack("H",data[:2])[0]
if syn == self.RecvSYN:
_send(self, data[2:])
while len(self.SYNbuffer):
if self.RecvSYN in self.SYNbuffer:
#print 'SYN out', self.RecvSYN
_send(self, self.SYNbuffer.pop(self.RecvSYN))
else:
break
else:
if len(self.SYNbuffer) >= MAXSYNBUFFER:
raise Exception('SYNbuffer overflow')
#print 'SYN need', self.RecvSYN, 'save', syn
self.SYNbuffer[syn] = data[2:]
except:
print 'Tsend'
print sys.exc_info()
self.destroy()
finally:
self.mutex.release()
def destroy(self):
TCP_CLIENTS.removeID(self.ID)
while len(self.clients):
self.clients.pop().destroy()
self.sock.close()
def main():
global PW, HTTPLOG, TCP_CLIENTS
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')
with open(filepath('config.json'), 'rb') as f:
config = json.load(f)
logging.info('loading config from %s' % filepath('config.json'))
SERVER = config['server']
PORT = config['server_port']
PW = hashlib.sha1(config['password'] + "3dfghuyrfplndd3e2sdrr4dddff").digest()
IPv6 = int(config['ipv6'])
CRT = filepath(config['crt'])
KEY = filepath(config['key'])
TCP_CLIENTS = tcpproxyhandle()
if IPv6:
        # Set the address family on the server class that is actually
        # instantiated below.
        myssl.ThreadingzProxyServer.address_family = socket.AF_INET6
HTTPLOG = filepath('http.log')
server = myssl.ThreadingzProxyServer((SERVER,PORT[0]),
zProxyHandle,
CRT,
KEY)
logging.info("starting server at %s:%d" % tuple(server.server_address[:2]))
try:
server.serve_forever()
except socket.error, e:
logging.error(e)
server.shutdown()
server.server_close()
except KeyboardInterrupt:
server.shutdown()
server.server_close()
sys.exit(0)
if __name__ == '__main__':
main()
| 0.004767 |
"""Provides convenient access to data viz challenge data.
Source: https://github.com/localytics/data-viz-challenge
This dataset is excellent for testing and demonstrating data
viz capabilities because it contains numerous categorical
columns, with both high and low cardinality, columns with NaN
values, dates and locations. This is a very good example of
the kind of data that you might see from an information system,
where the analyst might be simply helping visualize the data
(business intelligence), or trying to understand how to exploit
the data for better system performance.
This script downloads the JSON data only the first time the module is
imported, then loads the data and cleans it up into a pandas
DataFrame.
Resulting dataframe reports the following dtypes:
age object
amount float64
category object
client_time datetime64[ns]
device object
event_name object
gender object
city object
latitude float64
longitude float64
state object
zip_code int64
marital_status object
session_id object
"""
from __future__ import absolute_import
from bokeh.util.dependencies import import_required
pd = import_required('pandas',
'project_funding sample data requires Pandas (http://pandas.pydata.org) to be installed')
import os
from six.moves.urllib.request import URLopener
from bokeh.charts.utils import df_from_json
DATA_URL = "https://raw.githubusercontent.com/localytics/data-viz-challenge/master/data.json"
DOWNLOAD_NAME = 'project_funding.json'
CSV_NAME = 'project_funding.csv'
# Get absolute path relative to script
data_dir = os.path.dirname(os.path.realpath(__file__))
json_file_path = os.path.join(data_dir, DOWNLOAD_NAME)
csv_file_path = os.path.join(data_dir, CSV_NAME)
def download_project_funding():
if not os.path.isfile(json_file_path):
print('Downloading project funding source data.')
json_data = URLopener()
json_data.retrieve(DATA_URL, json_file_path)
print('Download complete!')
def load_project_funding():
project_funding = df_from_json(json_file_path)
# cleanup column names
cols = project_funding.columns
flat_cols = [col.split('.')[1] if '.' in col else col for col in cols]
project_funding.columns = flat_cols
# convert to dates
project_funding['client_time'] = pd.to_datetime(project_funding['client_time'], unit='s')
return project_funding
def load_cached_funding():
if not os.path.isfile(csv_file_path):
project_funding = load_project_funding()
project_funding.to_csv(csv_file_path, index=False)
else:
project_funding = pd.read_csv(csv_file_path, parse_dates=['client_time'])
return project_funding
download_project_funding()
project_funding = load_cached_funding()
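# A minimal usage sketch when this module is run directly (illustrative; it
# is normally imported only for the ``project_funding`` DataFrame):
if __name__ == '__main__':
    print(project_funding.dtypes)
    print(project_funding.groupby('category')['amount'].sum())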
| 0.002645 |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2021 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Group multiple small images into larger textures.
This module is used by :py:mod:`pyglet.resource` to efficiently pack small
images into larger textures. :py:class:`~pyglet.image.atlas.TextureAtlas` maintains one texture;
:py:class:`TextureBin` manages a collection of atlases of a given size.
Example usage::
# Load images from disk
car_image = pyglet.image.load('car.png')
boat_image = pyglet.image.load('boat.png')
# Pack these images into one or more textures
bin = TextureBin()
car_texture = bin.add(car_image)
boat_texture = bin.add(boat_image)
The result of :py:meth:`TextureBin.add` is a :py:class:`TextureRegion`
containing the image. Once added, an image cannot be removed from a bin (or an
atlas); nor can a list of images be obtained from a given bin or atlas -- it is
the application's responsibility to keep track of the regions returned by the
``add`` methods.
.. versionadded:: 1.1
"""
import pyglet
class AllocatorException(Exception):
"""The allocator does not have sufficient free space for the requested
image size."""
pass
class _Strip:
__slots__ = 'x', 'y', 'max_height', 'y2'
def __init__(self, y, max_height):
self.x = 0
self.y = y
self.max_height = max_height
self.y2 = y
def add(self, width, height):
assert width > 0 and height > 0
assert height <= self.max_height
x, y = self.x, self.y
self.x += width
self.y2 = max(self.y + height, self.y2)
return x, y
def compact(self):
self.max_height = self.y2 - self.y
class Allocator:
"""Rectangular area allocation algorithm.
Initialise with a given ``width`` and ``height``, then repeatedly
call `alloc` to retrieve free regions of the area and protect that
area from future allocations.
`Allocator` uses a fairly simple strips-based algorithm. It performs best
when rectangles are allocated in decreasing height order.
"""
__slots__ = 'width', 'height', 'strips', 'used_area'
def __init__(self, width, height):
"""Create an `Allocator` of the given size.
:Parameters:
`width` : int
Width of the allocation region.
`height` : int
Height of the allocation region.
"""
assert width > 0 and height > 0
self.width = width
self.height = height
self.strips = [_Strip(0, height)]
self.used_area = 0
def alloc(self, width, height):
"""Get a free area in the allocator of the given size.
        After calling `alloc`, the returned area is marked as used and will
        not be handed out by future allocations.
If there is not enough room to fit the given area `AllocatorException`
is raised.
:Parameters:
`width` : int
Width of the area to allocate.
`height` : int
Height of the area to allocate.
:rtype: int, int
:return: The X and Y coordinates of the bottom-left corner of the
allocated region.
"""
for strip in self.strips:
if self.width - strip.x >= width and strip.max_height >= height:
self.used_area += width * height
return strip.add(width, height)
if self.width >= width and self.height - strip.y2 >= height:
self.used_area += width * height
strip.compact()
newstrip = _Strip(strip.y2, self.height - strip.y2)
self.strips.append(newstrip)
return newstrip.add(width, height)
raise AllocatorException('No more space in %r for box %dx%d' % (self, width, height))
def get_usage(self):
"""Get the fraction of area already allocated.
This method is useful for debugging and profiling only.
:rtype: float
"""
return self.used_area / float(self.width * self.height)
def get_fragmentation(self):
"""Get the fraction of area that's unlikely to ever be used, based on
current allocation behaviour.
This method is useful for debugging and profiling only.
:rtype: float
"""
# The total unused area in each compacted strip is summed.
if not self.strips:
return 0.0
possible_area = self.strips[-1].y2 * self.width
return 1.0 - self.used_area / float(possible_area)
class TextureAtlas:
"""Collection of images within a texture."""
def __init__(self, width=2048, height=2048):
"""Create a texture atlas of the given size.
:Parameters:
`width` : int
Width of the underlying texture.
`height` : int
Height of the underlying texture.
"""
max_texture_size = pyglet.image.get_max_texture_size()
width = min(width, max_texture_size)
height = min(height, max_texture_size)
self.texture = pyglet.image.Texture.create(width, height)
self.allocator = Allocator(width, height)
def add(self, img, border=0):
"""Add an image to the atlas.
This method will fail if the given image cannot be transferred
directly to a texture (for example, if it is another texture).
:py:class:`~pyglet.image.ImageData` is the usual image type for this method.
`AllocatorException` will be raised if there is no room in the atlas
for the image.
:Parameters:
`img` : `~pyglet.image.AbstractImage`
The image to add.
`border` : int
Leaves specified pixels of blank space around
each image added to the Atlas.
:rtype: :py:class:`~pyglet.image.TextureRegion`
:return: The region of the atlas containing the newly added image.
"""
x, y = self.allocator.alloc(img.width + border*2, img.height + border*2)
self.texture.blit_into(img, x+border, y+border, 0)
return self.texture.get_region(x+border, y+border, img.width, img.height)
class TextureBin:
"""Collection of texture atlases.
:py:class:`~pyglet.image.atlas.TextureBin` maintains a collection of texture atlases, and creates new
ones as necessary to accommodate images added to the bin.
"""
def __init__(self, texture_width=2048, texture_height=2048):
"""Create a texture bin for holding atlases of the given size.
:Parameters:
`texture_width` : int
Width of texture atlases to create.
`texture_height` : int
Height of texture atlases to create.
`border` : int
Leaves specified pixels of blank space around
each image added to the Atlases.
"""
max_texture_size = pyglet.image.get_max_texture_size()
self.texture_width = min(texture_width, max_texture_size)
self.texture_height = min(texture_height, max_texture_size)
self.atlases = []
def add(self, img, border=0):
"""Add an image into this texture bin.
This method calls `TextureAtlas.add` for the first atlas that has room
for the image.
`AllocatorException` is raised if the image exceeds the dimensions of
``texture_width`` and ``texture_height``.
:Parameters:
`img` : `~pyglet.image.AbstractImage`
The image to add.
`border` : int
Leaves specified pixels of blank space around
each image added to the Atlas.
:rtype: :py:class:`~pyglet.image.TextureRegion`
:return: The region of an atlas containing the newly added image.
"""
for atlas in list(self.atlases):
try:
return atlas.add(img, border)
except AllocatorException:
# Remove atlases that are no longer useful (so that their textures
# can later be freed if the images inside them get collected).
if img.width < 64 and img.height < 64:
self.atlases.remove(atlas)
atlas = TextureAtlas(self.texture_width, self.texture_height)
self.atlases.append(atlas)
return atlas.add(img, border)
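# A small sketch exercising the strips-based Allocator on its own; no GL
# context is required for that class (the sizes below are illustrative only).
if __name__ == '__main__':
    allocator = Allocator(256, 256)
    for w, h in [(100, 120), (80, 120), (200, 40), (30, 30)]:
        x, y = allocator.alloc(w, h)
        print('placed %dx%d at (%d, %d)' % (w, h, x, y))
    print('usage: %.2f, fragmentation: %.2f'
          % (allocator.get_usage(), allocator.get_fragmentation()))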
| 0.0009 |
#!/usr/bin/env python
import subprocess
import sys
import csv
import matplotlib.pyplot as plt
import numpy as np
# Use fonts that match LaTeX
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 17
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
# Small font size for the legend
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('x-small')
def get_last_row(csv_filename):
'''
Function which returns just the last row of a CSV file. We have to
  read every line of the file; there was no Stack Overflow example of
reading just the last line.
http://stackoverflow.com/questions/20296955/reading-last-row-from-csv-file-python-error
'''
with open(csv_filename, 'r') as f:
lastrow = None
for row in csv.reader(f):
if (row != []): # skip blank lines at end of file.
lastrow = row
return lastrow
def run_moose(dt, time_integrator):
'''
Function which actually runs MOOSE.
'''
implicit_flag = 'true'
explicit_methods = ['ExplicitEuler', 'ExplicitMidpoint', 'Heun', 'Ralston']
# Set implicit_flag based on TimeIntegrator name
if (time_integrator in explicit_methods):
implicit_flag = 'false'
command_line_args = ['../../../moose_test-opt', '-i', 'scalar.i',
'Executioner/dt={}'.format(dt),
'Executioner/TimeIntegrator/type={}'.format(time_integrator),
'GlobalParams/implicit={}'.format(implicit_flag)]
try:
child = subprocess.Popen(command_line_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# communicate() waits for the process to terminate, so there's no
# need to wait() for it. It also sets the returncode attribute on
# child.
(stdoutdata, stderrdata) = child.communicate()
if (child.returncode != 0):
print('Running MOOSE failed: program output is below:')
print(stdoutdata)
raise
except:
print('Error executing moose_test')
sys.exit(1)
# Parse the last line of the output file to get the error at the final time.
last_row = get_last_row('scalar_out.csv')
return float(last_row[1])
#
# Main program
#
fig = plt.figure()
ax1 = fig.add_subplot(111)
# Lists of timesteps and TimeIntegrators to plot.
time_integrators = ['ImplicitEuler', 'ImplicitMidpoint', 'LStableDirk2', 'BDF2', 'CrankNicolson',
'LStableDirk3', 'LStableDirk4', 'AStableDirk4',
'ExplicitEuler', 'ExplicitMidpoint', 'Heun', 'Ralston']
dts = [.125, .0625, .03125, .015625]
# Plot colors
colors = ['maroon', 'blue', 'green', 'black', 'burlywood', 'olivedrab', 'midnightblue',
'tomato', 'darkmagenta', 'chocolate', 'lightslategray', 'skyblue']
# Plot line markers
markers = ['v', 'o', 'x', '^', 'H', 'h', '+', 'D', '*', '4', 'd', '8']
# Plot line styles
linestyles = [':', '-', '-.', '--', ':', '-.', '--', ':', '--', '-', '-.', '-']
for i in xrange(len(time_integrators)):
time_integrator = time_integrators[i]
# Place to store the results for this TimeIntegrator
results = []
# Call MOOSE to compute the results
for dt in dts:
results.append(run_moose(dt, time_integrator))
# Make plot
xdata = np.log10(np.reciprocal(dts))
ydata = np.log10(results)
  # Compute a linear fit of the last three points; on these log-log axes the
  # slope approximates the observed convergence order of the time integrator.
start_fit = len(xdata) - 3
end_fit = len(xdata)
fit = np.polyfit(xdata[start_fit:end_fit], ydata[start_fit:end_fit], 1)
# Make the plot -- unpack the user's additional plotting arguments
# from kwargs by prepending with **.
ax1.plot(xdata, ydata, label=time_integrator + ", $" + "{:.2f}".format(fit[0]) + "$",
color=colors[i], marker=markers[i], linestyle=linestyles[i])
# Set up the axis labels.
ax1.set_xlabel('$\log (\Delta t^{-1})$')
ax1.set_ylabel('$\log \|e(T)\|_{L^2}$')
# Add a legend
plt.legend(loc='lower left', prop=fontP)
# Save a PDF
plt.savefig('plot.pdf', format='pdf')
# Local Variables:
# python-indent: 2
# End:
| 0.011706 |
#!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark sincos."""
from __future__ import print_function
import timeit
NAME = "sincos"
REPEATS = 3
ITERATIONS = 1000000
def print_version():
"""Print the TAP version."""
print("TAP version 13")
def print_summary(total, passing):
"""Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
"""
print("#")
print("1.." + str(total)) # TAP plan
print("# total " + str(total))
print("# pass " + str(passing))
print("#")
print("# ok")
def print_results(elapsed):
"""Print benchmark results.
# Arguments
* `elapsed`: elapsed time (in seconds)
# Examples
``` python
python> print_results(0.131009101868)
```
"""
rate = ITERATIONS / elapsed
print(" ---")
print(" iterations: " + str(ITERATIONS))
print(" elapsed: " + str(elapsed))
print(" rate: " + str(rate))
print(" ...")
def benchmark():
"""Run the benchmark and print benchmark results."""
setup = "from math import sin, cos; from random import random;"
stmt = "x = 20.0*random() - 10.0; y = [ sin( x ), cos( x ) ]"
t = timeit.Timer(stmt, setup=setup)
print_version()
for i in range(REPEATS):
print("# python::" + NAME)
elapsed = t.timeit(number=ITERATIONS)
print_results(elapsed)
print("ok " + str(i+1) + " benchmark finished")
print_summary(REPEATS, REPEATS)
def main():
"""Run the benchmark."""
benchmark()
if __name__ == "__main__":
main()
| 0 |
# Demo of using just windows, without documents and views.
# Also demo of a GUI thread, pretty much direct from the MFC C++ sample MTMDI.
import win32ui
import win32con
import win32api
import timer
from pywin.mfc import window, docview, thread
WM_USER_PREPARE_TO_CLOSE = win32con.WM_USER + 32
# font is a dictionary in which the following elements matter:
# (the best matching font to supplied parameters is returned)
# name string name of the font as known by Windows
# size point size of font in logical units
# weight weight of font (win32con.FW_NORMAL, win32con.FW_BOLD)
# italic boolean; true if set to anything but None
# underline boolean; true if set to anything but None
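# For example, a spec built from the keys described above might look like
# (values are illustrative; the demo below uses only a name and a height):
#   {'name': 'Tahoma', 'size': 12, 'weight': win32con.FW_BOLD, 'italic': 1}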
# This window is a child window of a frame. It is not the frame window itself.
class FontWindow(window.Wnd):
def __init__(self, text = 'Python Rules!'):
window.Wnd.__init__(self)
self.text = text
self.index = 0
self.incr = 1
self.width = self.height = 0
self.ChangeAttributes()
# set up message handlers
def Create(self, title, style, rect, parent):
classStyle = win32con.CS_HREDRAW | win32con.CS_VREDRAW
className = win32ui.RegisterWndClass(classStyle, 0, win32con.COLOR_WINDOW+1, 0)
self._obj_ = win32ui.CreateWnd()
self._obj_.AttachObject(self)
self._obj_.CreateWindow(className, title, style, rect, parent, win32ui.AFX_IDW_PANE_FIRST)
self.HookMessage (self.OnSize, win32con.WM_SIZE)
self.HookMessage (self.OnPrepareToClose, WM_USER_PREPARE_TO_CLOSE)
self.HookMessage (self.OnDestroy, win32con.WM_DESTROY)
self.timerid = timer.set_timer (100, self.OnTimer)
self.InvalidateRect()
def OnDestroy (self, msg):
timer.kill_timer(self.timerid)
def OnTimer(self, id, timeVal):
self.index = self.index + self.incr
if self.index > len(self.text):
self.incr = -1
self.index = len(self.text)
elif self.index < 0:
self.incr = 1
self.index = 0
self.InvalidateRect()
def OnPaint (self):
# print "Paint message from thread", win32api.GetCurrentThreadId()
dc, paintStruct = self.BeginPaint()
self.OnPrepareDC(dc, None)
if (self.width == 0 and self.height == 0):
left, top, right, bottom = self.GetClientRect()
self.width = right - left
self.height = bottom - top
x, y = self.width / 2, self.height / 2
dc.TextOut (x, y, self.text[:self.index])
self.EndPaint(paintStruct)
def ChangeAttributes(self):
font_spec = {'name':'Arial', 'height':42}
self.font = win32ui.CreateFont (font_spec)
def OnPrepareToClose(self, params):
self.DestroyWindow()
def OnSize (self, params):
lParam = params[3]
self.width = win32api.LOWORD(lParam)
self.height = win32api.HIWORD(lParam)
def OnPrepareDC (self, dc, printinfo):
# Set up the DC for forthcoming OnDraw call
dc.SetTextColor (win32api.RGB(0,0,255))
dc.SetBkColor (win32api.GetSysColor (win32con.COLOR_WINDOW))
dc.SelectObject (self.font)
dc.SetTextAlign (win32con.TA_CENTER | win32con.TA_BASELINE)
class FontFrame(window.MDIChildWnd):
def __init__(self):
        pass # Don't call base class doc/view version...
def Create(self, title, rect = None, parent = None):
style = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_OVERLAPPEDWINDOW
self._obj_ = win32ui.CreateMDIChild()
self._obj_.AttachObject(self)
self._obj_.CreateWindow(None, title, style, rect, parent)
rect = self.GetClientRect()
rect = (0,0,rect[2]-rect[0], rect[3]-rect[1])
self.child = FontWindow("Not threaded")
self.child.Create("FontDemo", win32con.WS_CHILD | win32con.WS_VISIBLE, rect, self)
class TestThread(thread.WinThread):
def __init__(self, parentWindow):
self.parentWindow = parentWindow
self.child = None
thread.WinThread.__init__(self)
def InitInstance(self):
rect = self.parentWindow.GetClientRect()
rect = (0,0,rect[2]-rect[0], rect[3]-rect[1])
self.child = FontWindow()
self.child.Create("FontDemo", win32con.WS_CHILD | win32con.WS_VISIBLE, rect, self.parentWindow)
self.SetMainFrame(self.child)
return thread.WinThread.InitInstance(self)
def ExitInstance(self):
return 0
class ThreadedFontFrame(window.MDIChildWnd):
def __init__(self):
        # Don't call base class doc/view version...
        self.thread = None
def Create(self, title, rect = None, parent = None):
style = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_OVERLAPPEDWINDOW
self._obj_ = win32ui.CreateMDIChild()
self._obj_.CreateWindow(None, title, style, rect, parent)
self._obj_.HookMessage(self.OnDestroy, win32con.WM_DESTROY)
self._obj_.HookMessage (self.OnSize, win32con.WM_SIZE)
self.thread = TestThread(self)
self.thread.CreateThread()
def OnSize(self, msg):
pass
def OnDestroy(self, msg):
win32ui.OutputDebugString("OnDestroy\n")
if self.thread and self.thread.child:
child = self.thread.child
child.SendMessage(WM_USER_PREPARE_TO_CLOSE, 0, 0)
win32ui.OutputDebugString("Destroyed\n")
def Demo():
f = FontFrame()
f.Create("Font Demo")
def ThreadedDemo():
rect = win32ui.GetMainFrame().GetMDIClient().GetClientRect()
rect = rect[0], rect[3]*3/4, rect[2]/4, rect[3]
incr = rect[2]
for i in range(4):
if i==0:
f = FontFrame()
title = "Not threaded"
else:
f = ThreadedFontFrame()
title = "Threaded GUI Demo"
f.Create(title, rect)
rect = rect[0] + incr, rect[1], rect[2]+incr, rect[3]
    # Give them a chance to start
win32api.Sleep(100)
win32ui.PumpWaitingMessages()
if __name__=='__main__':
import demoutils
if demoutils.NeedGoodGUI():
ThreadedDemo()
# Demo()
| 0.033765 |
from copy import deepcopy as copy
template = {
'template_type': 'receipt',
'value': {
'attachment': {
'type': 'template',
'payload': {
'template_type': 'receipt',
'recipient_name': '',
'order_number': '',
'currency': '',
'payment_method': ''
}
}
}
}
class ReceiptTemplate:
def __init__(self, recipient_name='', order_number='', currency='', payment_method='', timestamp='', order_url=''):
self.template = copy(template['value'])
self.template['attachment']['payload']['recipient_name'] = recipient_name
self.template['attachment']['payload']['order_number'] = order_number
self.template['attachment']['payload']['currency'] = currency
self.template['attachment']['payload']['payment_method'] = payment_method
if timestamp != '':
self.template['attachment']['payload']['timestamp'] = timestamp
if order_url != '':
self.template['attachment']['payload']['order_url'] = order_url
self.elements = []
self.address = {}
self.summary = {}
self.adjustments = []
def add_element(self, title='', subtitle='', quantity=-1, price=0, currency='', image_url=''):
element = {}
element['title'] = title
if subtitle != '':
element['subtitle'] = subtitle
if quantity != -1:
element['quantity'] = quantity
element['price'] = price
if currency != '':
element['currency'] = currency
if image_url != '':
element['image_url'] = image_url
self.elements.append(element)
def set_address(self, street_1='', street_2='', city='', postal_code='', state='', country=''):
self.address['street_1'] = street_1
if street_2 != '':
self.address['street_2'] = street_2
self.address['city'] = city
self.address['postal_code'] = postal_code
self.address['state'] = state
self.address['country'] = country
def set_summary(self, subtotal=-1, shipping_cost=-1, total_tax=-1, total_cost=0):
if subtotal != -1:
self.summary['subtotal'] = subtotal
if shipping_cost != -1:
self.summary['shipping_cost'] = shipping_cost
if total_tax != -1:
self.summary['total_tax'] = total_tax
self.summary['total_cost'] = total_cost
def add_adjustment(self, name='', amount=0):
adjustment = {}
adjustment['name'] = name
adjustment['amount'] = amount
self.adjustments.append(adjustment)
def get_message(self):
self.template['attachment']['payload']['elements'] = self.elements
if self.address != {}:
self.template['attachment']['payload']['address'] = self.address
self.template['attachment']['payload']['summary'] = self.summary
if self.adjustments != []:
self.template['attachment']['payload']['adjustments'] = self.adjustments
return self.template
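# A minimal usage sketch (hypothetical order data) tying the helpers above
# together; get_message() returns the payload dict in the shape of the module
# level `template`:
#
#   receipt = ReceiptTemplate(recipient_name='Jane Doe', order_number='1234',
#                             currency='USD', payment_method='Visa 9876')
#   receipt.add_element(title='T-shirt', quantity=2, price=25, currency='USD')
#   receipt.set_summary(subtotal=50, total_cost=54)
#   message = receipt.get_message()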
| 0.002249 |
from textwrap import dedent
from typing import Optional, Union
from unittest.case import TestCase
from service_client.utils import IncompleteFormatter, random_token, build_parameter_object
class TestIncompleteFormatter(TestCase):
def setUp(self):
self.formatter = IncompleteFormatter()
def test_all_items_kwargs(self):
self.assertEqual(self.formatter.format("Test {var1} with {var2} kwarg", var1="first", var2=2),
"Test first with 2 kwarg")
self.assertEqual(self.formatter.get_substituted_fields(), ['var1', 'var2'])
self.assertEqual(self.formatter.get_not_substituted_fields(), [])
def test_one_items_kwargs(self):
self.assertEqual(self.formatter.format("Test {var1} with {var2} kwarg", var1="first"),
"Test first with {var2} kwarg")
self.assertEqual(self.formatter.get_substituted_fields(), ['var1'])
self.assertEqual(self.formatter.get_not_substituted_fields(), ['var2'])
def test_no_items_kwargs(self):
self.assertEqual(self.formatter.format("Test {var1} with {var2} kwarg"),
"Test {var1} with {var2} kwarg")
self.assertEqual(self.formatter.get_substituted_fields(), [])
self.assertEqual(self.formatter.get_not_substituted_fields(), ['var1', 'var2'])
def test_all_items_indexed_args(self):
self.assertEqual(self.formatter.format("Test {0} with {1} indexed args", "first", 2),
"Test first with 2 indexed args")
self.assertEqual(self.formatter.get_substituted_fields(), ['0', '1'])
self.assertEqual(self.formatter.get_not_substituted_fields(), [])
def test_one_items_indexed_args(self):
self.assertEqual(self.formatter.format("Test {0} with {1} indexed args", 'first'),
"Test first with {1} indexed args")
self.assertEqual(self.formatter.get_substituted_fields(), ['0'])
self.assertEqual(self.formatter.get_not_substituted_fields(), ['1'])
def test_no_items_indexed_args(self):
self.assertEqual(self.formatter.format("Test {0} with {1} indexed args"),
"Test {0} with {1} indexed args")
self.assertEqual(self.formatter.get_substituted_fields(), [])
self.assertEqual(self.formatter.get_not_substituted_fields(), ['0', '1'])
def test_all_items_not_indexed_args(self):
self.assertEqual(self.formatter.format("Test {} with {} indexed args", "first", 2),
"Test first with 2 indexed args")
self.assertEqual(self.formatter.get_substituted_fields(), ['0', '1'])
self.assertEqual(self.formatter.get_not_substituted_fields(), [])
def test_one_items_not_indexed_args(self):
self.assertEqual(self.formatter.format("Test {} with {} indexed args", 'first'),
"Test first with {1} indexed args")
self.assertEqual(self.formatter.get_substituted_fields(), ['0'])
self.assertEqual(self.formatter.get_not_substituted_fields(), ['1'])
def test_no_items_not_indexed_args(self):
self.assertEqual(self.formatter.format("Test {} with {} indexed args"),
"Test {0} with {1} indexed args")
self.assertEqual(self.formatter.get_substituted_fields(), [])
self.assertEqual(self.formatter.get_not_substituted_fields(), ['0', '1'])
class RandomTokenTest(TestCase):
def test_random_token(self):
self.assertNotEqual(random_token(), random_token())
self.assertNotEqual(random_token(), random_token())
self.assertNotEqual(random_token(), random_token())
def test_default_length(self):
self.assertEqual(len(random_token()), 10)
def test_custom_length(self):
self.assertEqual(len(random_token(20)), 20)
class FakeModel:
def __init__(self, data=None):
try:
self.fieldname_1 = data['fieldname_1']
except (KeyError, TypeError):
self.fieldname_1 = None
class BuildParameterObjectTests(TestCase):
class Fake:
@build_parameter_object
def method_union(self, request: Union[FakeModel, None]):
return request
@build_parameter_object(arg_name='request_1', arg_index=1, arg_class=FakeModel)
def method_no_anno_extra_params(self, param_1, request_1, param_2):
"""
:param param_1:
:param request_1:
:param param_2:
:return:
"""
return param_1, request_1, param_2
@build_parameter_object
def method_optional(self, request: Optional[FakeModel]):
return request
@build_parameter_object
def method_class(self, request: FakeModel):
return request
def setUp(self):
self.object = self.Fake()
def test_using_union_positional(self):
request = FakeModel()
self.assertEqual(self.object.method_union(request), request)
def test_using_union_keyword(self):
request = FakeModel()
self.assertEqual(self.object.method_union(request=request), request)
def test_using_union_build(self):
result = self.object.method_union(fieldname_1=1)
self.assertIsInstance(result, FakeModel)
self.assertEqual(result.fieldname_1, 1)
def test_using_union_build_empty(self):
result = self.object.method_union()
self.assertIsInstance(result, FakeModel)
self.assertIsNone(result.fieldname_1)
def test_using_optional_positional(self):
request = FakeModel()
self.assertEqual(self.object.method_optional(request), request)
def test_using_optional_keyword(self):
request = FakeModel()
self.assertEqual(self.object.method_optional(request=request), request)
def test_using_optional_build(self):
result = self.object.method_optional(fieldname_1=1)
self.assertIsInstance(result, FakeModel)
self.assertEqual(result.fieldname_1, 1)
def test_using_optional_build_empty(self):
result = self.object.method_optional()
self.assertIsInstance(result, FakeModel)
self.assertIsNone(result.fieldname_1)
def test_using_class_positional(self):
request = FakeModel()
self.assertEqual(self.object.method_class(request), request)
def test_using_class_keyword(self):
request = FakeModel()
self.assertEqual(self.object.method_class(request=request), request)
def test_using_class_build(self):
result = self.object.method_class(fieldname_1=1)
self.assertIsInstance(result, FakeModel)
self.assertEqual(result.fieldname_1, 1)
def test_using_class_build_empty(self):
result = self.object.method_class()
self.assertIsInstance(result, FakeModel)
self.assertIsNone(result.fieldname_1)
def test_using_no_anno_extra_params_positional(self):
request = FakeModel()
self.assertEqual(self.object.method_no_anno_extra_params(1, request, 2), (1, request, 2))
def test_using_no_anno_extra_params_keyword(self):
request = FakeModel()
self.assertEqual(self.object.method_no_anno_extra_params(param_1=1, request_1=request, param_2=2),
(1, request, 2))
def test_using_no_anno_extra_params_build(self):
result = self.object.method_no_anno_extra_params(1, fieldname_1=1, param_2=2)
self.assertEqual(result[0], 1)
self.assertEqual(result[2], 2)
self.assertIsInstance(result[1], FakeModel)
self.assertEqual(result[1].fieldname_1, 1)
def test_using_no_anno_extra_params_build_empty(self):
result = self.object.method_no_anno_extra_params(1, param_2=2)
self.assertEqual(result[0], 1)
self.assertEqual(result[2], 2)
self.assertIsInstance(result[1], FakeModel)
self.assertIsNone(result[1].fieldname_1)
def test_doc(self):
self.assertEqual(self.object.method_no_anno_extra_params.__doc__,
dedent("""
:param param_1:
:param request_1:
:param param_2:
:return:
It is possible to use keyword parameters to build an
object :class:`~tests.tests_utils.FakeModel` for parameter ``request_1``."""),
self.object.method_no_anno_extra_params.__doc__)
| 0.002227 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_bigquery_dataset_facts
description:
- Gather facts for GCP Dataset
short_description: Gather facts for GCP Dataset
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options: {}
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: " a dataset facts"
gcp_bigquery_dataset_facts:
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: facts
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
name:
description:
- Dataset name.
returned: success
type: str
access:
description:
- Access controls on the bucket.
returned: success
type: complex
contains:
domain:
description:
- A domain to grant access to. Any users signed in with the domain specified
will be granted the specified access .
returned: success
type: str
groupByEmail:
description:
- An email address of a Google Group to grant access to.
returned: success
type: str
role:
description:
- Describes the rights granted to the user specified by the other member
of the access object .
returned: success
type: str
specialGroup:
description:
- A special group to grant access to.
returned: success
type: str
userByEmail:
description:
- 'An email address of a user to grant access to. For example: [email protected]
.'
returned: success
type: str
view:
description:
- A view from a different dataset to grant access to. Queries executed against
that view will have read access to tables in this dataset. The role field
is not required when this field is set. If that view is updated by any
user, access to the view needs to be granted again via an update operation.
returned: success
type: complex
contains:
datasetId:
description:
- The ID of the dataset containing this table.
returned: success
type: str
projectId:
description:
- The ID of the project containing this table.
returned: success
type: str
tableId:
description:
- The ID of the table. The ID must contain only letters (a-z, A-Z),
numbers (0-9), or underscores. The maximum length is 1,024 characters.
returned: success
type: str
creationTime:
description:
- The time when this dataset was created, in milliseconds since the epoch.
returned: success
type: int
datasetReference:
description:
- A reference that identifies the dataset.
returned: success
type: complex
contains:
datasetId:
description:
- A unique ID for this dataset, without the project name. The ID must contain
only letters (a-z, A-Z), numbers (0-9), or underscores. The maximum length
is 1,024 characters.
returned: success
type: str
projectId:
description:
- The ID of the project containing this dataset.
returned: success
type: str
defaultTableExpirationMs:
description:
- The default lifetime of all tables in the dataset, in milliseconds .
returned: success
type: int
description:
description:
- A user-friendly description of the dataset.
returned: success
type: str
friendlyName:
description:
- A descriptive name for the dataset.
returned: success
type: str
id:
description:
- The fully-qualified unique name of the dataset in the format projectId:datasetId.
The dataset name without the project name is given in the datasetId field
.
returned: success
type: str
labels:
description:
- The labels associated with this dataset. You can use these to organize and
group your datasets .
returned: success
type: dict
lastModifiedTime:
description:
- The date when this dataset or any of its tables was last modified, in milliseconds
since the epoch.
returned: success
type: int
location:
description:
- The geographic location where the dataset should reside. Possible values include
EU and US. The default value is US.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict())
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/bigquery']
items = fetch_list(module, collection(module))
if items.get('datasets'):
items = items.get('datasets')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/bigquery/v2/projects/{project}/datasets".format(**module.params)
def fetch_list(module, link):
auth = GcpSession(module, 'bigquery')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| 0.003756 |
"""Support for UV data from openuv.io."""
import logging
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_API_KEY, CONF_BINARY_SENSORS, CONF_ELEVATION,
CONF_LATITUDE, CONF_LONGITUDE, CONF_MONITORED_CONDITIONS, CONF_SENSORS)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.service import verify_domain_control
from .config_flow import configured_instances
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
DATA_OPENUV_CLIENT = 'data_client'
DATA_OPENUV_LISTENER = 'data_listener'
DATA_PROTECTION_WINDOW = 'protection_window'
DATA_UV = 'uv'
DEFAULT_ATTRIBUTION = 'Data provided by OpenUV'
NOTIFICATION_ID = 'openuv_notification'
NOTIFICATION_TITLE = 'OpenUV Component Setup'
TOPIC_UPDATE = '{0}_data_update'.format(DOMAIN)
TYPE_CURRENT_OZONE_LEVEL = 'current_ozone_level'
TYPE_CURRENT_UV_INDEX = 'current_uv_index'
TYPE_CURRENT_UV_LEVEL = 'current_uv_level'
TYPE_MAX_UV_INDEX = 'max_uv_index'
TYPE_PROTECTION_WINDOW = 'uv_protection_window'
TYPE_SAFE_EXPOSURE_TIME_1 = 'safe_exposure_time_type_1'
TYPE_SAFE_EXPOSURE_TIME_2 = 'safe_exposure_time_type_2'
TYPE_SAFE_EXPOSURE_TIME_3 = 'safe_exposure_time_type_3'
TYPE_SAFE_EXPOSURE_TIME_4 = 'safe_exposure_time_type_4'
TYPE_SAFE_EXPOSURE_TIME_5 = 'safe_exposure_time_type_5'
TYPE_SAFE_EXPOSURE_TIME_6 = 'safe_exposure_time_type_6'
BINARY_SENSORS = {
TYPE_PROTECTION_WINDOW: ('Protection Window', 'mdi:sunglasses')
}
BINARY_SENSOR_SCHEMA = vol.Schema({
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSORS)):
vol.All(cv.ensure_list, [vol.In(BINARY_SENSORS)])
})
SENSORS = {
TYPE_CURRENT_OZONE_LEVEL: (
'Current Ozone Level', 'mdi:vector-triangle', 'du'),
TYPE_CURRENT_UV_INDEX: ('Current UV Index', 'mdi:weather-sunny', 'index'),
TYPE_CURRENT_UV_LEVEL: ('Current UV Level', 'mdi:weather-sunny', None),
TYPE_MAX_UV_INDEX: ('Max UV Index', 'mdi:weather-sunny', 'index'),
TYPE_SAFE_EXPOSURE_TIME_1: (
'Skin Type 1 Safe Exposure Time', 'mdi:timer', 'minutes'),
TYPE_SAFE_EXPOSURE_TIME_2: (
'Skin Type 2 Safe Exposure Time', 'mdi:timer', 'minutes'),
TYPE_SAFE_EXPOSURE_TIME_3: (
'Skin Type 3 Safe Exposure Time', 'mdi:timer', 'minutes'),
TYPE_SAFE_EXPOSURE_TIME_4: (
'Skin Type 4 Safe Exposure Time', 'mdi:timer', 'minutes'),
TYPE_SAFE_EXPOSURE_TIME_5: (
'Skin Type 5 Safe Exposure Time', 'mdi:timer', 'minutes'),
TYPE_SAFE_EXPOSURE_TIME_6: (
'Skin Type 6 Safe Exposure Time', 'mdi:timer', 'minutes'),
}
SENSOR_SCHEMA = vol.Schema({
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)):
vol.All(cv.ensure_list, [vol.In(SENSORS)])
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_ELEVATION): float,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_BINARY_SENSORS, default={}):
BINARY_SENSOR_SCHEMA,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
})
}, extra=vol.ALLOW_EXTRA)
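# A minimal configuration.yaml sketch matching CONFIG_SCHEMA above (assuming the
# integration DOMAIN imported from .const is 'openuv'; the API key is a
# placeholder):
#
#   openuv:
#     api_key: YOUR_OPENUV_API_KEY
#     binary_sensors:
#       monitored_conditions:
#         - uv_protection_window
#     sensors:
#       monitored_conditions:
#         - current_uv_index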
async def async_setup(hass, config):
"""Set up the OpenUV component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_OPENUV_CLIENT] = {}
hass.data[DOMAIN][DATA_OPENUV_LISTENER] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
identifier = '{0}, {1}'.format(
conf.get(CONF_LATITUDE, hass.config.latitude),
conf.get(CONF_LONGITUDE, hass.config.longitude))
if identifier in configured_instances(hass):
return True
data = {
CONF_API_KEY: conf[CONF_API_KEY],
CONF_BINARY_SENSORS: conf[CONF_BINARY_SENSORS],
CONF_SENSORS: conf[CONF_SENSORS],
}
if CONF_LATITUDE in conf:
data[CONF_LATITUDE] = conf[CONF_LATITUDE]
if CONF_LONGITUDE in conf:
data[CONF_LONGITUDE] = conf[CONF_LONGITUDE]
if CONF_ELEVATION in conf:
data[CONF_ELEVATION] = conf[CONF_ELEVATION]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={'source': SOURCE_IMPORT}, data=data))
return True
async def async_setup_entry(hass, config_entry):
"""Set up OpenUV as config entry."""
from pyopenuv import Client
from pyopenuv.errors import OpenUvError
_verify_domain_control = verify_domain_control(hass, DOMAIN)
try:
websession = aiohttp_client.async_get_clientsession(hass)
openuv = OpenUV(
Client(
config_entry.data[CONF_API_KEY],
config_entry.data.get(CONF_LATITUDE, hass.config.latitude),
config_entry.data.get(CONF_LONGITUDE, hass.config.longitude),
websession,
altitude=config_entry.data.get(
CONF_ELEVATION, hass.config.elevation)),
config_entry.data.get(CONF_BINARY_SENSORS, {}).get(
CONF_MONITORED_CONDITIONS, list(BINARY_SENSORS)),
config_entry.data.get(CONF_SENSORS, {}).get(
CONF_MONITORED_CONDITIONS, list(SENSORS)))
await openuv.async_update()
hass.data[DOMAIN][DATA_OPENUV_CLIENT][config_entry.entry_id] = openuv
except OpenUvError as err:
_LOGGER.error('Config entry failed: %s', err)
raise ConfigEntryNotReady
for component in ('binary_sensor', 'sensor'):
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(
config_entry, component))
@_verify_domain_control
async def update_data(service):
"""Refresh OpenUV data."""
_LOGGER.debug('Refreshing OpenUV data')
try:
await openuv.async_update()
except OpenUvError as err:
_LOGGER.error('Error during data update: %s', err)
return
async_dispatcher_send(hass, TOPIC_UPDATE)
hass.services.async_register(DOMAIN, 'update_data', update_data)
return True
async def async_unload_entry(hass, config_entry):
"""Unload an OpenUV config entry."""
hass.data[DOMAIN][DATA_OPENUV_CLIENT].pop(config_entry.entry_id)
for component in ('binary_sensor', 'sensor'):
await hass.config_entries.async_forward_entry_unload(
config_entry, component)
return True
class OpenUV:
"""Define a generic OpenUV object."""
def __init__(self, client, binary_sensor_conditions, sensor_conditions):
"""Initialize."""
self.binary_sensor_conditions = binary_sensor_conditions
self.client = client
self.data = {}
self.sensor_conditions = sensor_conditions
async def async_update(self):
"""Update sensor/binary sensor data."""
if TYPE_PROTECTION_WINDOW in self.binary_sensor_conditions:
resp = await self.client.uv_protection_window()
data = resp['result']
if data.get('from_time') and data.get('to_time'):
self.data[DATA_PROTECTION_WINDOW] = data
else:
_LOGGER.debug(
'No valid protection window data for this location')
self.data[DATA_PROTECTION_WINDOW] = {}
if any(c in self.sensor_conditions for c in SENSORS):
data = await self.client.uv_index()
self.data[DATA_UV] = data
class OpenUvEntity(Entity):
"""Define a generic OpenUV entity."""
def __init__(self, openuv):
"""Initialize."""
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._name = None
self.openuv = openuv
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attrs
@property
def name(self):
"""Return the name of the entity."""
return self._name
| 0 |
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import sys
import ctypes
from ctypes import windll, wintypes, POINTER, Structure, c_void_p, c_char_p
from ctypes.wintypes import DWORD
from .._ffi import FFIEngineError
from .._types import str_cls
from ..errors import LibraryNotFoundError
from ._kernel32 import kernel32
__all__ = [
'crypt32',
'get_error',
]
try:
crypt32 = windll.crypt32
except (OSError) as e:
if str_cls(e).find('The specified module could not be found') != -1:
raise LibraryNotFoundError('crypt32.dll could not be found')
raise
HCERTSTORE = wintypes.HANDLE
HCERTCHAINENGINE = wintypes.HANDLE
HCRYPTPROV = wintypes.HANDLE
HCRYPTKEY = wintypes.HANDLE
PBYTE = c_char_p
if sys.maxsize > 2 ** 32:
ULONG_PTR = ctypes.c_uint64
else:
ULONG_PTR = ctypes.c_ulong
try:
class CRYPTOAPI_BLOB(Structure): # noqa
_fields_ = [
("cbData", DWORD),
("pbData", c_void_p),
]
CRYPT_INTEGER_BLOB = CRYPTOAPI_BLOB
CERT_NAME_BLOB = CRYPTOAPI_BLOB
CRYPT_BIT_BLOB = CRYPTOAPI_BLOB
CRYPT_OBJID_BLOB = CRYPTOAPI_BLOB
class CRYPT_ALGORITHM_IDENTIFIER(Structure): # noqa
_fields_ = [
("pszObjId", wintypes.LPSTR),
("Parameters", CRYPT_OBJID_BLOB),
]
class CERT_PUBLIC_KEY_INFO(Structure): # noqa
_fields_ = [
("Algorithm", CRYPT_ALGORITHM_IDENTIFIER),
("PublicKey", CRYPT_BIT_BLOB),
]
class CERT_EXTENSION(Structure): # noqa
_fields_ = [
("pszObjId", wintypes.LPSTR),
("fCritical", wintypes.BOOL),
("Value", CRYPT_OBJID_BLOB),
]
PCERT_EXTENSION = POINTER(CERT_EXTENSION)
class CERT_INFO(Structure): # noqa
_fields_ = [
("dwVersion", DWORD),
("SerialNumber", CRYPT_INTEGER_BLOB),
("SignatureAlgorithm", CRYPT_ALGORITHM_IDENTIFIER),
("Issuer", CERT_NAME_BLOB),
("NotBefore", kernel32.FILETIME),
("NotAfter", kernel32.FILETIME),
("Subject", CERT_NAME_BLOB),
("SubjectPublicKeyInfo", CERT_PUBLIC_KEY_INFO),
("IssuerUniqueId", CRYPT_BIT_BLOB),
("SubjectUniqueId", CRYPT_BIT_BLOB),
("cExtension", DWORD),
("rgExtension", POINTER(PCERT_EXTENSION)),
]
PCERT_INFO = POINTER(CERT_INFO)
class CERT_CONTEXT(Structure): # noqa
_fields_ = [
("dwCertEncodingType", DWORD),
("pbCertEncoded", c_void_p),
("cbCertEncoded", DWORD),
("pCertInfo", PCERT_INFO),
("hCertStore", HCERTSTORE)
]
PCERT_CONTEXT = POINTER(CERT_CONTEXT)
class CERT_ENHKEY_USAGE(Structure): # noqa
_fields_ = [
('cUsageIdentifier', DWORD),
('rgpszUsageIdentifier', POINTER(wintypes.BYTE)),
]
PCERT_ENHKEY_USAGE = POINTER(CERT_ENHKEY_USAGE)
class CERT_TRUST_STATUS(Structure): # noqa
_fields_ = [
('dwErrorStatus', DWORD),
('dwInfoStatus', DWORD),
]
class CERT_CHAIN_ELEMENT(Structure): # noqa
_fields_ = [
('cbSize', DWORD),
('pCertContext', PCERT_CONTEXT),
('TrustStatus', CERT_TRUST_STATUS),
('pRevocationInfo', c_void_p),
('pIssuanceUsage', PCERT_ENHKEY_USAGE),
('pApplicationUsage', PCERT_ENHKEY_USAGE),
('pwszExtendedErrorInfo', wintypes.LPCWSTR),
]
PCERT_CHAIN_ELEMENT = POINTER(CERT_CHAIN_ELEMENT)
class CERT_SIMPLE_CHAIN(Structure): # noqa
_fields_ = [
('cbSize', DWORD),
('TrustStatus', CERT_TRUST_STATUS),
('cElement', DWORD),
('rgpElement', POINTER(PCERT_CHAIN_ELEMENT)),
('pTrustListInfo', c_void_p),
('fHasRevocationFreshnessTime', wintypes.BOOL),
('dwRevocationFreshnessTime', DWORD),
]
PCERT_SIMPLE_CHAIN = POINTER(CERT_SIMPLE_CHAIN)
class CERT_CHAIN_CONTEXT(Structure): # noqa
_fields_ = [
('cbSize', DWORD),
('TrustStatus', CERT_TRUST_STATUS),
('cChain', DWORD),
('rgpChain', POINTER(PCERT_SIMPLE_CHAIN)),
('cLowerQualityChainContext', DWORD),
('rgpLowerQualityChainContext', c_void_p),
('fHasRevocationFreshnessTime', wintypes.BOOL),
('dwRevocationFreshnessTime', DWORD),
]
PCERT_CHAIN_CONTEXT = POINTER(CERT_CHAIN_CONTEXT)
class CERT_USAGE_MATCH(Structure): # noqa
_fields_ = [
('dwType', DWORD),
('Usage', CERT_ENHKEY_USAGE),
]
class CERT_CHAIN_PARA(Structure): # noqa
_fields_ = [
('cbSize', DWORD),
('RequestedUsage', CERT_USAGE_MATCH),
]
class CERT_CHAIN_POLICY_PARA(Structure): # noqa
_fields_ = [
('cbSize', DWORD),
('dwFlags', DWORD),
('pvExtraPolicyPara', c_void_p),
]
class SSL_EXTRA_CERT_CHAIN_POLICY_PARA(Structure): # noqa
_fields_ = [
('cbSize', DWORD),
('dwAuthType', DWORD),
('fdwChecks', DWORD),
('pwszServerName', wintypes.LPCWSTR),
]
class CERT_CHAIN_POLICY_STATUS(Structure): # noqa
_fields_ = [
('cbSize', DWORD),
('dwError', DWORD),
('lChainIndex', wintypes.LONG),
('lElementIndex', wintypes.LONG),
('pvExtraPolicyStatus', c_void_p),
]
crypt32.CertOpenStore.argtypes = [
wintypes.LPCSTR,
DWORD,
HCRYPTPROV,
DWORD,
c_void_p
]
crypt32.CertOpenStore.restype = HCERTSTORE
crypt32.CertAddEncodedCertificateToStore.argtypes = [
HCERTSTORE,
DWORD,
PBYTE,
DWORD,
DWORD,
POINTER(PCERT_CONTEXT)
]
crypt32.CertAddEncodedCertificateToStore.restype = wintypes.BOOL
crypt32.CertGetCertificateChain.argtypes = [
HCERTCHAINENGINE,
PCERT_CONTEXT,
POINTER(kernel32.FILETIME),
HCERTSTORE,
POINTER(CERT_CHAIN_PARA),
DWORD,
c_void_p,
POINTER(PCERT_CHAIN_CONTEXT)
]
crypt32.CertGetCertificateChain.restype = wintypes.BOOL
crypt32.CertVerifyCertificateChainPolicy.argtypes = [
ULONG_PTR,
PCERT_CHAIN_CONTEXT,
POINTER(CERT_CHAIN_POLICY_PARA),
POINTER(CERT_CHAIN_POLICY_STATUS)
]
crypt32.CertVerifyCertificateChainPolicy.restype = wintypes.BOOL
crypt32.CertFreeCertificateChain.argtypes = [
PCERT_CHAIN_CONTEXT
]
crypt32.CertFreeCertificateChain.restype = None
crypt32.CertOpenSystemStoreW.argtypes = [
wintypes.HANDLE,
wintypes.LPCWSTR
]
crypt32.CertOpenSystemStoreW.restype = HCERTSTORE
crypt32.CertEnumCertificatesInStore.argtypes = [
HCERTSTORE,
PCERT_CONTEXT
]
crypt32.CertEnumCertificatesInStore.restype = PCERT_CONTEXT
crypt32.CertCloseStore.argtypes = [
HCERTSTORE,
DWORD
]
crypt32.CertCloseStore.restype = wintypes.BOOL
crypt32.CertGetEnhancedKeyUsage.argtypes = [
PCERT_CONTEXT,
DWORD,
c_void_p,
POINTER(DWORD)
]
crypt32.CertGetEnhancedKeyUsage.restype = wintypes.BOOL
except (AttributeError):
raise FFIEngineError('Error initializing ctypes')
setattr(crypt32, 'FILETIME', kernel32.FILETIME)
setattr(crypt32, 'CERT_ENHKEY_USAGE', CERT_ENHKEY_USAGE)
setattr(crypt32, 'CERT_CONTEXT', CERT_CONTEXT)
setattr(crypt32, 'PCERT_CONTEXT', PCERT_CONTEXT)
setattr(crypt32, 'CERT_USAGE_MATCH', CERT_USAGE_MATCH)
setattr(crypt32, 'CERT_CHAIN_PARA', CERT_CHAIN_PARA)
setattr(crypt32, 'CERT_CHAIN_POLICY_PARA', CERT_CHAIN_POLICY_PARA)
setattr(crypt32, 'SSL_EXTRA_CERT_CHAIN_POLICY_PARA', SSL_EXTRA_CERT_CHAIN_POLICY_PARA)
setattr(crypt32, 'CERT_CHAIN_POLICY_STATUS', CERT_CHAIN_POLICY_STATUS)
setattr(crypt32, 'PCERT_CHAIN_CONTEXT', PCERT_CHAIN_CONTEXT)
def get_error():
error = ctypes.GetLastError()
return (error, ctypes.FormatError(error))
| 0.000244 |
#!/usr/bin/env python
import submodules
import pretty
class User:
def __init__(self,username):
self.username = username
def load(self,depth='all'):
"""Calls all the necessary submodules to generate
user's contribution history.
Depth is used to specify how detailed the history
should be. 'all' (default), 'key' (only key events),
or 'minimal' (only user statistics)
"""
userdata = {}
collector = submodules.Collector(user=self.username,depth=depth)
collector.collect()
self.userdata = collector.output("userdata")
self.timeline = collector.output("timeline")
def raw(self):
"""Returns a raw dict of data points from the
user's contribution history."""
return self.userdata
def pretty(self):
"""Returns natural language summary of the user's
contribution history."""
return pretty.prettify(user=self.username,userdata=self.userdata,timeline=self.timeline)
if __name__ == '__main__':
import sys
data = User(username=sys.argv[1])
data.load(depth=sys.argv[2])
print data.raw()
print data.pretty()
| 0.033493 |
import json
import os
import random
import scipy.io
import codecs
import numpy as np
from collections import defaultdict
dataset = 'coco'
data_file = 'dataset_newfeat.json'
src_file_Tr = '/triton/ics/project/imagedb/picsom/databases/COCO/download/annotations/instances_train2014.json'
src_file_val = '/triton/ics/project/imagedb/picsom/databases/COCO/download/annotations/sentences_val2014.json'
print 'Initializing data provider for dataset %s...' % (dataset, )
# !assumptions on folder structure
dataset_root = os.path.join('data', dataset)
# load the dataset into memory
dataset_path = os.path.join(dataset_root, data_file)
print 'BasicDataProvider: reading %s' % (dataset_path, )
dB = json.load(open(dataset_path, 'r'))
srcdB_train = json.load(open(src_file_Tr, 'r'))
srcdB_val = json.load(open(src_file_val, 'r'))
trn_idx = 0
val_idx = 0
val_idx_offset = len(srcdB_train['images'])
# group images by their train/val/test split into a dictionary -> list structure
for img in dB['images']:
if img['split'] == 'train':
        assert img['cocoid'] == srcdB_train['images'][trn_idx]['id'], "Ids don't match, training"
img['imgid'] = trn_idx
trn_idx += 1
else:
        assert img['cocoid'] == srcdB_val['images'][val_idx]['id'], "Ids don't match, validation"
img['imgid'] = val_idx + val_idx_offset
val_idx += 1
print 'Done with %d %d!! Now writing back dataset ' % (trn_idx, val_idx)
json.dump(dB,open(dataset_path, 'w'))
| 0.010847 |
import logging
logger = logging.getLogger(name=__name__)
import pyqtgraph as pg
import numpy as np
import time
from qtpy import QtCore
from .test_redpitaya import TestRedpitaya
from .. import APP
from ..async_utils import sleep as async_sleep
class TestPyqtgraph(TestRedpitaya):
""" This test case creates a maximally simplistic scope gui
that continuously plots the data of both scope channels,
and checks the obtainable frame rate.
Frame rates down to 20 Hz are accepted """
N = 2 ** 14
cycles = 50 # cycles to average frame rate over
frequency = 10.0
duration = 1.0
dt = 0.01 # maximum frame rate is 100 Hz
REDPITAYA = False # REDPITAYA=True tests the speed with Red Pitaya Scope
timeout = 10.0 # timeout if the gui never plots anything
def setup(self):
self.t0 = np.linspace(0, self.duration, self.N)
self.plotWidget = pg.plot(title="Realtime plotting benchmark")
self.cycle = 0
self.starttime = time.time() # not yet the actual starttime, but needed for timeout
if self.REDPITAYA:
self.r.scope.setup(trigger_source='immediately', duration=self.duration)
self.timer = QtCore.QTimer()
self.timer.setInterval(1000*self.dt)
self.timer.timeout.connect(self.update_plot)
self.timer.start()
def teardown(self):
self.timer.stop()
APP.processEvents()
self.plotWidget.close()
APP.processEvents()
def update_plot(self):
self.cycle += 1
if self.cycle == 1:
self.starttime = time.time()
if self.cycle == self.cycles:
self.endtime = time.time()
if self.REDPITAYA:
t = self.r.scope.times
#y1 = self.r.scope.curve(ch=1, timeout=0)
#y2 = self.r.scope.curve(ch=2, timeout=0)
#self.r.scope.setup()
y1 = self.r.scope._data_ch1_current
y2 = self.r.scope._data_ch2_current
else:
t = self.t0 + (time.time()-self.starttime)
phi = 2.0*np.pi*self.frequency*t
y1 = np.sin(phi)
y2 = np.cos(phi)
if self.cycle == 1:
self.c1 = self.plotWidget.plot(t, y1, pen='g')
self.c2 = self.plotWidget.plot(t, y2, pen='r')
else:
self.c1.setData(t, y1)
self.c2.setData(t, y2)
def test_speed(self):
# for now, this test is a cause of hangup
# return
# wait for the gui to display all required curves
        while self.cycle < self.cycles and (time.time() < self.timeout + self.starttime):
# this is needed such that the test GUI actually plots something
async_sleep(0.01)
if self.cycle < self.cycles:
assert False, "Must complete %d cycles before testing for speed!"%self.cycles
else:
# time per frame
dt = (self.endtime - self.starttime) / self.cycles
print("Frame rate: %f Hz"%(1.0/dt))
dt *= 1e3
print("Update period: %f ms" %(dt))
# require at least 20 fps
assert (dt < 50.0), \
"Frame update time of %f ms with%s redpitaya scope is above specification of 50 ms!" \
                % (dt, '' if self.REDPITAYA else 'out')
| 0.005458 |
# coding=utf-8
import os
from django.db import models
def establecer_destino_archivo_imagen(instance, filename):
"""
Establece la ruta de destino para el archivo de imagen cargado a la instancia.
"""
# Almacena el archivo en:
# 'app_reservas/carruseles/<carrusel>/<imagen>'
ruta_archivos_ubicacion = 'app_reservas/carruseles/{}/'.format(
instance.carrusel.slug,
)
return os.path.join(ruta_archivos_ubicacion, filename)
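# For example (hypothetical values): an image 'banner.png' attached to a carousel
# whose slug is 'inicio' is stored under 'app_reservas/carruseles/inicio/banner.png'.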
class ImagenCarrusel(models.Model):
    # Attributes
orden = models.PositiveSmallIntegerField(
default=1,
verbose_name='Orden',
help_text='Orden de la imagen en el carrusel.',
)
imagen = models.ImageField(
upload_to=establecer_destino_archivo_imagen,
verbose_name='Imagen',
help_text='Archivo de imagen.',
)
    # Relations
carrusel = models.ForeignKey(
'CarruselImagenes',
related_name='imagenes',
verbose_name='Carrusel',
)
class Meta:
"""
Información de la clase.
"""
app_label = 'app_reservas'
ordering = ['carrusel', 'orden']
verbose_name = 'Imagen de carrusel'
verbose_name_plural = 'Imágenes de carrusel'
def __str__(self):
"""
Representación de la instancia.
"""
return "Imagen {0:d} del carrusel '{1!s}'".format(
self.orden,
self.carrusel,
)
def get_url(self):
"""
Retorna la URL de la imagen.
"""
return self.imagen.url
| 0.000642 |
# Copyright 2013 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova import block_device
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova import db
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
_BLOCK_DEVICE_OPTIONAL_JOINED_FIELD = ['instance']
BLOCK_DEVICE_OPTIONAL_ATTRS = _BLOCK_DEVICE_OPTIONAL_JOINED_FIELD
def _expected_cols(expected_attrs):
return [attr for attr in expected_attrs
if attr in _BLOCK_DEVICE_OPTIONAL_JOINED_FIELD]
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Add instance_uuid to get_by_volume_id method
# Version 1.2: Instance version 1.14
# Version 1.3: Instance version 1.15
# Version 1.4: Instance version 1.16
# Version 1.5: Instance version 1.17
# Version 1.6: Instance version 1.18
# Version 1.7: Add update_or_create method
# Version 1.8: Instance version 1.19
# Version 1.9: Instance version 1.20
# Version 1.10: Changed source_type field to BlockDeviceSourceTypeField.
# Version 1.11: Changed destination_type field to
# BlockDeviceDestinationTypeField.
# Version 1.12: Changed device_type field to BlockDeviceTypeField.
# Version 1.13: Instance version 1.21
VERSION = '1.13'
fields = {
'id': fields.IntegerField(),
'instance_uuid': fields.UUIDField(),
'instance': fields.ObjectField('Instance', nullable=True),
'source_type': fields.BlockDeviceSourceTypeField(nullable=True),
'destination_type': fields.BlockDeviceDestinationTypeField(
nullable=True),
'guest_format': fields.StringField(nullable=True),
'device_type': fields.BlockDeviceTypeField(nullable=True),
'disk_bus': fields.StringField(nullable=True),
'boot_index': fields.IntegerField(nullable=True),
'device_name': fields.StringField(nullable=True),
'delete_on_termination': fields.BooleanField(default=False),
'snapshot_id': fields.StringField(nullable=True),
'volume_id': fields.StringField(nullable=True),
'volume_size': fields.IntegerField(nullable=True),
'image_id': fields.StringField(nullable=True),
'no_device': fields.BooleanField(default=False),
'connection_info': fields.StringField(nullable=True),
}
obj_relationships = {
'instance': [('1.0', '1.13'), ('1.2', '1.14'), ('1.3', '1.15'),
('1.4', '1.16'), ('1.5', '1.17'), ('1.6', '1.18'),
('1.8', '1.19'), ('1.9', '1.20'), ('1.13', '1.21')],
}
@staticmethod
def _from_db_object(context, block_device_obj,
db_block_device, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
for key in block_device_obj.fields:
if key in BLOCK_DEVICE_OPTIONAL_ATTRS:
continue
block_device_obj[key] = db_block_device[key]
if 'instance' in expected_attrs:
my_inst = objects.Instance(context)
my_inst._from_db_object(context, my_inst,
db_block_device['instance'])
block_device_obj.instance = my_inst
block_device_obj._context = context
block_device_obj.obj_reset_changes()
return block_device_obj
def _create(self, context, update_or_create=False):
"""Create the block device record in the database.
In case the id field is set on the object, and if the instance is set
raise an ObjectActionError. Resets all the changes on the object.
Returns None
:param context: security context used for database calls
:param update_or_create: consider existing block devices for the
instance based on the device name and swap, and only update
the ones that match. Normally only used when creating the
instance for the first time.
"""
cell_type = cells_opts.get_cell_type()
if cell_type == 'api':
raise exception.ObjectActionError(
action='create',
reason='BlockDeviceMapping cannot be '
'created in the API cell.')
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
if 'instance' in updates:
raise exception.ObjectActionError(action='create',
reason='instance assigned')
cells_create = update_or_create or None
if update_or_create:
db_bdm = db.block_device_mapping_update_or_create(
context, updates, legacy=False)
else:
db_bdm = db.block_device_mapping_create(
context, updates, legacy=False)
self._from_db_object(context, self, db_bdm)
# NOTE(alaski): bdms are looked up by instance uuid and device_name
# so if we sync up with no device_name an entry will be created that
# will not be found on a later update_or_create call and a second bdm
# create will occur.
if cell_type == 'compute' and db_bdm.get('device_name') is not None:
cells_api = cells_rpcapi.CellsAPI()
cells_api.bdm_update_or_create_at_top(
context, self, create=cells_create)
@base.remotable
def create(self):
self._create(self._context)
@base.remotable
def update_or_create(self):
self._create(self._context, update_or_create=True)
@base.remotable
def destroy(self):
if not self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='destroy',
reason='already destroyed')
db.block_device_mapping_destroy(self._context, self.id)
delattr(self, base.get_attrname('id'))
cell_type = cells_opts.get_cell_type()
if cell_type == 'compute':
cells_api = cells_rpcapi.CellsAPI()
cells_api.bdm_destroy_at_top(self._context, self.instance_uuid,
device_name=self.device_name,
volume_id=self.volume_id)
@base.remotable
def save(self):
updates = self.obj_get_changes()
if 'instance' in updates:
raise exception.ObjectActionError(action='save',
reason='instance changed')
updates.pop('id', None)
updated = db.block_device_mapping_update(self._context, self.id,
updates, legacy=False)
if not updated:
raise exception.BDMNotFound(id=self.id)
self._from_db_object(self._context, self, updated)
cell_type = cells_opts.get_cell_type()
if cell_type == 'compute':
cells_api = cells_rpcapi.CellsAPI()
cells_api.bdm_update_or_create_at_top(self._context, self)
@base.remotable_classmethod
def get_by_volume_id(cls, context, volume_id,
instance_uuid=None, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
db_bdm = db.block_device_mapping_get_by_volume_id(
context, volume_id, _expected_cols(expected_attrs))
if not db_bdm:
raise exception.VolumeBDMNotFound(volume_id=volume_id)
# NOTE (ndipanov): Move this to the db layer into a
# get_by_instance_and_volume_id method
if instance_uuid and instance_uuid != db_bdm['instance_uuid']:
raise exception.InvalidVolume(
reason=_("Volume does not belong to the "
"requested instance."))
return cls._from_db_object(context, cls(), db_bdm,
expected_attrs=expected_attrs)
@property
def is_root(self):
return self.boot_index == 0
@property
def is_volume(self):
return (self.destination_type ==
fields.BlockDeviceDestinationType.VOLUME)
@property
def is_image(self):
return self.source_type == fields.BlockDeviceSourceType.IMAGE
def get_image_mapping(self):
return block_device.BlockDeviceDict(self).get_image_mapping()
def obj_load_attr(self, attrname):
if attrname not in BLOCK_DEVICE_OPTIONAL_ATTRS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s",
{'attr': attrname,
'name': self.obj_name(),
                   'uuid': self.instance_uuid,
})
self.instance = objects.Instance.get_by_uuid(self._context,
self.instance_uuid)
self.obj_reset_changes(fields=['instance'])
@base.NovaObjectRegistry.register
class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: BlockDeviceMapping <= version 1.1
# Version 1.2: Added use_slave to get_by_instance_uuid
# Version 1.3: BlockDeviceMapping <= version 1.2
# Version 1.4: BlockDeviceMapping <= version 1.3
# Version 1.5: BlockDeviceMapping <= version 1.4
# Version 1.6: BlockDeviceMapping <= version 1.5
# Version 1.7: BlockDeviceMapping <= version 1.6
# Version 1.8: BlockDeviceMapping <= version 1.7
# Version 1.9: BlockDeviceMapping <= version 1.8
# Version 1.10: BlockDeviceMapping <= version 1.9
# Version 1.11: BlockDeviceMapping <= version 1.10
# Version 1.12: BlockDeviceMapping <= version 1.11
# Version 1.13: BlockDeviceMapping <= version 1.12
# Version 1.14: BlockDeviceMapping <= version 1.13
VERSION = '1.14'
fields = {
'objects': fields.ListOfObjectsField('BlockDeviceMapping'),
}
obj_relationships = {
'objects': [('1.0', '1.0'), ('1.1', '1.1'), ('1.2', '1.1'),
('1.3', '1.2'), ('1.4', '1.3'), ('1.5', '1.4'),
('1.6', '1.5'), ('1.7', '1.6'), ('1.8', '1.7'),
('1.9', '1.8'), ('1.10', '1.9'), ('1.11', '1.10'),
('1.12', '1.11'), ('1.13', '1.12'), ('1.14', '1.13')],
}
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False):
db_bdms = db.block_device_mapping_get_all_by_instance(
context, instance_uuid, use_slave=use_slave)
return base.obj_make_list(
context, cls(), objects.BlockDeviceMapping, db_bdms or [])
def root_bdm(self):
try:
return next(bdm_obj for bdm_obj in self if bdm_obj.is_root)
except StopIteration:
return
def root_metadata(self, context, image_api, volume_api):
root_bdm = self.root_bdm()
if not root_bdm:
return {}
if root_bdm.is_volume:
try:
volume = volume_api.get(context, root_bdm.volume_id)
return volume.get('volume_image_metadata', {})
except Exception:
raise exception.InvalidBDMVolume(id=root_bdm.id)
elif root_bdm.is_image:
try:
image_meta = image_api.show(context, root_bdm.image_id)
return image_meta.get('properties', {})
except Exception:
raise exception.InvalidBDMImage(id=root_bdm.id)
else:
return {}
def block_device_make_list(context, db_list, **extra_args):
return base.obj_make_list(context,
objects.BlockDeviceMappingList(context),
objects.BlockDeviceMapping, db_list,
**extra_args)
def block_device_make_list_from_dicts(context, bdm_dicts_list):
bdm_objects = [objects.BlockDeviceMapping(context=context, **bdm)
for bdm in bdm_dicts_list]
return BlockDeviceMappingList(objects=bdm_objects)
| 0.00015 |
""" Client commands """
from datetime import datetime
from pprint import pprint
from steward.colors import green, red, yellow, magenta
def _fuzzy_timedelta(td):
""" Format a timedelta into a *loose* 'X time ago' string """
ago_str = lambda x, y: '%d %s%s ago' % (x, y, 's' if x > 1 else '')
if td.days > 0:
return ago_str(td.days, 'day')
hours = td.seconds / 3600
if hours > 0:
return ago_str(hours, 'hour')
minutes = td.seconds / 60
if minutes > 0:
return ago_str(minutes, 'minute')
return ago_str(td.seconds, 'second')
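# For example (illustrative deltas): a timedelta of 2 days formats as '2 days ago',
# one of 90 minutes as '1 hour ago', and one of 45 seconds as '45 seconds ago'.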
def _format_check_status(status):
""" Turn a check status into a nicely-formatted string """
string = status['check'] + ': '
if status['retcode'] == 0:
string += "SUCCESS"
color = green
elif status['retcode'] == 1:
string += "WARNING"
color = yellow
else:
string += "ERROR(%d)" % status['retcode']
color = red
string = color(string)
if not status.get('enabled', True):
string += ' (disabled)'
if 'last_run' in status:
ran_at = datetime.fromtimestamp(status['last_run'])
string += '\nRan at %s (%s)' % (ran_at.isoformat(),
_fuzzy_timedelta(datetime.now() -
ran_at))
else:
ran_at = datetime.fromtimestamp(status['created'])
string += '\nCreated at %s (%s)' % (ran_at.isoformat(),
_fuzzy_timedelta(datetime.now() -
ran_at))
if status.get('stdout'):
string += "\nSTDOUT:\n%s" % status['stdout']
if status.get('stderr'):
string += "\nSTDERR:\n%s" % status['stderr']
return string
def do_alerts(client):
""" Print all active alerts """
response = client.cmd('palantir/alert/list').json()
# Sort by minion, then by check name
response.sort(key=lambda x: x['check'])
response.sort(key=lambda x: x['minion'])
for alert in response:
alert['name'] = alert['check']
color = yellow if alert['retcode'] == 1 else red
print "{} - {}".format(color(alert['minion']),
_format_check_status(alert))
def do_checks(client, check=None):
"""
List the Palantir checks or print details of one in particular
Parameters
----------
check : str, optional
If specified, print out the details of this check
"""
response = client.cmd('palantir/check/list').json()
if check is None:
for name, check in response.iteritems():
line = name
if not check['enabled']:
line += ' (disabled)'
print line
else:
pprint(response[check])
def do_minions(client):
""" Print the list of minions """
response = client.cmd('palantir/minion/list').json()
for name in sorted(response):
minion = response[name]
if minion['enabled']:
print minion['name']
else:
print minion['name'] + ' (disabled)'
def do_status(client, minion, check=None):
"""
Print the result of the last check on a minion
Parameters
----------
minion : str
Name of the minion
check : str, optional
Name of the check. If not provided, print all checks.
"""
if check is None:
response = client.cmd('palantir/minion/get', minion=minion).json()
header = response['name']
if not response['enabled']:
header += ' (disabled)'
print magenta('-' * len(header))
print magenta(header)
for check in response['checks']:
print _format_check_status(check)
else:
response = client.cmd('palantir/minion/check/get', minion=minion,
check=check).json()
if response is None:
print "Check %s not found on %s" % (check, minion)
return
response['name'] = check
print _format_check_status(response)
def do_run_check(client, check):
"""
Run a Palantir check
Parameters
----------
check : str
Name of the check to run
"""
response = client.cmd('palantir/check/run', name=check).json()
if isinstance(response, basestring):
print response
else:
for minion, result in response.iteritems():
result['name'] = check
print '{}: {}'.format(green(minion), _format_check_status(result))
def do_resolve(client, minion, check):
"""
Mark an alert as resolved
Parameters
----------
minion : str
Name of the minion
check : str
Name of the check
"""
alert = {'minion': minion, 'check': check}
client.cmd('palantir/alert/resolve', alerts=[alert])
def do_minion_enable(client, *minions):
"""
Enable one or more minions
Parameters
----------
*minions : list
The minions to enable
"""
client.cmd('palantir/minion/toggle', minions=minions, enabled=True)
def do_minion_disable(client, *minions):
"""
Disable one or more minions
Parameters
----------
*minions : list
The minions to disable
"""
client.cmd('palantir/minion/toggle', minions=minions, enabled=False)
def do_check_enable(client, *checks):
"""
Enable one or more checks
Parameters
----------
*checks : list
The checks to enable
"""
client.cmd('palantir/check/toggle', checks=checks, enabled=True)
def do_check_disable(client, *checks):
"""
Disable one or more checks
Parameters
----------
*checks : list
The checks to disable
"""
client.cmd('palantir/check/toggle', checks=checks, enabled=False)
def do_minion_check_enable(client, minion, *checks):
"""
Enable one or more checks on a specific minion
Parameters
----------
minion : str
The minion to enable checks on
*checks : list
The checks to enable on the minion
"""
client.cmd('palantir/minion/check/toggle', minion=minion, checks=checks,
enabled=True)
def do_minion_check_disable(client, minion, *checks):
"""
Disable one or more checks on a specific minion
Parameters
----------
minion : str
The minions to disable checks on
*checks : list
The checks to disable on the minion
"""
client.cmd('palantir/minion/check/toggle', minion=minion, checks=checks,
enabled=False)
| 0.000152 |
# -*- coding: utf-8 -*-
##################################################################################
#
# Copyright (c) 2005-2006 Axelor SARL. (http://www.axelor.com)
# and 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# $Id: hr.py 4656 2006-11-24 09:58:42Z Cyp $
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import math
import time
from operator import attrgetter
from openerp.exceptions import Warning
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_holidays_status(osv.osv):
_name = "hr.holidays.status"
_description = "Leave Type"
def get_days(self, cr, uid, ids, employee_id, context=None):
result = dict((id, dict(max_leaves=0, leaves_taken=0, remaining_leaves=0,
virtual_remaining_leaves=0)) for id in ids)
holiday_ids = self.pool['hr.holidays'].search(cr, uid, [('employee_id', '=', employee_id),
('state', 'in', ['confirm', 'validate1', 'validate']),
('holiday_status_id', 'in', ids)
], context=context)
for holiday in self.pool['hr.holidays'].browse(cr, uid, holiday_ids, context=context):
status_dict = result[holiday.holiday_status_id.id]
if holiday.type == 'add':
status_dict['virtual_remaining_leaves'] += holiday.number_of_days_temp
if holiday.state == 'validate':
status_dict['max_leaves'] += holiday.number_of_days_temp
status_dict['remaining_leaves'] += holiday.number_of_days_temp
elif holiday.type == 'remove': # number of days is negative
status_dict['virtual_remaining_leaves'] -= holiday.number_of_days_temp
if holiday.state == 'validate':
status_dict['leaves_taken'] += holiday.number_of_days_temp
status_dict['remaining_leaves'] -= holiday.number_of_days_temp
return result
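    # Worked example (illustrative): with one validated allocation ('add') of 10
    # days and one validated leave ('remove') of 3 days, the employee ends up with
    # max_leaves=10, leaves_taken=3, remaining_leaves=7 and
    # virtual_remaining_leaves=7; a further 2-day leave still waiting for approval
    # only lowers virtual_remaining_leaves to 5.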
def _user_left_days(self, cr, uid, ids, name, args, context=None):
employee_id = False
if context and 'employee_id' in context:
employee_id = context['employee_id']
else:
employee_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if employee_ids:
employee_id = employee_ids[0]
if employee_id:
res = self.get_days(cr, uid, ids, employee_id, context=context)
else:
res = dict((res_id, {'leaves_taken': 0, 'remaining_leaves': 0, 'max_leaves': 0}) for res_id in ids)
return res
_columns = {
'name': fields.char('Leave Type', size=64, required=True, translate=True),
'categ_id': fields.many2one('calendar.event.type', 'Meeting Type',
help='Once a leave is validated, Odoo will create a corresponding meeting of this type in the calendar.'),
'color_name': fields.selection([('red', 'Red'),('blue','Blue'), ('lightgreen', 'Light Green'), ('lightblue','Light Blue'), ('lightyellow', 'Light Yellow'), ('magenta', 'Magenta'),('lightcyan', 'Light Cyan'),('black', 'Black'),('lightpink', 'Light Pink'),('brown', 'Brown'),('violet', 'Violet'),('lightcoral', 'Light Coral'),('lightsalmon', 'Light Salmon'),('lavender', 'Lavender'),('wheat', 'Wheat'),('ivory', 'Ivory')],'Color in Report', required=True, help='This color will be used in the leaves summary located in Reporting\Leaves by Department.'),
'limit': fields.boolean('Allow to Override Limit', help='If you select this check box, the system allows the employees to take more leaves than the available ones for this type and will not take them into account for the "Remaining Legal Leaves" defined on the employee form.'),
'active': fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the leave type without removing it."),
'max_leaves': fields.function(_user_left_days, string='Maximum Allowed', help='This value is given by the sum of all holidays requests with a positive value.', multi='user_left_days'),
'leaves_taken': fields.function(_user_left_days, string='Leaves Already Taken', help='This value is given by the sum of all holidays requests with a negative value.', multi='user_left_days'),
'remaining_leaves': fields.function(_user_left_days, string='Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken', multi='user_left_days'),
'virtual_remaining_leaves': fields.function(_user_left_days, string='Virtual Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken - Leaves Waiting Approval', multi='user_left_days'),
'double_validation': fields.boolean('Apply Double Validation', help="When selected, the Allocation/Leave Requests for this type require a second validation to be approved."),
}
_defaults = {
'color_name': 'red',
'active': True,
}
def name_get(self, cr, uid, ids, context=None):
if context is None:
context = {}
if not context.get('employee_id',False):
# leave counts is based on employee_id, would be inaccurate if not based on correct employee
return super(hr_holidays_status, self).name_get(cr, uid, ids, context=context)
res = []
for record in self.browse(cr, uid, ids, context=context):
name = record.name
if not record.limit:
name = name + (' (%g/%g)' % (record.leaves_taken or 0.0, record.max_leaves or 0.0))
res.append((record.id, name))
return res
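# Example of the name_get() decoration above (illustrative): a leave type named
# 'Legal Leaves' with 'Allow to Override Limit' unchecked, 5 days taken and 20
# days allocated is displayed as "Legal Leaves (5/20)".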
class hr_holidays(osv.osv):
_name = "hr.holidays"
_description = "Leave"
_order = "type desc, date_from asc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_track = {
'state': {
'hr_holidays.mt_holidays_approved': lambda self, cr, uid, obj, ctx=None: obj.state == 'validate',
'hr_holidays.mt_holidays_refused': lambda self, cr, uid, obj, ctx=None: obj.state == 'refuse',
'hr_holidays.mt_holidays_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state == 'confirm',
},
}
def _employee_get(self, cr, uid, context=None):
emp_id = context.get('default_employee_id', False)
if emp_id:
return emp_id
ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if ids:
return ids[0]
return False
def _compute_number_of_days(self, cr, uid, ids, name, args, context=None):
result = {}
for hol in self.browse(cr, uid, ids, context=context):
if hol.type=='remove':
result[hol.id] = -hol.number_of_days_temp
else:
result[hol.id] = hol.number_of_days_temp
return result
def _get_can_reset(self, cr, uid, ids, name, arg, context=None):
"""User can reset a leave request if it is its own leave request or if
he is an Hr Manager. """
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
group_hr_manager_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_hr_manager')[1]
if group_hr_manager_id in [g.id for g in user.groups_id]:
return dict.fromkeys(ids, True)
result = dict.fromkeys(ids, False)
for holiday in self.browse(cr, uid, ids, context=context):
if holiday.employee_id and holiday.employee_id.user_id and holiday.employee_id.user_id.id == uid:
result[holiday.id] = True
return result
def _check_date(self, cr, uid, ids, context=None):
for holiday in self.browse(cr, uid, ids, context=context):
domain = [
('date_from', '<=', holiday.date_to),
('date_to', '>=', holiday.date_from),
('employee_id', '=', holiday.employee_id.id),
('id', '!=', holiday.id),
('state', 'not in', ['cancel', 'refuse']),
]
nholidays = self.search_count(cr, uid, domain, context=context)
if nholidays:
return False
return True
_check_holidays = lambda self, cr, uid, ids, context=None: self.check_holidays(cr, uid, ids, context=context)
_columns = {
'name': fields.char('Description', size=64),
'state': fields.selection([('draft', 'To Submit'), ('cancel', 'Cancelled'),('confirm', 'To Approve'), ('refuse', 'Refused'), ('validate1', 'Second Approval'), ('validate', 'Approved')],
'Status', readonly=True, track_visibility='onchange', copy=False,
help='The status is set to \'To Submit\', when a holiday request is created.\
\nThe status is \'To Approve\', when holiday request is confirmed by user.\
\nThe status is \'Refused\', when holiday request is refused by manager.\
\nThe status is \'Approved\', when holiday request is approved by manager.'),
'user_id':fields.related('employee_id', 'user_id', type='many2one', relation='res.users', string='User', store=True),
'date_from': fields.datetime('Start Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, select=True, copy=False),
'date_to': fields.datetime('End Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False),
'holiday_status_id': fields.many2one("hr.holidays.status", "Leave Type", required=True,readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'employee_id': fields.many2one('hr.employee', "Employee", select=True, invisible=False, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'manager_id': fields.many2one('hr.employee', 'First Approval', invisible=False, readonly=True, copy=False,
            help='This area is automatically filled by the user who validates the leave.'),
'notes': fields.text('Reasons',readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'number_of_days_temp': fields.float('Allocation', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False),
'number_of_days': fields.function(_compute_number_of_days, string='Number of Days', store=True),
'meeting_id': fields.many2one('calendar.event', 'Meeting'),
'type': fields.selection([('remove','Leave Request'),('add','Allocation Request')], 'Request Type', required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help="Choose 'Leave Request' if someone wants to take an off-day. \nChoose 'Allocation Request' if you want to increase the number of leaves available for someone", select=True),
'parent_id': fields.many2one('hr.holidays', 'Parent'),
'linked_request_ids': fields.one2many('hr.holidays', 'parent_id', 'Linked Requests',),
'department_id':fields.related('employee_id', 'department_id', string='Department', type='many2one', relation='hr.department', readonly=True, store=True),
'category_id': fields.many2one('hr.employee.category', "Employee Tag", help='Category of Employee', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'holiday_type': fields.selection([('employee','By Employee'),('category','By Employee Tag')], 'Allocation Mode', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help='By Employee: Allocation/Request for individual Employee, By Employee Tag: Allocation/Request for group of employees in category', required=True),
'manager_id2': fields.many2one('hr.employee', 'Second Approval', readonly=True, copy=False,
            help='This area is automatically filled by the user who validates the leave at the second level (if the leave type requires a second validation).'),
'double_validation': fields.related('holiday_status_id', 'double_validation', type='boolean', relation='hr.holidays.status', string='Apply Double Validation'),
'can_reset': fields.function(
_get_can_reset,
type='boolean'),
}
_defaults = {
'employee_id': _employee_get,
'state': 'confirm',
'type': 'remove',
'user_id': lambda obj, cr, uid, context: uid,
'holiday_type': 'employee'
}
_constraints = [
(_check_date, 'You can not have 2 leaves that overlaps on same day!', ['date_from','date_to']),
(_check_holidays, 'The number of remaining leaves is not sufficient for this leave type', ['state','number_of_days_temp'])
]
_sql_constraints = [
('type_value', "CHECK( (holiday_type='employee' AND employee_id IS NOT NULL) or (holiday_type='category' AND category_id IS NOT NULL))",
"The employee or employee category of this request is missing. Please make sure that your user login is linked to an employee."),
('date_check2', "CHECK ( (type='add') OR (date_from <= date_to))", "The start date must be anterior to the end date."),
('date_check', "CHECK ( number_of_days_temp >= 0 )", "The number of days must be greater than 0."),
]
def _create_resource_leave(self, cr, uid, leaves, context=None):
        '''Create an entry in the resource calendar leaves object when a holiday is validated.'''
obj_res_leave = self.pool.get('resource.calendar.leaves')
for leave in leaves:
vals = {
'name': leave.name,
'date_from': leave.date_from,
'holiday_id': leave.id,
'date_to': leave.date_to,
'resource_id': leave.employee_id.resource_id.id,
'calendar_id': leave.employee_id.resource_id.calendar_id.id
}
obj_res_leave.create(cr, uid, vals, context=context)
return True
def _remove_resource_leave(self, cr, uid, ids, context=None):
        '''Remove the resource calendar leave entries linked to the given holidays when they are cancelled or removed.'''
obj_res_leave = self.pool.get('resource.calendar.leaves')
leave_ids = obj_res_leave.search(cr, uid, [('holiday_id', 'in', ids)], context=context)
return obj_res_leave.unlink(cr, uid, leave_ids, context=context)
def onchange_type(self, cr, uid, ids, holiday_type, employee_id=False, context=None):
result = {}
if holiday_type == 'employee' and not employee_id:
ids_employee = self.pool.get('hr.employee').search(cr, uid, [('user_id','=', uid)])
if ids_employee:
result['value'] = {
'employee_id': ids_employee[0]
}
elif holiday_type != 'employee':
result['value'] = {
'employee_id': False
}
return result
def onchange_employee(self, cr, uid, ids, employee_id):
result = {'value': {'department_id': False}}
if employee_id:
employee = self.pool.get('hr.employee').browse(cr, uid, employee_id)
result['value'] = {'department_id': employee.department_id.id}
return result
# TODO: can be improved using resource calendar method
def _get_number_of_days(self, date_from, date_to):
"""Returns a float equals to the timedelta between two dates given as string."""
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
from_dt = datetime.datetime.strptime(date_from, DATETIME_FORMAT)
to_dt = datetime.datetime.strptime(date_to, DATETIME_FORMAT)
timedelta = to_dt - from_dt
diff_day = timedelta.days + float(timedelta.seconds) / 86400
return diff_day
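    # Worked example (illustrative): from '2015-01-05 08:00:00' to
    # '2015-01-06 14:00:00' the timedelta is 1 day and 21600 seconds, so this
    # returns 1 + 21600 / 86400 = 1.25; the onchange handlers below then store
    # floor(1.25) + 1 = 2 in number_of_days_temp.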
def unlink(self, cr, uid, ids, context=None):
for rec in self.browse(cr, uid, ids, context=context):
if rec.state not in ['draft', 'cancel', 'confirm']:
raise osv.except_osv(_('Warning!'),_('You cannot delete a leave which is in %s state.')%(rec.state))
return super(hr_holidays, self).unlink(cr, uid, ids, context)
def onchange_date_from(self, cr, uid, ids, date_to, date_from):
"""
If there are no date set for date_to, automatically set one 8 hours later than
the date_from.
Also update the number_of_days.
"""
# date_to has to be greater than date_from
if (date_from and date_to) and (date_from > date_to):
raise osv.except_osv(_('Warning!'),_('The start date must be anterior to the end date.'))
result = {'value': {}}
# No date_to set so far: automatically compute one 8 hours later
if date_from and not date_to:
date_to_with_delta = datetime.datetime.strptime(date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT) + datetime.timedelta(hours=8)
result['value']['date_to'] = str(date_to_with_delta)
# Compute and update the number of days
if (date_to and date_from) and (date_from <= date_to):
diff_day = self._get_number_of_days(date_from, date_to)
result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1
else:
result['value']['number_of_days_temp'] = 0
return result
def onchange_date_to(self, cr, uid, ids, date_to, date_from):
"""
Update the number_of_days.
"""
# date_to has to be greater than date_from
if (date_from and date_to) and (date_from > date_to):
raise osv.except_osv(_('Warning!'),_('The start date must be anterior to the end date.'))
result = {'value': {}}
# Compute and update the number of days
if (date_to and date_from) and (date_from <= date_to):
diff_day = self._get_number_of_days(date_from, date_to)
result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1
else:
result['value']['number_of_days_temp'] = 0
return result
def add_follower(self, cr, uid, ids, employee_id, context=None):
employee = self.pool['hr.employee'].browse(cr, uid, employee_id, context=context)
if employee.user_id:
self.message_subscribe(cr, uid, ids, [employee.user_id.partner_id.id], context=context)
def create(self, cr, uid, values, context=None):
""" Override to avoid automatic logging of creation """
if context is None:
context = {}
employee_id = values.get('employee_id', False)
context = dict(context, mail_create_nolog=True, mail_create_nosubscribe=True)
if values.get('state') and values['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'):
raise osv.except_osv(_('Warning!'), _('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % values.get('state'))
hr_holiday_id = super(hr_holidays, self).create(cr, uid, values, context=context)
self.add_follower(cr, uid, [hr_holiday_id], employee_id, context=context)
return hr_holiday_id
def write(self, cr, uid, ids, vals, context=None):
employee_id = vals.get('employee_id', False)
if vals.get('state') and vals['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'):
raise osv.except_osv(_('Warning!'), _('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % vals.get('state'))
hr_holiday_id = super(hr_holidays, self).write(cr, uid, ids, vals, context=context)
self.add_follower(cr, uid, ids, employee_id, context=context)
return hr_holiday_id
def holidays_reset(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {
'state': 'draft',
'manager_id': False,
'manager_id2': False,
})
to_unlink = []
for record in self.browse(cr, uid, ids, context=context):
for record2 in record.linked_request_ids:
self.holidays_reset(cr, uid, [record2.id], context=context)
to_unlink.append(record2.id)
if to_unlink:
self.unlink(cr, uid, to_unlink, context=context)
return True
def holidays_first_validate(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
self.holidays_first_validate_notificate(cr, uid, ids, context=context)
return self.write(cr, uid, ids, {'state':'validate1', 'manager_id': manager})
def holidays_validate(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
self.write(cr, uid, ids, {'state':'validate'})
data_holiday = self.browse(cr, uid, ids)
for record in data_holiday:
if record.double_validation:
self.write(cr, uid, [record.id], {'manager_id2': manager})
else:
self.write(cr, uid, [record.id], {'manager_id': manager})
if record.holiday_type == 'employee' and record.type == 'remove':
meeting_obj = self.pool.get('calendar.event')
meeting_vals = {
'name': record.name or _('Leave Request'),
'categ_ids': record.holiday_status_id.categ_id and [(6,0,[record.holiday_status_id.categ_id.id])] or [],
'duration': record.number_of_days_temp * 8,
'description': record.notes,
'user_id': record.user_id.id,
'start': record.date_from,
'stop': record.date_to,
'allday': False,
'state': 'open', # to block that meeting date in the calendar
'class': 'confidential'
}
#Add the partner_id (if exist) as an attendee
if record.user_id and record.user_id.partner_id:
meeting_vals['partner_ids'] = [(4,record.user_id.partner_id.id)]
ctx_no_email = dict(context or {}, no_email=True)
meeting_id = meeting_obj.create(cr, uid, meeting_vals, context=ctx_no_email)
self._create_resource_leave(cr, uid, [record], context=context)
self.write(cr, uid, ids, {'meeting_id': meeting_id})
elif record.holiday_type == 'category':
emp_ids = obj_emp.search(cr, uid, [('category_ids', 'child_of', [record.category_id.id])])
leave_ids = []
batch_context = dict(context, mail_notify_force_send=False)
for emp in obj_emp.browse(cr, uid, emp_ids, context=context):
vals = {
'name': record.name,
'type': record.type,
'holiday_type': 'employee',
'holiday_status_id': record.holiday_status_id.id,
'date_from': record.date_from,
'date_to': record.date_to,
'notes': record.notes,
'number_of_days_temp': record.number_of_days_temp,
'parent_id': record.id,
'employee_id': emp.id
}
leave_ids.append(self.create(cr, uid, vals, context=batch_context))
for leave_id in leave_ids:
# TODO is it necessary to interleave the calls?
for sig in ('confirm', 'validate', 'second_validate'):
self.signal_workflow(cr, uid, [leave_id], sig)
return True
def holidays_confirm(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if record.employee_id and record.employee_id.parent_id and record.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [record.id], user_ids=[record.employee_id.parent_id.user_id.id], context=context)
return self.write(cr, uid, ids, {'state': 'confirm'})
def holidays_refuse(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
for holiday in self.browse(cr, uid, ids, context=context):
if holiday.state == 'validate1':
self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id': manager})
else:
self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id2': manager})
self.holidays_cancel(cr, uid, ids, context=context)
return True
def holidays_cancel(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
# Delete the meeting
if record.meeting_id:
record.meeting_id.unlink()
# If a category that created several holidays, cancel all related
self.signal_workflow(cr, uid, map(attrgetter('id'), record.linked_request_ids or []), 'refuse')
self._remove_resource_leave(cr, uid, ids, context=context)
return True
def check_holidays(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if record.holiday_type != 'employee' or record.type != 'remove' or not record.employee_id or record.holiday_status_id.limit:
continue
leave_days = self.pool.get('hr.holidays.status').get_days(cr, uid, [record.holiday_status_id.id], record.employee_id.id, context=context)[record.holiday_status_id.id]
if leave_days['remaining_leaves'] < 0 or leave_days['virtual_remaining_leaves'] < 0:
# Raising a warning gives a more user-friendly feedback than the default constraint error
raise Warning(_('The number of remaining leaves is not sufficient for this leave type.\n'
'Please verify also the leaves waiting for validation.'))
return True
# -----------------------------
# OpenChatter and notifications
# -----------------------------
def _needaction_domain_get(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
# if this user is a hr.manager, he should do second validations
if self.pool.get('res.users').has_group(cr, uid, 'base.group_hr_manager'):
dom = ['|'] + dom + [('state', '=', 'validate1')]
return dom
def holidays_first_validate_notificate(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
self.message_post(cr, uid, [obj.id],
_("Request approved, waiting second validation."), context=context)
class resource_calendar_leaves(osv.osv):
_inherit = "resource.calendar.leaves"
_description = "Leave Detail"
_columns = {
'holiday_id': fields.many2one("hr.holidays", "Leave Request"),
}
class hr_employee(osv.osv):
_inherit="hr.employee"
def create(self, cr, uid, vals, context=None):
        # Don't pass the value of remaining leaves if it's 0 at creation time,
        # otherwise it would trigger the inverse function _set_remaining_days even
        # though the system may not be configured for it. Note that we don't have
        # this problem on write() because the clients only send the fields that
        # have been modified.
if 'remaining_leaves' in vals and not vals['remaining_leaves']:
del(vals['remaining_leaves'])
return super(hr_employee, self).create(cr, uid, vals, context=context)
def _set_remaining_days(self, cr, uid, empl_id, name, value, arg, context=None):
employee = self.browse(cr, uid, empl_id, context=context)
diff = value - employee.remaining_leaves
type_obj = self.pool.get('hr.holidays.status')
holiday_obj = self.pool.get('hr.holidays')
# Find for holidays status
status_ids = type_obj.search(cr, uid, [('limit', '=', False)], context=context)
if len(status_ids) != 1 :
raise osv.except_osv(_('Warning!'),_("The feature behind the field 'Remaining Legal Leaves' can only be used when there is only one leave type with the option 'Allow to Override Limit' unchecked. (%s Found). Otherwise, the update is ambiguous as we cannot decide on which leave type the update has to be done. \nYou may prefer to use the classic menus 'Leave Requests' and 'Allocation Requests' located in 'Human Resources \ Leaves' to manage the leave days of the employees if the configuration does not allow to use this field.") % (len(status_ids)))
status_id = status_ids and status_ids[0] or False
if not status_id:
return False
if diff > 0:
leave_id = holiday_obj.create(cr, uid, {'name': _('Allocation for %s') % employee.name, 'employee_id': employee.id, 'holiday_status_id': status_id, 'type': 'add', 'holiday_type': 'employee', 'number_of_days_temp': diff}, context=context)
elif diff < 0:
raise osv.except_osv(_('Warning!'), _('You cannot reduce validated allocation requests'))
else:
return False
for sig in ('confirm', 'validate', 'second_validate'):
holiday_obj.signal_workflow(cr, uid, [leave_id], sig)
return True
def _get_remaining_days(self, cr, uid, ids, name, args, context=None):
cr.execute("""SELECT
sum(h.number_of_days) as days,
h.employee_id
from
hr_holidays h
join hr_holidays_status s on (s.id=h.holiday_status_id)
where
h.state='validate' and
s.limit=False and
h.employee_id in %s
group by h.employee_id""", (tuple(ids),))
res = cr.dictfetchall()
remaining = {}
for r in res:
remaining[r['employee_id']] = r['days']
for employee_id in ids:
if not remaining.get(employee_id):
remaining[employee_id] = 0.0
return remaining
def _get_leave_status(self, cr, uid, ids, name, args, context=None):
holidays_obj = self.pool.get('hr.holidays')
holidays_id = holidays_obj.search(cr, uid,
[('employee_id', 'in', ids), ('date_from','<=',time.strftime('%Y-%m-%d %H:%M:%S')),
('date_to','>=',time.strftime('%Y-%m-%d 23:59:59')),('type','=','remove'),('state','not in',('cancel','refuse'))],
context=context)
result = {}
for id in ids:
result[id] = {
'current_leave_state': False,
'current_leave_id': False,
'leave_date_from':False,
'leave_date_to':False,
}
for holiday in self.pool.get('hr.holidays').browse(cr, uid, holidays_id, context=context):
result[holiday.employee_id.id]['leave_date_from'] = holiday.date_from
result[holiday.employee_id.id]['leave_date_to'] = holiday.date_to
result[holiday.employee_id.id]['current_leave_state'] = holiday.state
result[holiday.employee_id.id]['current_leave_id'] = holiday.holiday_status_id.id
return result
def _leaves_count(self, cr, uid, ids, field_name, arg, context=None):
Holidays = self.pool['hr.holidays']
return {
employee_id: Holidays.search_count(cr,uid, [('employee_id', '=', employee_id), ('type', '=', 'remove')], context=context)
for employee_id in ids
}
_columns = {
'remaining_leaves': fields.function(_get_remaining_days, string='Remaining Legal Leaves', fnct_inv=_set_remaining_days, type="float", help='Total number of legal leaves allocated to this employee, change this value to create allocation/leave request. Total based on all the leave types without overriding limit.'),
'current_leave_state': fields.function(_get_leave_status, multi="leave_status", string="Current Leave Status", type="selection",
selection=[('draft', 'New'), ('confirm', 'Waiting Approval'), ('refuse', 'Refused'),
('validate1', 'Waiting Second Approval'), ('validate', 'Approved'), ('cancel', 'Cancelled')]),
'current_leave_id': fields.function(_get_leave_status, multi="leave_status", string="Current Leave Type",type='many2one', relation='hr.holidays.status'),
'leave_date_from': fields.function(_get_leave_status, multi='leave_status', type='date', string='From Date'),
'leave_date_to': fields.function(_get_leave_status, multi='leave_status', type='date', string='To Date'),
'leaves_count': fields.function(_leaves_count, type='integer', string='Leaves'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 0.007123 |
#
# gdb helper commands and functions for Linux kernel debugging
#
# list tools
#
# Copyright (c) Thiebaud Weksteen, 2015
#
# Authors:
# Thiebaud Weksteen <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
list_head = utils.CachedType("struct list_head")
def list_check(head):
nb = 0
if (head.type == list_head.get_type().pointer()):
head = head.dereference()
elif (head.type != list_head.get_type()):
raise gdb.GdbError('argument must be of type (struct list_head [*])')
c = head
try:
gdb.write("Starting with: {}\n".format(c))
except gdb.MemoryError:
gdb.write('head is not accessible\n')
return
while True:
p = c['prev'].dereference()
n = c['next'].dereference()
try:
if p['next'] != c.address:
gdb.write('prev.next != current: '
'current@{current_addr}={current} '
'prev@{p_addr}={p}\n'.format(
current_addr=c.address,
current=c,
p_addr=p.address,
p=p,
))
return
except gdb.MemoryError:
gdb.write('prev is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
try:
if n['prev'] != c.address:
gdb.write('next.prev != current: '
'current@{current_addr}={current} '
'next@{n_addr}={n}\n'.format(
current_addr=c.address,
current=c,
n_addr=n.address,
n=n,
))
return
except gdb.MemoryError:
gdb.write('next is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
c = n
nb += 1
if c == head:
gdb.write("list is consistent: {} node(s)\n".format(nb))
return
class LxListChk(gdb.Command):
"""Verify a list consistency"""
def __init__(self):
super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA,
gdb.COMPLETE_EXPRESSION)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if len(argv) != 1:
raise gdb.GdbError("lx-list-check takes one argument")
list_check(gdb.parse_and_eval(argv[0]))
LxListChk()
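# Usage sketch (assumptions: a vmlinux with debug info is loaded in gdb and the
# scripts/gdb helpers have been sourced; 'modules' is a kernel
# 'struct list_head'):
#
#     (gdb) lx-list-check &modules
#     Starting with: {next = ..., prev = ...}
#     list is consistent: <N> node(s)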
| 0.000345 |
# Django settings for crawler project.
import os
import sys
selfpath = os.path.split(os.path.realpath(__file__))[0]
PATH = os.path.abspath(os.path.join(selfpath,'..'))
ENV = os.getenv('ENV')
if ENV in ['DEBUG','DEV']:
isdebug = True
STATIC_ROOT = ''
STATICFILES_DIRS = (PATH + '/static',)
else:
STATIC_ROOT = PATH + '/static'
STATICFILES_DIRS = ()
isdebug = False
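# For example (an assumption about the intended workflow, not part of the
# original settings): export ENV=DEBUG or ENV=DEV before running the
# development server so static files are served from STATICFILES_DIRS, and
# leave ENV unset in production so `manage.py collectstatic` writes into
# STATIC_ROOT, e.g.
#
#     ENV=DEBUG python manage.py runserver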
DEBUG = isdebug
# TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'mytest', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'root',
'PASSWORD': 'x09083412',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Shanghai'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'zh-cn'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = STATIC_ROOT
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = STATICFILES_DIRS
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'm0m$h10pucsnam7p6-xlvqtli&y75-kkw&)9&%dv-l*y5d35ey'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# by max 2013 11 17
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
"django.core.context_processors.request",
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
#'django.middleware.csrf.CsrfResponseMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'pagination.middleware.PaginationMiddleware',
)
ROOT_URLCONF = 'crawler.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'crawler.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
#'/home/max/PycharmProjects/max-x.net/crawler/templates',
#'/home/max/PycharmProjects/max-x.net/crawler/templates/car',
PATH + '/templates/car',
PATH + '/templates',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'spider',
'account',
'django.contrib.comments',
'pagination',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
# 'formatters': {
# 'verbose': {
# 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
# },
# 'simple': {
# 'format': '%(levelname)s %(message)s'
# },
#},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
#LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': True,
# 'formatters': {
# 'verbose': {
# 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
# },
# 'simple': {
# 'format': '%(levelname)s %(message)s'
# },
# },
# 'filters': {
# 'special': {
# '()': 'project.logging.SpecialFilter',
# 'foo': 'bar',
# }
# },
# 'handlers': {
# 'null': {
# 'level': 'DEBUG',
# 'class': 'django.utils.log.NullHandler',
# },
# 'console':{
# 'level': 'DEBUG',
# 'class': 'logging.StreamHandler',
# 'formatter': 'simple'
# },
# 'mail_admins': {
# 'level': 'ERROR',
# 'class': 'django.utils.log.AdminEmailHandler',
# 'filters': ['special']
# }
# },
# 'loggers': {
# 'django': {
# 'handlers': ['null'],
# 'propagate': True,
# 'level': 'INFO',
# },
# 'django.request': {
# 'handlers': ['mail_admins'],
# 'level': 'ERROR',
# 'propagate': False,
# },
# 'myproject.custom': {
# 'handlers': ['console', 'mail_admins'],
# 'level': 'INFO',
# 'filters': ['special']
# }
# }
#}
| 0.002387 |
"""Functions for generating and parsing HTTP Accept: headers for
supporting server-directed content negotiation.
"""
def generateAcceptHeader(*elements):
"""Generate an accept header value
[str or (str, float)] -> str
"""
parts = []
for element in elements:
if type(element) is str:
qs = "1.0"
mtype = element
else:
mtype, q = element
q = float(q)
if q > 1 or q <= 0:
raise ValueError('Invalid preference factor: %r' % q)
qs = '%0.1f' % (q,)
parts.append((qs, mtype))
parts.sort()
chunks = []
for q, mtype in parts:
if q == '1.0':
chunks.append(mtype)
else:
chunks.append('%s; q=%s' % (mtype, q))
return ', '.join(chunks)
def parseAcceptHeader(value):
"""Parse an accept header, ignoring any accept-extensions
returns a list of tuples containing main MIME type, MIME subtype,
and quality markdown.
str -> [(str, str, float)]
"""
chunks = [chunk.strip() for chunk in value.split(',')]
accept = []
for chunk in chunks:
parts = [s.strip() for s in chunk.split(';')]
mtype = parts.pop(0)
if '/' not in mtype:
# This is not a MIME type, so ignore the bad data
continue
main, sub = mtype.split('/', 1)
for ext in parts:
if '=' in ext:
k, v = ext.split('=', 1)
if k == 'q':
try:
q = float(v)
break
except ValueError:
# Ignore poorly formed q-values
pass
else:
q = 1.0
accept.append((q, main, sub))
accept.sort()
accept.reverse()
return [(main, sub, q) for (q, main, sub) in accept]
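# Illustrative example (not part of the original module):
#
#     parseAcceptHeader('text/html, text/*; q=0.3, */*; q=0.1')
#     -> [('text', 'html', 1.0), ('text', '*', 0.3), ('*', '*', 0.1)]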
def matchTypes(accept_types, have_types):
"""Given the result of parsing an Accept: header, and the
available MIME types, return the acceptable types with their
quality markdowns.
For example:
>>> acceptable = parseAcceptHeader('text/html, text/plain; q=0.5')
>>> matchTypes(acceptable, ['text/plain', 'text/html', 'image/jpeg'])
[('text/html', 1.0), ('text/plain', 0.5)]
Type signature: ([(str, str, float)], [str]) -> [(str, float)]
"""
if not accept_types:
# Accept all of them
default = 1
else:
default = 0
match_main = {}
match_sub = {}
for (main, sub, q) in accept_types:
if main == '*':
default = max(default, q)
continue
elif sub == '*':
match_main[main] = max(match_main.get(main, 0), q)
else:
match_sub[(main, sub)] = max(match_sub.get((main, sub), 0), q)
accepted_list = []
order_maintainer = 0
for mtype in have_types:
main, sub = mtype.split('/')
if (main, sub) in match_sub:
q = match_sub[(main, sub)]
else:
q = match_main.get(main, default)
if q:
accepted_list.append((1 - q, order_maintainer, q, mtype))
order_maintainer += 1
accepted_list.sort()
return [(mtype, q) for (_, _, q, mtype) in accepted_list]
def getAcceptable(accept_header, have_types):
"""Parse the accept header and return a list of available types in
preferred order. If a type is unacceptable, it will not be in the
resulting list.
This is a convenience wrapper around matchTypes and
parseAcceptHeader.
(str, [str]) -> [str]
"""
accepted = parseAcceptHeader(accept_header)
preferred = matchTypes(accepted, have_types)
return [mtype for (mtype, _) in preferred]
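# End-to-end sketch (an illustrative demo added here, not part of the original
# module):
if __name__ == '__main__':
    header = generateAcceptHeader('text/html', ('text/plain', 0.5))
    print(header)  # text/plain; q=0.5, text/html
    print(getAcceptable(header, ['text/plain', 'image/png', 'text/html']))
    # -> ['text/html', 'text/plain']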
| 0.001069 |
import time
import warnings
try:
import requests
REQUESTS_AVAILABLE = True
except ImportError:
REQUESTS_AVAILABLE = False
from .base import Connection
from ..exceptions import ConnectionError, ImproperlyConfigured, ConnectionTimeout, SSLError
from ..compat import urlencode, string_types
class RequestsHttpConnection(Connection):
"""
Connection using the `requests` library.
:arg http_auth: optional http auth information as either ':' separated
string or a tuple. Any value will be passed into requests as `auth`.
:arg use_ssl: use ssl for the connection if `True`
:arg verify_certs: whether to verify SSL certificates
:arg ca_certs: optional path to CA bundle. By default standard requests'
bundle will be used.
:arg client_cert: path to the file containing the private key and the
certificate
"""
def __init__(self, host='localhost', port=9200, http_auth=None,
use_ssl=False, verify_certs=False, ca_certs=None, client_cert=None,
**kwargs):
if not REQUESTS_AVAILABLE:
raise ImproperlyConfigured("Please install requests to use RequestsHttpConnection.")
        super(RequestsHttpConnection, self).__init__(host=host, port=port, **kwargs)
self.session = requests.session()
if http_auth is not None:
if isinstance(http_auth, (tuple, list)):
http_auth = tuple(http_auth)
elif isinstance(http_auth, string_types):
http_auth = tuple(http_auth.split(':', 1))
self.session.auth = http_auth
self.base_url = 'http%s://%s:%d%s' % (
's' if use_ssl else '',
host, port, self.url_prefix
)
self.session.verify = verify_certs
self.session.cert = client_cert
if ca_certs:
if not verify_certs:
raise ImproperlyConfigured("You cannot pass CA certificates when verify SSL is off.")
self.session.verify = ca_certs
if use_ssl and not verify_certs:
warnings.warn(
'Connecting to %s using SSL with verify_certs=False is insecure.' % self.base_url)
def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):
url = self.base_url + url
if params:
url = '%s?%s' % (url, urlencode(params or {}))
start = time.time()
try:
response = self.session.request(method, url, data=body, timeout=timeout or self.timeout)
duration = time.time() - start
raw_data = response.text
except requests.exceptions.SSLError as e:
self.log_request_fail(method, url, body, time.time() - start, exception=e)
raise SSLError('N/A', str(e), e)
except requests.Timeout as e:
self.log_request_fail(method, url, body, time.time() - start, exception=e)
raise ConnectionTimeout('TIMEOUT', str(e), e)
except requests.ConnectionError as e:
self.log_request_fail(method, url, body, time.time() - start, exception=e)
raise ConnectionError('N/A', str(e), e)
# raise errors based on http status codes, let the client handle those if needed
if not (200 <= response.status_code < 300) and response.status_code not in ignore:
self.log_request_fail(method, url, body, duration, response.status_code)
self._raise_error(response.status_code, raw_data)
self.log_request_success(method, url, response.request.path_url, body, response.status_code, raw_data, duration)
return response.status_code, response.headers, raw_data
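# Usage sketch (illustrative; the transport, serializer and connection-pool
# layers of the client library are not shown in this snippet):
#
#     conn = RequestsHttpConnection(host='localhost', port=9200,
#                                   http_auth='user:secret')
#     status, headers, raw = conn.perform_request('GET', '/_cluster/health')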
| 0.005193 |
#! /usr/bin/env python3
"""Tool for measuring execution time of small code snippets.
This module avoids a number of common traps for measuring execution
times. See also Tim Peters' introduction to the Algorithms chapter in
the Python Cookbook, published by O'Reilly.
Library usage: see the Timer class.
Command line usage:
python timeit.py [-n N] [-r N] [-s S] [-t] [-c] [-h] [--] [statement]
Options:
-n/--number N: how many times to execute 'statement' (default: see below)
-r/--repeat N: how many times to repeat the timer (default 3)
-s/--setup S: statement to be executed once initially (default 'pass')
-t/--time: use time.time() (default on Unix)
-c/--clock: use time.clock() (default on Windows)
-v/--verbose: print raw timing results; repeat for more digits precision
-h/--help: print this usage message and exit
--: separate options from statement, use when statement starts with -
statement: statement to be timed (default 'pass')
A multi-line statement may be given by specifying each line as a
separate argument; indented lines are possible by enclosing an
argument in quotes and using leading spaces. Multiple -s options are
treated similarly.
If -n is not given, a suitable number of loops is calculated by trying
successive powers of 10 until the total time is at least 0.2 seconds.
The difference in default timer function is because on Windows,
clock() has microsecond granularity but time()'s granularity is 1/60th
of a second; on Unix, clock() has 1/100th of a second granularity and
time() is much more precise. On either platform, the default timer
functions measure wall clock time, not the CPU time. This means that
other processes running on the same computer may interfere with the
timing. The best thing to do when accurate timing is necessary is to
repeat the timing a few times and use the best time. The -r option is
good for this; the default of 3 repetitions is probably enough in most
cases. On Unix, you can use clock() to measure CPU time.
Note: there is a certain baseline overhead associated with executing a
pass statement. The code here doesn't try to hide it, but you should
be aware of it. The baseline overhead can be measured by invoking the
program without arguments.
The baseline overhead differs between Python versions! Also, to
fairly compare older Python versions to Python 2.3, you may want to
use python -O for the older versions to avoid timing SET_LINENO
instructions.
"""
import gc
import sys
import time
try:
import itertools
except ImportError:
# Must be an older Python version (see timeit() below)
itertools = None
__all__ = ["Timer"]
dummy_src_name = "<timeit-src>"
default_number = 1000000
default_repeat = 3
if sys.platform == "win32":
# On Windows, the best timer is time.clock()
default_timer = time.clock
else:
# On most other platforms the best timer is time.time()
default_timer = time.time
# Don't change the indentation of the template; the reindent() calls
# in Timer.__init__() depend on setup being indented 4 spaces and stmt
# being indented 8 spaces.
template = """
def inner(_it, _timer):
%(setup)s
_t0 = _timer()
for _i in _it:
%(stmt)s
_t1 = _timer()
return _t1 - _t0
"""
def reindent(src, indent):
"""Helper to reindent a multi-line statement."""
return src.replace("\n", "\n" + " "*indent)
def _template_func(setup, func):
"""Create a timer function. Used if the "statement" is a callable."""
def inner(_it, _timer, _func=func):
setup()
_t0 = _timer()
for _i in _it:
_func()
_t1 = _timer()
return _t1 - _t0
return inner
class Timer:
"""Class for timing execution speed of small code snippets.
The constructor takes a statement to be timed, an additional
statement used for setup, and a timer function. Both statements
default to 'pass'; the timer function is platform-dependent (see
module doc string).
To measure the execution time of the first statement, use the
timeit() method. The repeat() method is a convenience to call
timeit() multiple times and return a list of results.
The statements may contain newlines, as long as they don't contain
multi-line string literals.
"""
def __init__(self, stmt="pass", setup="pass", timer=default_timer):
"""Constructor. See class doc string."""
self.timer = timer
ns = {}
if isinstance(stmt, str):
stmt = reindent(stmt, 8)
if isinstance(setup, str):
setup = reindent(setup, 4)
src = template % {'stmt': stmt, 'setup': setup}
elif hasattr(setup, '__call__'):
src = template % {'stmt': stmt, 'setup': '_setup()'}
ns['_setup'] = setup
else:
raise ValueError("setup is neither a string nor callable")
self.src = src # Save for traceback display
code = compile(src, dummy_src_name, "exec")
exec(code, globals(), ns)
self.inner = ns["inner"]
elif hasattr(stmt, '__call__'):
self.src = None
if isinstance(setup, str):
_setup = setup
def setup():
exec(_setup, globals(), ns)
elif not hasattr(setup, '__call__'):
raise ValueError("setup is neither a string nor callable")
self.inner = _template_func(setup, stmt)
else:
raise ValueError("stmt is neither a string nor callable")
def print_exc(self, file=None):
"""Helper to print a traceback from the timed code.
Typical use:
t = Timer(...) # outside the try/except
try:
t.timeit(...) # or t.repeat(...)
except:
t.print_exc()
The advantage over the standard traceback is that source lines
in the compiled template will be displayed.
The optional file argument directs where the traceback is
sent; it defaults to sys.stderr.
"""
import linecache, traceback
if self.src is not None:
linecache.cache[dummy_src_name] = (len(self.src),
None,
self.src.split("\n"),
dummy_src_name)
# else the source is already stored somewhere else
traceback.print_exc(file=file)
def timeit(self, number=default_number):
"""Time 'number' executions of the main statement.
To be precise, this executes the setup statement once, and
then returns the time it takes to execute the main statement
a number of times, as a float measured in seconds. The
argument is the number of times through the loop, defaulting
to one million. The main statement, the setup statement and
the timer function to be used are passed to the constructor.
"""
if itertools:
it = itertools.repeat(None, number)
else:
it = [None] * number
gcold = gc.isenabled()
gc.disable()
try:
timing = self.inner(it, self.timer)
finally:
if gcold:
gc.enable()
return timing
def repeat(self, repeat=default_repeat, number=default_number):
"""Call timeit() a few times.
This is a convenience function that calls the timeit()
repeatedly, returning a list of results. The first argument
specifies how many times to call timeit(), defaulting to 3;
the second argument specifies the timer argument, defaulting
to one million.
Note: it's tempting to calculate mean and standard deviation
from the result vector and report these. However, this is not
very useful. In a typical case, the lowest value gives a
lower bound for how fast your machine can run the given code
snippet; higher values in the result vector are typically not
caused by variability in Python's speed, but by other
processes interfering with your timing accuracy. So the min()
of the result is probably the only number you should be
interested in. After that, you should look at the entire
vector and apply common sense rather than statistics.
"""
r = []
for i in range(repeat):
t = self.timeit(number)
r.append(t)
return r
def timeit(stmt="pass", setup="pass", timer=default_timer,
number=default_number):
"""Convenience function to create Timer object and call timeit method."""
return Timer(stmt, setup, timer).timeit(number)
def repeat(stmt="pass", setup="pass", timer=default_timer,
repeat=default_repeat, number=default_number):
"""Convenience function to create Timer object and call repeat method."""
return Timer(stmt, setup, timer).repeat(repeat, number)
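# Library usage sketch (illustrative; timings are machine-dependent):
#
#     t = Timer("','.join(str(n) for n in range(100))")
#     best = min(t.repeat(repeat=3, number=10000))
#     usec_per_loop = best * 1e6 / 10000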
def main(args=None, *, _wrap_timer=None):
"""Main program, used when run as a script.
The optional 'args' argument specifies the command line to be parsed,
defaulting to sys.argv[1:].
The return value is an exit code to be passed to sys.exit(); it
may be None to indicate success.
When an exception happens during timing, a traceback is printed to
stderr and the return value is 1. Exceptions at other times
(including the template compilation) are not caught.
'_wrap_timer' is an internal interface used for unit testing. If it
is not None, it must be a callable that accepts a timer function
and returns another timer function (used for unit testing).
"""
if args is None:
args = sys.argv[1:]
import getopt
try:
opts, args = getopt.getopt(args, "n:s:r:tcvh",
["number=", "setup=", "repeat=",
"time", "clock", "verbose", "help"])
except getopt.error as err:
print(err)
print("use -h/--help for command line help")
return 2
timer = default_timer
stmt = "\n".join(args) or "pass"
number = 0 # auto-determine
setup = []
repeat = default_repeat
verbose = 0
precision = 3
for o, a in opts:
if o in ("-n", "--number"):
number = int(a)
if o in ("-s", "--setup"):
setup.append(a)
if o in ("-r", "--repeat"):
repeat = int(a)
if repeat <= 0:
repeat = 1
if o in ("-t", "--time"):
timer = time.time
if o in ("-c", "--clock"):
timer = time.clock
if o in ("-v", "--verbose"):
if verbose:
precision += 1
verbose += 1
if o in ("-h", "--help"):
print(__doc__, end=' ')
return 0
setup = "\n".join(setup) or "pass"
# Include the current directory, so that local imports work (sys.path
# contains the directory of this script, rather than the current
# directory)
import os
sys.path.insert(0, os.curdir)
if _wrap_timer is not None:
timer = _wrap_timer(timer)
t = Timer(stmt, setup, timer)
if number == 0:
# determine number so that 0.2 <= total time < 2.0
for i in range(1, 10):
number = 10**i
try:
x = t.timeit(number)
except:
t.print_exc()
return 1
if verbose:
print("%d loops -> %.*g secs" % (number, precision, x))
if x >= 0.2:
break
try:
r = t.repeat(repeat, number)
except:
t.print_exc()
return 1
best = min(r)
if verbose:
print("raw times:", " ".join(["%.*g" % (precision, x) for x in r]))
print("%d loops," % number, end=' ')
usec = best * 1e6 / number
if usec < 1000:
print("best of %d: %.*g usec per loop" % (repeat, precision, usec))
else:
msec = usec / 1000
if msec < 1000:
print("best of %d: %.*g msec per loop" % (repeat, precision, msec))
else:
sec = msec / 1000
print("best of %d: %.*g sec per loop" % (repeat, precision, sec))
return None
if __name__ == "__main__":
sys.exit(main())
| 0.001048 |
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import urllib
import generic
import sickbeard
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from sickbeard import exceptions, logger
from sickbeard import tvcache, show_name_helpers
class NZBsRUSProvider(generic.NZBProvider):
def __init__(self):
generic.NZBProvider.__init__(self, "NZBs'R'US")
self.cache = NZBsRUSCache(self)
self.url = 'https://www.nzbsrus.com/'
self.supportsBacklog = True
def isEnabled(self):
return sickbeard.NZBSRUS
def _checkAuth(self):
if sickbeard.NZBSRUS_UID in (None, "") or sickbeard.NZBSRUS_HASH in (None, ""):
raise exceptions.AuthException("NZBs'R'US authentication details are empty, check your config")
def _get_season_search_strings(self, show, season):
return [x for x in show_name_helpers.makeSceneSeasonSearchString(show, season)]
def _get_episode_search_strings(self, ep_obj):
return [x for x in show_name_helpers.makeSceneSearchString(ep_obj)]
def _doSearch(self, search, show=None):
params = {'uid': sickbeard.NZBSRUS_UID,
'key': sickbeard.NZBSRUS_HASH,
'xml': 1,
'age': sickbeard.USENET_RETENTION,
'lang0': 1, # English only from CouchPotato
'lang1': 1,
'lang3': 1,
'c91': 1, # TV:HD
'c104': 1, # TV:SD-x264
'c75': 1, # TV:XviD
'searchtext': search}
if not params['age']:
params['age'] = 500
searchURL = self.url + 'api.php?' + urllib.urlencode(params)
logger.log(u"NZBS'R'US search url: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
return []
if not data.startswith('<?xml'): # Error will be a single line of text
logger.log(u"NZBs'R'US error: " + data, logger.ERROR)
return []
root = etree.fromstring(data)
if root is None:
logger.log(u"Error trying to parse NZBS'R'US XML data.", logger.ERROR)
logger.log(u"RSS data: " + data, logger.DEBUG)
return []
return root.findall('./results/result')
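    # The resulting request looks roughly like the following (placeholder
    # values, parameter order depends on dict ordering):
    #   https://www.nzbsrus.com/api.php?uid=<uid>&key=<hash>&xml=1&age=500&...&searchtext=<search>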
def _get_title_and_url(self, element):
        if element.find('title') is not None:  # RSS feed (childless Elements are falsy, so compare against None)
title = element.find('title').text
url = element.find('link').text.replace('&', '&')
else: # API item
title = element.find('name').text
nzbID = element.find('id').text
key = element.find('key').text
url = self.url + 'nzbdownload_rss.php' + '/' + \
nzbID + '/' + sickbeard.NZBSRUS_UID + '/' + key + '/'
return (title, url)
class NZBsRUSCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll NZBs'R'US every 15 minutes max
self.minTime = 15
def _getRSSData(self):
url = self.provider.url + 'rssfeed.php?'
urlArgs = {'cat': '91,75,104', # HD,XviD,SD-x264
'i': sickbeard.NZBSRUS_UID,
'h': sickbeard.NZBSRUS_HASH}
url += urllib.urlencode(urlArgs)
logger.log(u"NZBs'R'US cache update URL: " + url, logger.DEBUG)
data = self.provider.getURL(url)
return data
def _checkAuth(self, data):
return data != 'Invalid Link'
provider = NZBsRUSProvider()
| 0.001137 |
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from cyber_py import cyber
from planning import Planning
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from subplot_st_main import StMainSubplot
from subplot_st_speed import StSpeedSubplot
from modules.planning.proto import planning_pb2
planning = Planning()
def update(frame_number):
st_main_subplot.show(planning)
st_speed_subplot.show(planning)
def planning_callback(planning_pb):
planning.update_planning_pb(planning_pb)
planning.compute_st_data()
def add_listener():
planning_sub = cyber.Node("st_plot")
planning_sub.create_reader('/apollo/planning', planning_pb2.ADCTrajectory,
planning_callback)
def press_key():
pass
if __name__ == '__main__':
cyber.init()
add_listener()
fig = plt.figure(figsize=(14, 6))
fig.canvas.mpl_connect('key_press_event', press_key)
ax = plt.subplot2grid((1, 2), (0, 0))
st_main_subplot = StMainSubplot(ax, 'QpSplineStSpeedOptimizer')
ax2 = plt.subplot2grid((1, 2), (0, 1))
st_speed_subplot = StSpeedSubplot(ax2, "QpSplineStSpeedOptimizer")
ani = animation.FuncAnimation(fig, update, interval=100)
plt.show()
cyber.shutdown() | 0.001011 |
# Python test set -- part 5, built-in exceptions
import os
import sys
import unittest
import pickle, cPickle
from test.test_support import (TESTFN, unlink, run_unittest, captured_output,
check_warnings, cpython_only)
from test.test_pep352 import ignore_deprecation_warnings
# XXX This is not really enough, each *operation* should be tested!
class ExceptionTests(unittest.TestCase):
def testReload(self):
# Reloading the built-in exceptions module failed prior to Py2.2, while it
# should act the same as reloading built-in sys.
try:
from imp import reload
import exceptions
reload(exceptions)
except ImportError, e:
self.fail("reloading exceptions: %s" % e)
def raise_catch(self, exc, excname):
try:
raise exc, "spam"
except exc, err:
buf1 = str(err)
try:
raise exc("spam")
except exc, err:
buf2 = str(err)
self.assertEqual(buf1, buf2)
self.assertEqual(exc.__name__, excname)
def testRaising(self):
self.raise_catch(AttributeError, "AttributeError")
self.assertRaises(AttributeError, getattr, sys, "undefined_attribute")
self.raise_catch(EOFError, "EOFError")
fp = open(TESTFN, 'w')
fp.close()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
try:
try:
sys.stdin = fp
x = raw_input()
except EOFError:
pass
finally:
sys.stdin = savestdin
fp.close()
unlink(TESTFN)
self.raise_catch(IOError, "IOError")
self.assertRaises(IOError, open, 'this file does not exist', 'r')
self.raise_catch(ImportError, "ImportError")
self.assertRaises(ImportError, __import__, "undefined_module")
self.raise_catch(IndexError, "IndexError")
x = []
self.assertRaises(IndexError, x.__getitem__, 10)
self.raise_catch(KeyError, "KeyError")
x = {}
self.assertRaises(KeyError, x.__getitem__, 'key')
self.raise_catch(KeyboardInterrupt, "KeyboardInterrupt")
self.raise_catch(MemoryError, "MemoryError")
self.raise_catch(NameError, "NameError")
try: x = undefined_variable
except NameError: pass
self.raise_catch(OverflowError, "OverflowError")
x = 1
for dummy in range(128):
x += x # this simply shouldn't blow up
self.raise_catch(RuntimeError, "RuntimeError")
self.raise_catch(SyntaxError, "SyntaxError")
try: exec '/\n'
except SyntaxError: pass
self.raise_catch(IndentationError, "IndentationError")
self.raise_catch(TabError, "TabError")
# can only be tested under -tt, and is the only test for -tt
#try: compile("try:\n\t1/0\n \t1/0\nfinally:\n pass\n", '<string>', 'exec')
#except TabError: pass
#else: self.fail("TabError not raised")
self.raise_catch(SystemError, "SystemError")
self.raise_catch(SystemExit, "SystemExit")
self.assertRaises(SystemExit, sys.exit, 0)
self.raise_catch(TypeError, "TypeError")
try: [] + ()
except TypeError: pass
self.raise_catch(ValueError, "ValueError")
self.assertRaises(ValueError, chr, 10000)
self.raise_catch(ZeroDivisionError, "ZeroDivisionError")
try: x = 1 // 0
except ZeroDivisionError: pass
self.raise_catch(Exception, "Exception")
try: x = 1 // 0
except Exception, e: pass
def testSyntaxErrorMessage(self):
# make sure the right exception message is raised for each of
# these code fragments
def ckmsg(src, msg):
try:
compile(src, '<fragment>', 'exec')
except SyntaxError, e:
if e.msg != msg:
self.fail("expected %s, got %s" % (msg, e.msg))
else:
self.fail("failed to get expected SyntaxError")
s = '''while 1:
try:
pass
finally:
continue'''
if not sys.platform.startswith('java'):
ckmsg(s, "'continue' not supported inside 'finally' clause")
s = '''if 1:
try:
continue
except:
pass'''
ckmsg(s, "'continue' not properly in loop")
ckmsg("continue\n", "'continue' not properly in loop")
@cpython_only
def testSettingException(self):
# test that setting an exception at the C level works even if the
# exception object can't be constructed.
class BadException:
def __init__(self_):
raise RuntimeError, "can't instantiate BadException"
def test_capi1():
import _testcapi
try:
_testcapi.raise_exception(BadException, 1)
except TypeError, err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEqual(co.co_name, "test_capi1")
self.assertTrue(co.co_filename.endswith('test_exceptions'+os.extsep+'py'))
else:
self.fail("Expected exception")
def test_capi2():
import _testcapi
try:
_testcapi.raise_exception(BadException, 0)
except RuntimeError, err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEqual(co.co_name, "__init__")
self.assertTrue(co.co_filename.endswith('test_exceptions'+os.extsep+'py'))
co2 = tb.tb_frame.f_back.f_code
self.assertEqual(co2.co_name, "test_capi2")
else:
self.fail("Expected exception")
if not sys.platform.startswith('java'):
test_capi1()
test_capi2()
def test_WindowsError(self):
try:
WindowsError
except NameError:
pass
else:
self.assertEqual(str(WindowsError(1001)),
"1001")
self.assertEqual(str(WindowsError(1001, "message")),
"[Error 1001] message")
self.assertEqual(WindowsError(1001, "message").errno, 22)
self.assertEqual(WindowsError(1001, "message").winerror, 1001)
@ignore_deprecation_warnings
def testAttributes(self):
# test that exception attributes are happy
exceptionList = [
(BaseException, (), {'message' : '', 'args' : ()}),
(BaseException, (1, ), {'message' : 1, 'args' : (1,)}),
(BaseException, ('foo',),
{'message' : 'foo', 'args' : ('foo',)}),
(BaseException, ('foo', 1),
{'message' : '', 'args' : ('foo', 1)}),
(SystemExit, ('foo',),
{'message' : 'foo', 'args' : ('foo',), 'code' : 'foo'}),
(IOError, ('foo',),
{'message' : 'foo', 'args' : ('foo',), 'filename' : None,
'errno' : None, 'strerror' : None}),
(IOError, ('foo', 'bar'),
{'message' : '', 'args' : ('foo', 'bar'), 'filename' : None,
'errno' : 'foo', 'strerror' : 'bar'}),
(IOError, ('foo', 'bar', 'baz'),
{'message' : '', 'args' : ('foo', 'bar'), 'filename' : 'baz',
'errno' : 'foo', 'strerror' : 'bar'}),
(IOError, ('foo', 'bar', 'baz', 'quux'),
{'message' : '', 'args' : ('foo', 'bar', 'baz', 'quux')}),
(EnvironmentError, ('errnoStr', 'strErrorStr', 'filenameStr'),
{'message' : '', 'args' : ('errnoStr', 'strErrorStr'),
'strerror' : 'strErrorStr', 'errno' : 'errnoStr',
'filename' : 'filenameStr'}),
(EnvironmentError, (1, 'strErrorStr', 'filenameStr'),
{'message' : '', 'args' : (1, 'strErrorStr'), 'errno' : 1,
'strerror' : 'strErrorStr', 'filename' : 'filenameStr'}),
(SyntaxError, (), {'message' : '', 'msg' : None, 'text' : None,
'filename' : None, 'lineno' : None, 'offset' : None,
'print_file_and_line' : None}),
(SyntaxError, ('msgStr',),
{'message' : 'msgStr', 'args' : ('msgStr',), 'text' : None,
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(SyntaxError, ('msgStr', ('filenameStr', 'linenoStr', 'offsetStr',
'textStr')),
{'message' : '', 'offset' : 'offsetStr', 'text' : 'textStr',
'args' : ('msgStr', ('filenameStr', 'linenoStr',
'offsetStr', 'textStr')),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : 'filenameStr', 'lineno' : 'linenoStr'}),
(SyntaxError, ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
{'message' : '', 'text' : None,
'args' : ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(UnicodeError, (), {'message' : '', 'args' : (),}),
(UnicodeEncodeError, ('ascii', u'a', 0, 1, 'ordinal not in range'),
{'message' : '', 'args' : ('ascii', u'a', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : u'a',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeDecodeError, ('ascii', '\xff', 0, 1, 'ordinal not in range'),
{'message' : '', 'args' : ('ascii', '\xff', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : '\xff',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeTranslateError, (u"\u3042", 0, 1, "ouch"),
{'message' : '', 'args' : (u'\u3042', 0, 1, 'ouch'),
'object' : u'\u3042', 'reason' : 'ouch',
'start' : 0, 'end' : 1}),
]
try:
exceptionList.append(
(WindowsError, (1, 'strErrorStr', 'filenameStr'),
{'message' : '', 'args' : (1, 'strErrorStr'),
'strerror' : 'strErrorStr', 'winerror' : 1,
'errno' : 22, 'filename' : 'filenameStr'})
)
except NameError:
pass
for exc, args, expected in exceptionList:
try:
raise exc(*args)
except BaseException, e:
if type(e) is not exc:
raise
# Verify module name
self.assertEqual(type(e).__module__, 'exceptions')
# Verify no ref leaks in Exc_str()
s = str(e)
for checkArgName in expected:
self.assertEqual(repr(getattr(e, checkArgName)),
repr(expected[checkArgName]),
'exception "%s", attribute "%s"' %
(repr(e), checkArgName))
# test for pickling support
for p in pickle, cPickle:
for protocol in range(p.HIGHEST_PROTOCOL + 1):
new = p.loads(p.dumps(e, protocol))
for checkArgName in expected:
got = repr(getattr(new, checkArgName))
want = repr(expected[checkArgName])
self.assertEqual(got, want,
'pickled "%r", attribute "%s"' %
(e, checkArgName))
def testDeprecatedMessageAttribute(self):
# Accessing BaseException.message and relying on its value set by
# BaseException.__init__ triggers a deprecation warning.
exc = BaseException("foo")
with check_warnings(("BaseException.message has been deprecated "
"as of Python 2.6", DeprecationWarning)) as w:
self.assertEqual(exc.message, "foo")
self.assertEqual(len(w.warnings), 1)
def testRegularMessageAttribute(self):
# Accessing BaseException.message after explicitly setting a value
# for it does not trigger a deprecation warning.
exc = BaseException("foo")
exc.message = "bar"
with check_warnings(quiet=True) as w:
self.assertEqual(exc.message, "bar")
self.assertEqual(len(w.warnings), 0)
# Deleting the message is supported, too.
del exc.message
with self.assertRaises(AttributeError):
exc.message
@ignore_deprecation_warnings
def testPickleMessageAttribute(self):
# Pickling with message attribute must work, as well.
e = Exception("foo")
f = Exception("foo")
f.message = "bar"
for p in pickle, cPickle:
ep = p.loads(p.dumps(e))
self.assertEqual(ep.message, "foo")
fp = p.loads(p.dumps(f))
self.assertEqual(fp.message, "bar")
@ignore_deprecation_warnings
def testSlicing(self):
# Test that you can slice an exception directly instead of requiring
# going through the 'args' attribute.
args = (1, 2, 3)
exc = BaseException(*args)
self.assertEqual(exc[:], args)
self.assertEqual(exc.args[:], args)
def testKeywordArgs(self):
        # test that builtin exceptions don't take keyword args,
# but user-defined subclasses can if they want
self.assertRaises(TypeError, BaseException, a=1)
class DerivedException(BaseException):
def __init__(self, fancy_arg):
BaseException.__init__(self)
self.fancy_arg = fancy_arg
x = DerivedException(fancy_arg=42)
self.assertEqual(x.fancy_arg, 42)
def testInfiniteRecursion(self):
def f():
return f()
self.assertRaises(RuntimeError, f)
def g():
try:
return g()
except ValueError:
return -1
# The test prints an unraisable recursion error when
# doing "except ValueError", this is because subclass
# checking has recursion checking too.
with captured_output("stderr"):
try:
g()
except RuntimeError:
pass
except:
self.fail("Should have raised KeyError")
else:
self.fail("Should have raised KeyError")
def testUnicodeStrUsage(self):
# Make sure both instances and classes have a str and unicode
# representation.
self.assertTrue(str(Exception))
self.assertTrue(unicode(Exception))
self.assertTrue(str(Exception('a')))
self.assertTrue(unicode(Exception(u'a')))
self.assertTrue(unicode(Exception(u'\xe1')))
def testUnicodeChangeAttributes(self):
# See issue 7309. This was a crasher.
u = UnicodeEncodeError('baz', u'xxxxx', 1, 5, 'foo')
self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "'baz' codec can't encode character u'\\x78' in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: 965230951443685724997")
u.encoding = 4000
self.assertEqual(str(u), "'4000' codec can't encode characters in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "'4000' codec can't encode characters in position 1000-4: 965230951443685724997")
u = UnicodeDecodeError('baz', 'xxxxx', 1, 5, 'foo')
self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "'baz' codec can't decode byte 0x78 in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: 965230951443685724997")
u.encoding = 4000
self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1000-4: 965230951443685724997")
u = UnicodeTranslateError(u'xxxx', 1, 5, 'foo')
self.assertEqual(str(u), "can't translate characters in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "can't translate character u'\\x78' in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "can't translate characters in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "can't translate characters in position 1000-4: 965230951443685724997")
def test_badisinstance(self):
# Bug #2542: if issubclass(e, MyException) raises an exception,
# it should be ignored
class Meta(type):
def __subclasscheck__(cls, subclass):
raise ValueError()
class MyException(Exception):
__metaclass__ = Meta
pass
with captured_output("stderr") as stderr:
try:
raise KeyError()
except MyException, e:
self.fail("exception should not be a MyException")
except KeyError:
pass
except:
self.fail("Should have raised KeyError")
else:
self.fail("Should have raised KeyError")
with captured_output("stderr") as stderr:
def g():
try:
return g()
except RuntimeError:
return sys.exc_info()
e, v, tb = g()
self.assertTrue(e is RuntimeError, e)
self.assertIn("maximum recursion depth exceeded", str(v))
def test_new_returns_invalid_instance(self):
# See issue #11627.
class MyException(Exception):
def __new__(cls, *args):
return object()
with self.assertRaises(TypeError):
raise MyException
def test_assert_with_tuple_arg(self):
try:
assert False, (3,)
except AssertionError as e:
self.assertEqual(str(e), "(3,)")
def test_bad_exception_clearing(self):
# See issue 16445: use of Py_XDECREF instead of Py_CLEAR in
# BaseException_set_message gave a possible way to segfault the
# interpreter.
class Nasty(str):
def __del__(message):
del e.message
e = ValueError(Nasty("msg"))
e.args = ()
del e.message
# Helper class used by TestSameStrAndUnicodeMsg
class ExcWithOverriddenStr(Exception):
"""Subclass of Exception that accepts a keyword 'msg' arg that is
returned by __str__. 'msg' won't be included in self.args"""
def __init__(self, *args, **kwargs):
self.msg = kwargs.pop('msg') # msg should always be present
super(ExcWithOverriddenStr, self).__init__(*args, **kwargs)
def __str__(self):
return self.msg
class TestSameStrAndUnicodeMsg(unittest.TestCase):
"""unicode(err) should return the same message of str(err). See #6108"""
def check_same_msg(self, exc, msg):
"""Helper function that checks if str(exc) == unicode(exc) == msg"""
self.assertEqual(str(exc), msg)
self.assertEqual(str(exc), unicode(exc))
def test_builtin_exceptions(self):
"""Check same msg for built-in exceptions"""
# These exceptions implement a __str__ method that uses the args
# to create a better error message. unicode(e) should return the same
# message.
exceptions = [
SyntaxError('invalid syntax', ('<string>', 1, 3, '2+*3')),
IOError(2, 'No such file or directory'),
KeyError('both should have the same quotes'),
UnicodeDecodeError('ascii', '\xc3\xa0', 0, 1,
'ordinal not in range(128)'),
UnicodeEncodeError('ascii', u'\u1234', 0, 1,
'ordinal not in range(128)')
]
for exception in exceptions:
self.assertEqual(str(exception), unicode(exception))
def test_0_args(self):
"""Check same msg for Exception with 0 args"""
# str() and unicode() on an Exception with no args should return an
# empty string
self.check_same_msg(Exception(), '')
def test_0_args_with_overridden___str__(self):
"""Check same msg for exceptions with 0 args and overridden __str__"""
# str() and unicode() on an exception with overridden __str__ that
# returns an ascii-only string should return the same string
for msg in ('foo', u'foo'):
self.check_same_msg(ExcWithOverriddenStr(msg=msg), msg)
# if __str__ returns a non-ascii unicode string str() should fail
# but unicode() should return the unicode string
e = ExcWithOverriddenStr(msg=u'f\xf6\xf6') # no args
self.assertRaises(UnicodeEncodeError, str, e)
self.assertEqual(unicode(e), u'f\xf6\xf6')
def test_1_arg(self):
"""Check same msg for Exceptions with 1 arg"""
for arg in ('foo', u'foo'):
self.check_same_msg(Exception(arg), arg)
# if __str__ is not overridden and self.args[0] is a non-ascii unicode
# string, str() should try to return str(self.args[0]) and fail.
# unicode() should return unicode(self.args[0]) and succeed.
e = Exception(u'f\xf6\xf6')
self.assertRaises(UnicodeEncodeError, str, e)
self.assertEqual(unicode(e), u'f\xf6\xf6')
def test_1_arg_with_overridden___str__(self):
"""Check same msg for exceptions with overridden __str__ and 1 arg"""
# when __str__ is overridden and __unicode__ is not implemented
# unicode(e) returns the same as unicode(e.__str__()).
for msg in ('foo', u'foo'):
self.check_same_msg(ExcWithOverriddenStr('arg', msg=msg), msg)
# if __str__ returns a non-ascii unicode string, str() should fail
# but unicode() should succeed.
e = ExcWithOverriddenStr('arg', msg=u'f\xf6\xf6') # 1 arg
self.assertRaises(UnicodeEncodeError, str, e)
self.assertEqual(unicode(e), u'f\xf6\xf6')
def test_many_args(self):
"""Check same msg for Exceptions with many args"""
argslist = [
(3, 'foo'),
(1, u'foo', 'bar'),
(4, u'f\xf6\xf6', u'bar', 'baz')
]
# both str() and unicode() should return a repr() of the args
for args in argslist:
self.check_same_msg(Exception(*args), repr(args))
def test_many_args_with_overridden___str__(self):
"""Check same msg for exceptions with overridden __str__ and many args"""
# if __str__ returns an ascii string / ascii unicode string
# both str() and unicode() should succeed
for msg in ('foo', u'foo'):
e = ExcWithOverriddenStr('arg1', u'arg2', u'f\xf6\xf6', msg=msg)
self.check_same_msg(e, msg)
# if __str__ returns a non-ascii unicode string, str() should fail
# but unicode() should succeed
e = ExcWithOverriddenStr('arg1', u'f\xf6\xf6', u'arg3', # 3 args
msg=u'f\xf6\xf6')
self.assertRaises(UnicodeEncodeError, str, e)
self.assertEqual(unicode(e), u'f\xf6\xf6')
@cpython_only
def test_exception_with_doc(self):
import _testcapi
doc2 = "This is a test docstring."
doc4 = "This is another test docstring."
self.assertRaises(SystemError, _testcapi.make_exception_with_doc,
"error1")
# test basic usage of PyErr_NewException
error1 = _testcapi.make_exception_with_doc("_testcapi.error1")
self.assertIs(type(error1), type)
self.assertTrue(issubclass(error1, Exception))
self.assertIsNone(error1.__doc__)
# test with given docstring
error2 = _testcapi.make_exception_with_doc("_testcapi.error2", doc2)
self.assertEqual(error2.__doc__, doc2)
# test with explicit base (without docstring)
error3 = _testcapi.make_exception_with_doc("_testcapi.error3",
base=error2)
self.assertTrue(issubclass(error3, error2))
# test with explicit base tuple
class C(object):
pass
error4 = _testcapi.make_exception_with_doc("_testcapi.error4", doc4,
(error3, C))
self.assertTrue(issubclass(error4, error3))
self.assertTrue(issubclass(error4, C))
self.assertEqual(error4.__doc__, doc4)
# test with explicit dictionary
error5 = _testcapi.make_exception_with_doc("_testcapi.error5", "",
error4, {'a': 1})
self.assertTrue(issubclass(error5, error4))
self.assertEqual(error5.a, 1)
self.assertEqual(error5.__doc__, "")
def test_main():
run_unittest(ExceptionTests, TestSameStrAndUnicodeMsg)
if __name__ == '__main__':
test_main()
| 0.00565 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import httplib
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.compute.types import InvalidCredsError
from libcloud.common.types import LibcloudError
API_HOST = 'vapi.vr.org'
class HostVirtualException(LibcloudError):
def __init__(self, code, message):
self.code = code
self.message = message
self.args = (code, message)
def __str__(self):
return self.__repr__()
def __repr__(self):
return '<HostVirtualException in %d: %s>' % (self.code, self.message)
class HostVirtualConnection(ConnectionKey):
host = API_HOST
allow_insecure = False
def add_default_params(self, params):
params['key'] = self.key
return params
class HostVirtualResponse(JsonResponse):
valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
httplib.NO_CONTENT]
def parse_body(self):
if not self.body:
return None
data = json.loads(self.body)
return data
def parse_error(self):
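        # Map authentication failures and provider error payloads onto
        # libcloud exception types; any other response body falls through.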
data = self.parse_body()
if self.status == httplib.UNAUTHORIZED:
raise InvalidCredsError('%(code)s:%(message)s' % (data['error']))
elif self.status == httplib.PRECONDITION_FAILED:
raise HostVirtualException(
data['error']['code'], data['error']['message'])
elif self.status == httplib.NOT_FOUND:
raise HostVirtualException(
data['error']['code'], data['error']['message'])
return self.body
def success(self):
return self.status in self.valid_response_codes
| 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Checker for file headers
~~~~~~~~~~~~~~~~~~~~~~~~
Make sure each Python file has a correct file header
including copyright and license information.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import io
import os
import re
import sys
import getopt
from os.path import join, splitext, abspath
checkers = {}
def checker(*suffixes, **kwds):
only_pkg = kwds.pop('only_pkg', False)
def deco(func):
for suffix in suffixes:
checkers.setdefault(suffix, []).append(func)
func.only_pkg = only_pkg
return func
return deco
name_mail_re = r'[\w ]+(<.*?>)?'
copyright_re = re.compile(r'^ :copyright: Copyright 2006-2014 by '
r'the Pygments team, see AUTHORS\.$', re.UNICODE)
copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' %
(name_mail_re, name_mail_re), re.UNICODE)
coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
not_ix_re = re.compile(r'\bnot\s+\S+?\s+i[sn]\s\S+')
is_const_re = re.compile(r'if.*?==\s+(None|False|True)\b')
misspellings = ["developement", "adress", "verificate", # ALLOW-MISSPELLING
"informations"] # ALLOW-MISSPELLING
@checker('.py')
def check_syntax(fn, lines):
try:
compile(''.join(lines), fn, "exec")
except SyntaxError as err:
yield 0, "not compilable: %s" % err
@checker('.py')
def check_style_and_encoding(fn, lines):
encoding = 'ascii'
for lno, line in enumerate(lines):
if len(line) > 90:
yield lno+1, "line too long"
m = not_ix_re.search(line)
if m:
yield lno+1, '"' + m.group() + '"'
if is_const_re.search(line):
yield lno+1, 'using == None/True/False'
if lno < 2:
co = coding_re.search(line)
if co:
encoding = co.group(1)
try:
line.decode(encoding)
except AttributeError:
# Python 3 - encoding was already checked
pass
except UnicodeDecodeError as err:
yield lno+1, "not decodable: %s\n Line: %r" % (err, line)
except LookupError as err:
yield 0, "unknown encoding: %s" % encoding
encoding = 'latin1'
@checker('.py', only_pkg=True)
def check_fileheader(fn, lines):
# line number correction
c = 1
if lines[0:1] == ['#!/usr/bin/env python\n']:
lines = lines[1:]
c = 2
llist = []
docopen = False
for lno, l in enumerate(lines):
llist.append(l)
if lno == 0:
if l == '# -*- coding: rot13 -*-\n':
# special-case pony package
return
elif l != '# -*- coding: utf-8 -*-\n':
yield 1, "missing coding declaration"
elif lno == 1:
if l != '"""\n' and l != 'r"""\n':
yield 2, 'missing docstring begin (""")'
else:
docopen = True
elif docopen:
if l == '"""\n':
# end of docstring
if lno <= 4:
yield lno+c, "missing module name in docstring"
break
if l != "\n" and l[:4] != ' ' and docopen:
yield lno+c, "missing correct docstring indentation"
if lno == 2:
# if not in package, don't check the module name
modname = fn[:-3].replace('/', '.').replace('.__init__', '')
while modname:
if l.lower()[4:-1] == modname:
break
modname = '.'.join(modname.split('.')[1:])
else:
yield 3, "wrong module name in docstring heading"
modnamelen = len(l.strip())
elif lno == 3:
if l.strip() != modnamelen * "~":
yield 4, "wrong module name underline, should be ~~~...~"
else:
yield 0, "missing end and/or start of docstring..."
# check for copyright and license fields
license = llist[-2:-1]
if license != [" :license: BSD, see LICENSE for details.\n"]:
yield 0, "no correct license info"
ci = -3
copyright = llist[ci:ci+1]
while copyright and copyright_2_re.match(copyright[0]):
ci -= 1
copyright = llist[ci:ci+1]
if not copyright or not copyright_re.match(copyright[0]):
yield 0, "no correct copyright info"
def main(argv):
try:
gopts, args = getopt.getopt(argv[1:], "vi:")
except getopt.GetoptError:
print("Usage: %s [-v] [-i ignorepath]* [path]" % argv[0])
return 2
opts = {}
for opt, val in gopts:
if opt == '-i':
val = abspath(val)
opts.setdefault(opt, []).append(val)
if len(args) == 0:
path = '.'
elif len(args) == 1:
path = args[0]
else:
print("Usage: %s [-v] [-i ignorepath]* [path]" % argv[0])
return 2
verbose = '-v' in opts
num = 0
out = io.StringIO()
# TODO: replace os.walk run with iteration over output of
# `svn list -R`.
for root, dirs, files in os.walk(path):
if '.hg' in dirs:
dirs.remove('.hg')
if '-i' in opts and abspath(root) in opts['-i']:
del dirs[:]
continue
# XXX: awkward: for the Makefile call: don't check non-package
# files for file headers
in_pocoo_pkg = root.startswith('./pygments')
for fn in files:
fn = join(root, fn)
if fn[:2] == './': fn = fn[2:]
if '-i' in opts and abspath(fn) in opts['-i']:
continue
ext = splitext(fn)[1]
checkerlist = checkers.get(ext, None)
if not checkerlist:
continue
if verbose:
print("Checking %s..." % fn)
try:
f = open(fn, 'r')
lines = list(f)
except (IOError, OSError) as err:
print("%s: cannot open: %s" % (fn, err))
num += 1
continue
for checker in checkerlist:
if not in_pocoo_pkg and checker.only_pkg:
continue
for lno, msg in checker(fn, lines):
print(u"%s:%d: %s" % (fn, lno, msg), file=out)
num += 1
if verbose:
print()
if num == 0:
print("No errors found.")
else:
print(out.getvalue().rstrip('\n'))
print("%d error%s found." % (num, num > 1 and "s" or ""))
return int(num > 0)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 0.001748 |
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Crocodile source scanners."""
import re
class Scanner(object):
"""Generic source scanner."""
def __init__(self):
"""Constructor."""
self.re_token = re.compile('#')
self.comment_to_eol = ['#']
self.comment_start = None
self.comment_end = None
def ScanLines(self, lines):
"""Scans the lines for executable statements.
Args:
lines: Iterator returning source lines.
Returns:
An array of line numbers which are executable.
"""
exe_lines = []
lineno = 0
in_string = None
in_comment = None
comment_index = None
for line in lines:
lineno += 1
in_string_at_start = in_string
for t in self.re_token.finditer(line):
tokenstr = t.groups()[0]
if in_comment:
# Inside a multi-line comment, so look for end token
if tokenstr == in_comment:
in_comment = None
# Replace comment with spaces
line = (line[:comment_index]
+ ' ' * (t.end(0) - comment_index)
+ line[t.end(0):])
elif in_string:
# Inside a string, so look for end token
if tokenstr == in_string:
in_string = None
elif tokenstr in self.comment_to_eol:
# Single-line comment, so truncate line at start of token
line = line[:t.start(0)]
break
elif tokenstr == self.comment_start:
# Multi-line comment start - end token is comment_end
in_comment = self.comment_end
comment_index = t.start(0)
else:
# Starting a string - end token is same as start
in_string = tokenstr
# If still in comment at end of line, remove comment
if in_comment:
line = line[:comment_index]
        # Next line, delete from the beginning
comment_index = 0
# If line-sans-comments is not empty, claim it may be executable
if line.strip() or in_string_at_start:
exe_lines.append(lineno)
# Return executable lines
return exe_lines
def Scan(self, filename):
"""Reads the file and scans its lines.
Args:
filename: Path to file to scan.
Returns:
An array of line numbers which are executable.
"""
# TODO: All manner of error checking
f = None
try:
f = open(filename, 'rt')
return self.ScanLines(f)
finally:
if f:
f.close()
class PythonScanner(Scanner):
"""Python source scanner."""
def __init__(self):
"""Constructor."""
Scanner.__init__(self)
# TODO: This breaks for strings ending in more than 2 backslashes. Need
# a pattern which counts only an odd number of backslashes, so the last
# one thus escapes the quote.
self.re_token = re.compile(r'(#|\'\'\'|"""|(?<!(?<!\\)\\)["\'])')
self.comment_to_eol = ['#']
self.comment_start = None
self.comment_end = None
class CppScanner(Scanner):
"""C / C++ / ObjC / ObjC++ source scanner."""
def __init__(self):
"""Constructor."""
Scanner.__init__(self)
# TODO: This breaks for strings ending in more than 2 backslashes. Need
# a pattern which counts only an odd number of backslashes, so the last
# one thus escapes the quote.
self.re_token = re.compile(r'(^\s*#|//|/\*|\*/|(?<!(?<!\\)\\)["\'])')
# TODO: Treat '\' at EOL as a token, and handle it as continuing the
# previous line. That is, if in a comment-to-eol, this line is a comment
# too.
# Note that we treat # at beginning of line as a comment, so that we ignore
# preprocessor definitions
self.comment_to_eol = ['//', '#']
self.comment_start = '/*'
self.comment_end = '*/'
def ScanFile(filename, language):
"""Scans a file for executable lines.
Args:
filename: Path to file to scan.
language: Language for file ('C', 'C++', 'python', 'ObjC', 'ObjC++')
Returns:
A list of executable lines, or an empty list if the file was not a handled
language.
"""
if language == 'python':
return PythonScanner().Scan(filename)
elif language in ['C', 'C++', 'ObjC', 'ObjC++']:
return CppScanner().Scan(filename)
# Something we don't handle
return []
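# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of driving the scanners directly; the source lines below
# are invented. ScanLines() accepts any iterable of lines, so it can be
# exercised without touching the filesystem.
def _example_python_scan():
  lines = ['x = 1  # trailing comment\n',  # executable -> line 1
           '# comment-only line\n',        # stripped, so not executable
           'print(x)\n']                   # executable -> line 3
  return PythonScanner().ScanLines(lines)  # expected: [1, 3]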
| 0.007985 |
# -*- coding: utf-8 -*-
"""
Models for Student Identity Verification
This is where we put any models relating to establishing the real-life identity
of a student over a period of time. Right now, the only models are the abstract
`PhotoVerification`, and its one concrete implementation
`SoftwareSecurePhotoVerification`. The hope is to keep as much of the
photo verification process as generic as possible.
"""
import functools
import json
import logging
from datetime import datetime, timedelta
from email.utils import formatdate
import pytz
import requests
import uuid
from lazy import lazy
from opaque_keys.edx.keys import UsageKey
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.dispatch import receiver
from django.db import models, transaction
from django.utils.translation import ugettext as _, ugettext_lazy
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from simple_history.models import HistoricalRecords
from config_models.models import ConfigurationModel
from course_modes.models import CourseMode
from model_utils.models import StatusModel, TimeStampedModel
from model_utils import Choices
from lms.djangoapps.verify_student.ssencrypt import (
random_aes_key, encrypt_and_encode,
generate_signed_message, rsa_encrypt
)
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule_django.models import CourseKeyField
from microsite_configuration.templatetags.microsite import platform_name
log = logging.getLogger(__name__)
def generateUUID(): # pylint: disable=invalid-name
""" Utility function; generates UUIDs """
return str(uuid.uuid4())
class VerificationException(Exception):
pass
def status_before_must_be(*valid_start_statuses):
"""
Helper decorator with arguments to make sure that an object with a `status`
attribute is in one of a list of acceptable status states before a method
is called. You could use it in a class definition like:
@status_before_must_be("submitted", "approved", "denied")
def refund_user(self, user_id):
# Do logic here...
If the object has a status that is not listed when the `refund_user` method
is invoked, it will throw a `VerificationException`. This is just to avoid
distracting boilerplate when looking at a Model that needs to go through a
workflow process.
"""
def decorator_func(func):
"""
Decorator function that gets returned
"""
@functools.wraps(func)
def with_status_check(obj, *args, **kwargs):
if obj.status not in valid_start_statuses:
exception_msg = (
u"Error calling {} {}: status is '{}', must be one of: {}"
).format(func, obj, obj.status, valid_start_statuses)
raise VerificationException(exception_msg)
return func(obj, *args, **kwargs)
return with_status_check
return decorator_func
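# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical example of the decorator above: calling finish()
# while `status` is anything other than "ready" raises VerificationException.
class _ExampleAttempt(object):
    status = "created"
    @status_before_must_be("ready")
    def finish(self):
        self.status = "done"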
class PhotoVerification(StatusModel):
"""
Each PhotoVerification represents a Student's attempt to establish
their identity by uploading a photo of themselves and a picture ID. An
attempt actually has a number of fields that need to be filled out at
different steps of the approval process. While it's useful as a Django Model
for the querying facilities, **you should only edit a `PhotoVerification`
object through the methods provided**. Initialize them with a user:
attempt = PhotoVerification(user=user)
We track this attempt through various states:
`created`
Initial creation and state we're in after uploading the images.
`ready`
The user has uploaded their images and checked that they can read the
images. There's a separate state here because it may be the case that we
don't actually submit this attempt for review until payment is made.
`submitted`
Submitted for review. The review may be done by a staff member or an
external service. The user cannot make changes once in this state.
`must_retry`
We submitted this, but there was an error on submission (i.e. we did not
get a 200 when we POSTed to Software Secure)
`approved`
An admin or an external service has confirmed that the user's photo and
photo ID match up, and that the photo ID's name matches the user's.
`denied`
The request has been denied. See `error_msg` for details on why. An
admin might later override this and change to `approved`, but the
student cannot re-open this attempt -- they have to create another
attempt and submit it instead.
Because this Model inherits from StatusModel, we can also do things like::
attempt.status == PhotoVerification.STATUS.created
attempt.status == "created"
pending_requests = PhotoVerification.submitted.all()
"""
######################## Fields Set During Creation ########################
# See class docstring for description of status states
STATUS = Choices('created', 'ready', 'submitted', 'must_retry', 'approved', 'denied')
user = models.ForeignKey(User, db_index=True)
# They can change their name later on, so we want to copy the value here so
# we always preserve what it was at the time they requested. We only copy
# this value during the mark_ready() step. Prior to that, you should be
# displaying the user's name from their user.profile.name.
name = models.CharField(blank=True, max_length=255)
# Where we place the uploaded image files (e.g. S3 URLs)
face_image_url = models.URLField(blank=True, max_length=255)
photo_id_image_url = models.URLField(blank=True, max_length=255)
# Randomly generated UUID so that external services can post back the
    # results of checking a user's photo submission without us exposing actual
# user IDs or something too easily guessable.
receipt_id = models.CharField(
db_index=True,
default=generateUUID,
max_length=255,
)
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
updated_at = models.DateTimeField(auto_now=True, db_index=True)
# Indicates whether or not a user wants to see the verification status
# displayed on their dash. Right now, only relevant for allowing students
# to "dismiss" a failed midcourse reverification message
# TODO: This field is deprecated.
display = models.BooleanField(db_index=True, default=True)
######################## Fields Set When Submitting ########################
submitted_at = models.DateTimeField(null=True, db_index=True)
#################### Fields Set During Approval/Denial #####################
# If the review was done by an internal staff member, mark who it was.
reviewing_user = models.ForeignKey(
User,
db_index=True,
default=None,
null=True,
related_name="photo_verifications_reviewed"
)
# Mark the name of the service used to evaluate this attempt (e.g
# Software Secure).
reviewing_service = models.CharField(blank=True, max_length=255)
# If status is "denied", this should contain text explaining why.
error_msg = models.TextField(blank=True)
# Non-required field. External services can add any arbitrary codes as time
    # goes on. We don't try to define an exhaustive list -- this is just
# capturing it so that we can later query for the common problems.
error_code = models.CharField(blank=True, max_length=50)
class Meta(object):
app_label = "verify_student"
abstract = True
ordering = ['-created_at']
##### Methods listed in the order you'd typically call them
@classmethod
def _earliest_allowed_date(cls):
"""
Returns the earliest allowed date given the settings
"""
days_good_for = settings.VERIFY_STUDENT["DAYS_GOOD_FOR"]
return datetime.now(pytz.UTC) - timedelta(days=days_good_for)
@classmethod
def user_is_verified(cls, user, earliest_allowed_date=None):
"""
Return whether or not a user has satisfactorily proved their identity.
Depending on the policy, this can expire after some period of time, so
a user might have to renew periodically.
This will check for the user's *initial* verification.
"""
return cls.objects.filter(
user=user,
status="approved",
created_at__gte=(earliest_allowed_date
or cls._earliest_allowed_date())
).exists()
@classmethod
def verification_valid_or_pending(cls, user, earliest_allowed_date=None, queryset=None):
"""
Check whether the user has a complete verification attempt that is
or *might* be good. This means that it's approved, been submitted,
        or would have been submitted but had a non-user error when it was
being submitted.
It's basically any situation in which the user has signed off on
the contents of the attempt, and we have not yet received a denial.
This will check for the user's *initial* verification.
Arguments:
user:
earliest_allowed_date: earliest allowed date given in the
settings
queryset: If a queryset is provided, that will be used instead
of hitting the database.
Returns:
queryset: queryset of 'PhotoVerification' sorted by 'created_at' in
descending order.
"""
valid_statuses = ['submitted', 'approved', 'must_retry']
if queryset is None:
queryset = cls.objects.filter(user=user)
return queryset.filter(
status__in=valid_statuses,
created_at__gte=(
earliest_allowed_date
or cls._earliest_allowed_date()
)
).order_by('-created_at')
@classmethod
def user_has_valid_or_pending(cls, user, earliest_allowed_date=None, queryset=None):
"""
Check whether the user has an active or pending verification attempt
Returns:
bool: True or False according to existence of valid verifications
"""
return cls.verification_valid_or_pending(user, earliest_allowed_date, queryset).exists()
@classmethod
def active_for_user(cls, user):
"""
Return the most recent PhotoVerification that is marked ready (i.e. the
user has said they're set, but we haven't submitted anything yet).
This checks for the original verification.
"""
# This should only be one at the most, but just in case we create more
# by mistake, we'll grab the most recently created one.
active_attempts = cls.objects.filter(user=user, status='ready').order_by('-created_at')
if active_attempts:
return active_attempts[0]
else:
return None
@classmethod
def user_status(cls, user):
"""
Returns the status of the user based on their past verification attempts
If no such verification exists, returns 'none'
If verification has expired, returns 'expired'
If the verification has been approved, returns 'approved'
If the verification process is still ongoing, returns 'pending'
If the verification has been denied and the user must resubmit photos, returns 'must_reverify'
This checks initial verifications
"""
status = 'none'
error_msg = ''
if cls.user_is_verified(user):
status = 'approved'
elif cls.user_has_valid_or_pending(user):
# user_has_valid_or_pending does include 'approved', but if we are
# here, we know that the attempt is still pending
status = 'pending'
else:
# we need to check the most recent attempt to see if we need to ask them to do
# a retry
try:
attempts = cls.objects.filter(user=user).order_by('-updated_at')
attempt = attempts[0]
except IndexError:
# we return 'none'
return ('none', error_msg)
if attempt.created_at < cls._earliest_allowed_date():
return (
'expired',
_("Your {platform_name} verification has expired.").format(platform_name=platform_name())
)
# If someone is denied their original verification attempt, they can try to reverify.
if attempt.status == 'denied':
status = 'must_reverify'
if attempt.error_msg:
error_msg = attempt.parsed_error_msg()
return (status, error_msg)
@classmethod
def verification_for_datetime(cls, deadline, candidates):
"""Find a verification in a set that applied during a particular datetime.
A verification is considered "active" during a datetime if:
1) The verification was created before the datetime, and
2) The verification is set to expire after the datetime.
Note that verification status is *not* considered here,
just the start/expire dates.
If multiple verifications were active at the deadline,
returns the most recently created one.
Arguments:
deadline (datetime): The datetime at which the verification applied.
If `None`, then return the most recently created candidate.
candidates (list of `PhotoVerification`s): Potential verifications to search through.
Returns:
PhotoVerification: A photo verification that was active at the deadline.
If no verification was active, return None.
"""
if len(candidates) == 0:
return None
# If there's no deadline, then return the most recently created verification
if deadline is None:
return candidates[0]
# Otherwise, look for a verification that was in effect at the deadline,
# preferring recent verifications.
# If no such verification is found, implicitly return `None`
for verification in candidates:
if verification.active_at_datetime(deadline):
return verification
@property
def expiration_datetime(self):
"""Datetime that the verification will expire. """
days_good_for = settings.VERIFY_STUDENT["DAYS_GOOD_FOR"]
return self.created_at + timedelta(days=days_good_for)
def active_at_datetime(self, deadline):
"""Check whether the verification was active at a particular datetime.
Arguments:
deadline (datetime): The date at which the verification was active
(created before and expired after).
Returns:
bool
"""
return (
self.created_at < deadline and
self.expiration_datetime > deadline
)
def parsed_error_msg(self):
"""
Sometimes, the error message we've received needs to be parsed into
something more human readable
The default behavior is to return the current error message as is.
"""
return self.error_msg
@status_before_must_be("created")
def upload_face_image(self, img):
raise NotImplementedError
@status_before_must_be("created")
def upload_photo_id_image(self, img):
raise NotImplementedError
@status_before_must_be("created")
def mark_ready(self):
"""
Mark that the user data in this attempt is correct. In order to
succeed, the user must have uploaded the necessary images
(`face_image_url`, `photo_id_image_url`). This method will also copy
their name from their user profile. Prior to marking it ready, we read
this value directly from their profile, since they're free to change it.
This often happens because people put in less formal versions of their
name on signup, but realize they want something different to go on a
formal document.
Valid attempt statuses when calling this method:
`created`
Status after method completes: `ready`
Other fields that will be set by this method:
`name`
State Transitions:
`created` → `ready`
This is what happens when the user confirms to us that the pictures
they uploaded are good. Note that we don't actually do a submission
anywhere yet.
"""
# At any point prior to this, they can change their names via their
# student dashboard. But at this point, we lock the value into the
# attempt.
self.name = self.user.profile.name
self.status = "ready"
self.save()
@status_before_must_be("must_retry", "submitted", "approved", "denied")
def approve(self, user_id=None, service=""):
"""
        Approve this attempt. `user_id` is recorded as the reviewing user and
        `service` as the reviewing service.
Valid attempt statuses when calling this method:
`submitted`, `approved`, `denied`
Status after method completes: `approved`
Other fields that will be set by this method:
`reviewed_by_user_id`, `reviewed_by_service`, `error_msg`
State Transitions:
`submitted` → `approved`
This is the usual flow, whether initiated by a staff user or an
external validation service.
`approved` → `approved`
No-op. First one to approve it wins.
`denied` → `approved`
This might happen if a staff member wants to override a decision
made by an external service or another staff member (say, in
response to a support request). In this case, the previous values
of `reviewed_by_user_id` and `reviewed_by_service` will be changed
to whoever is doing the approving, and `error_msg` will be reset.
The only record that this record was ever denied would be in our
            logs. This should be a relatively rare occurrence.
"""
# If someone approves an outdated version of this, the first one wins
if self.status == "approved":
return
log.info(u"Verification for user '{user_id}' approved by '{reviewer}'.".format(
user_id=self.user, reviewer=user_id
))
self.error_msg = "" # reset, in case this attempt was denied before
self.error_code = "" # reset, in case this attempt was denied before
self.reviewing_user = user_id
self.reviewing_service = service
self.status = "approved"
self.save()
@status_before_must_be("must_retry", "submitted", "approved", "denied")
def deny(self,
error_msg,
error_code="",
reviewing_user=None,
reviewing_service=""):
"""
Deny this attempt.
Valid attempt statuses when calling this method:
`submitted`, `approved`, `denied`
Status after method completes: `denied`
Other fields that will be set by this method:
`reviewed_by_user_id`, `reviewed_by_service`, `error_msg`,
`error_code`
State Transitions:
`submitted` → `denied`
This is the usual flow, whether initiated by a staff user or an
external validation service.
`approved` → `denied`
This might happen if a staff member wants to override a decision
made by an external service or another staff member, or just correct
a mistake made during the approval process. In this case, the
previous values of `reviewed_by_user_id` and `reviewed_by_service`
will be changed to whoever is doing the denying. The only record
that this record was ever approved would be in our logs. This should
            be a relatively rare occurrence.
`denied` → `denied`
Update the error message and reviewing_user/reviewing_service. Just
lets you amend the error message in case there were additional
details to be made.
"""
log.info(u"Verification for user '{user_id}' denied by '{reviewer}'.".format(
user_id=self.user, reviewer=reviewing_user
))
self.error_msg = error_msg
self.error_code = error_code
self.reviewing_user = reviewing_user
self.reviewing_service = reviewing_service
self.status = "denied"
self.save()
@status_before_must_be("must_retry", "submitted", "approved", "denied")
def system_error(self,
error_msg,
error_code="",
reviewing_user=None,
reviewing_service=""):
"""
Mark that this attempt could not be completed because of a system error.
Status should be moved to `must_retry`. For example, if Software Secure
reported to us that they couldn't process our submission because they
couldn't decrypt the image we sent.
"""
if self.status in ["approved", "denied"]:
return # If we were already approved or denied, just leave it.
self.error_msg = error_msg
self.error_code = error_code
self.reviewing_user = reviewing_user
self.reviewing_service = reviewing_service
self.status = "must_retry"
self.save()
class SoftwareSecurePhotoVerification(PhotoVerification):
"""
Model to verify identity using a service provided by Software Secure. Much
of the logic is inherited from `PhotoVerification`, but this class
encrypts the photos.
Software Secure (http://www.softwaresecure.com/) is a remote proctoring
service that also does identity verification. A student uses their webcam
to upload two images: one of their face, one of a photo ID. Due to the
sensitive nature of the data, the following security precautions are taken:
1. The snapshot of their face is encrypted using AES-256 in CBC mode. All
       face photos are encrypted with the same key, and this key is known to
both Software Secure and edx-platform.
2. The snapshot of a user's photo ID is also encrypted using AES-256, but
the key is randomly generated using pycrypto's Random. Every verification
attempt has a new key. The AES key is then encrypted using a public key
       provided by Software Secure. We store only the RSA-encrypted AES key.
Since edx-platform does not have Software Secure's private RSA key, it
means that we can no longer even read photo ID.
3. The encrypted photos are base64 encoded and stored in an S3 bucket that
edx-platform does not have read access to.
    Note: this model handles *initial* verifications (which you must perform
at the time you register for a verified cert).
"""
# This is a base64.urlsafe_encode(rsa_encrypt(photo_id_aes_key), ss_pub_key)
# So first we generate a random AES-256 key to encrypt our photo ID with.
# Then we RSA encrypt it with Software Secure's public key. Then we base64
# encode that. The result is saved here. Actual expected length is 344.
photo_id_key = models.TextField(max_length=1024)
IMAGE_LINK_DURATION = 5 * 60 * 60 * 24 # 5 days in seconds
copy_id_photo_from = models.ForeignKey("self", null=True, blank=True)
@classmethod
def get_initial_verification(cls, user, earliest_allowed_date=None):
"""Get initial verification for a user with the 'photo_id_key'.
Arguments:
user(User): user object
earliest_allowed_date(datetime): override expiration date for initial verification
Return:
SoftwareSecurePhotoVerification (object) or None
"""
init_verification = cls.objects.filter(
user=user,
status__in=["submitted", "approved"],
created_at__gte=(
earliest_allowed_date or cls._earliest_allowed_date()
)
).exclude(photo_id_key='')
return init_verification.latest('created_at') if init_verification.exists() else None
@status_before_must_be("created")
def upload_face_image(self, img_data):
"""
Upload an image of the user's face to S3. `img_data` should be a raw
bytestream of a PNG image. This method will take the data, encrypt it
using our FACE_IMAGE_AES_KEY, encode it with base64 and save it to S3.
Yes, encoding it to base64 adds compute and disk usage without much real
benefit, but that's what the other end of this API is expecting to get.
"""
# Skip this whole thing if we're running acceptance tests or if we're
# developing and aren't interested in working on student identity
# verification functionality. If you do want to work on it, you have to
# explicitly enable these in your private settings.
if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
return
aes_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["FACE_IMAGE_AES_KEY"]
aes_key = aes_key_str.decode("hex")
s3_key = self._generate_s3_key("face")
s3_key.set_contents_from_string(encrypt_and_encode(img_data, aes_key))
@status_before_must_be("created")
def upload_photo_id_image(self, img_data):
"""
        Upload the user's photo ID image to S3. `img_data` should be a raw
bytestream of a PNG image. This method will take the data, encrypt it
using a randomly generated AES key, encode it with base64 and save it to
S3. The random key is also encrypted using Software Secure's public RSA
key and stored in our `photo_id_key` field.
Yes, encoding it to base64 adds compute and disk usage without much real
benefit, but that's what the other end of this API is expecting to get.
"""
# Skip this whole thing if we're running acceptance tests or if we're
# developing and aren't interested in working on student identity
# verification functionality. If you do want to work on it, you have to
# explicitly enable these in your private settings.
if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
# fake photo id key is set only for initial verification
self.photo_id_key = 'fake-photo-id-key'
self.save()
return
aes_key = random_aes_key()
rsa_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["RSA_PUBLIC_KEY"]
rsa_encrypted_aes_key = rsa_encrypt(aes_key, rsa_key_str)
# Upload this to S3
s3_key = self._generate_s3_key("photo_id")
s3_key.set_contents_from_string(encrypt_and_encode(img_data, aes_key))
# Update our record fields
self.photo_id_key = rsa_encrypted_aes_key.encode('base64')
self.save()
@status_before_must_be("must_retry", "ready", "submitted")
def submit(self, copy_id_photo_from=None):
"""
Submit our verification attempt to Software Secure for validation. This
will set our status to "submitted" if the post is successful, and
"must_retry" if the post fails.
Keyword Arguments:
copy_id_photo_from (SoftwareSecurePhotoVerification): If provided, re-send the ID photo
data from this attempt. This is used for reverification, in which new face photos
are sent with previously-submitted ID photos.
"""
try:
response = self.send_request(copy_id_photo_from=copy_id_photo_from)
if response.ok:
self.submitted_at = datetime.now(pytz.UTC)
self.status = "submitted"
self.save()
else:
self.status = "must_retry"
self.error_msg = response.text
self.save()
except Exception as error:
log.exception(error)
self.status = "must_retry"
self.save()
def parsed_error_msg(self):
"""
Parse the error messages we receive from SoftwareSecure
Error messages are written in the form:
`[{"photoIdReasons": ["Not provided"]}]`
Returns a list of error messages
"""
# Translates the category names and messages into something more human readable
message_dict = {
("photoIdReasons", "Not provided"): _("No photo ID was provided."),
("photoIdReasons", "Text not clear"): _("We couldn't read your name from your photo ID image."),
("generalReasons", "Name mismatch"): _("The name associated with your account and the name on your ID do not match."),
("userPhotoReasons", "Image not clear"): _("The image of your face was not clear."),
("userPhotoReasons", "Face out of view"): _("Your face was not visible in your self-photo."),
}
try:
msg_json = json.loads(self.error_msg)
msg_dict = msg_json[0]
msg = []
for category in msg_dict:
# find the messages associated with this category
category_msgs = msg_dict[category]
for category_msg in category_msgs:
msg.append(message_dict[(category, category_msg)])
return u", ".join(msg)
except (ValueError, KeyError):
# if we can't parse the message as JSON or the category doesn't
# match one of our known categories, show a generic error
log.error('PhotoVerification: Error parsing this error message: %s', self.error_msg)
return _("There was an error verifying your ID photos.")
def image_url(self, name, override_receipt_id=None):
"""
        We dynamically generate this, since we want the expiration clock to
start when the message is created, not when the record is created.
Arguments:
name (str): Name of the image (e.g. "photo_id" or "face")
Keyword Arguments:
override_receipt_id (str): If provided, use this receipt ID instead
of the ID for this attempt. This is useful for reverification
where we need to construct a URL to a previously-submitted
photo ID image.
Returns:
string: The expiring URL for the image.
"""
s3_key = self._generate_s3_key(name, override_receipt_id=override_receipt_id)
return s3_key.generate_url(self.IMAGE_LINK_DURATION)
def _generate_s3_key(self, prefix, override_receipt_id=None):
"""
Generates a key for an S3 bucket location
Example: face/4dd1add9-6719-42f7-bea0-115c008c4fca
"""
conn = S3Connection(
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["AWS_ACCESS_KEY"],
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["AWS_SECRET_KEY"]
)
bucket = conn.get_bucket(settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["S3_BUCKET"])
# Override the receipt ID if one is provided.
# This allows us to construct S3 keys for images submitted in previous attempts
# (used for reverification, where we send a new face photo with the same photo ID
# from a previous attempt).
receipt_id = self.receipt_id if override_receipt_id is None else override_receipt_id
key = Key(bucket)
key.key = "{}/{}".format(prefix, receipt_id)
return key
def _encrypted_user_photo_key_str(self):
"""
Software Secure needs to have both UserPhoto and PhotoID decrypted in
the same manner. So even though this is going to be the same for every
request, we're also using RSA encryption to encrypt the AES key for
faces.
"""
face_aes_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["FACE_IMAGE_AES_KEY"]
face_aes_key = face_aes_key_str.decode("hex")
rsa_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["RSA_PUBLIC_KEY"]
rsa_encrypted_face_aes_key = rsa_encrypt(face_aes_key, rsa_key_str)
return rsa_encrypted_face_aes_key.encode("base64")
def create_request(self, copy_id_photo_from=None):
"""
Construct the HTTP request to the photo verification service.
Keyword Arguments:
copy_id_photo_from (SoftwareSecurePhotoVerification): If provided, re-send the ID photo
data from this attempt. This is used for reverification, in which new face photos
are sent with previously-submitted ID photos.
Returns:
tuple of (header, body), where both `header` and `body` are dictionaries.
"""
access_key = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]
secret_key = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"]
scheme = "https" if settings.HTTPS == "on" else "http"
callback_url = "{}://{}{}".format(
scheme, settings.SITE_NAME, reverse('verify_student_results_callback')
)
# If we're copying the photo ID image from a previous verification attempt,
# then we need to send the old image data with the correct image key.
photo_id_url = (
self.image_url("photo_id")
if copy_id_photo_from is None
else self.image_url("photo_id", override_receipt_id=copy_id_photo_from.receipt_id)
)
photo_id_key = (
self.photo_id_key
if copy_id_photo_from is None else
copy_id_photo_from.photo_id_key
)
body = {
"EdX-ID": str(self.receipt_id),
"ExpectedName": self.name,
"PhotoID": photo_id_url,
"PhotoIDKey": photo_id_key,
"SendResponseTo": callback_url,
"UserPhoto": self.image_url("face"),
"UserPhotoKey": self._encrypted_user_photo_key_str(),
}
headers = {
"Content-Type": "application/json",
"Date": formatdate(timeval=None, localtime=False, usegmt=True)
}
_message, _sig, authorization = generate_signed_message(
"POST", headers, body, access_key, secret_key
)
headers['Authorization'] = authorization
return headers, body
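# Illustrative sketch (added, not part of the original module): the returned
# body is a JSON-serializable dict along the lines of
#   {"EdX-ID": "<receipt_id>", "ExpectedName": "<name>",
#    "PhotoID": "<expiring S3 URL>", "PhotoIDKey": "<RSA-encrypted AES key>",
#    "SendResponseTo": "<callback URL>", "UserPhoto": "<expiring S3 URL>",
#    "UserPhotoKey": "<RSA-encrypted AES key>"}
# and the headers carry Content-Type, Date and the computed Authorization.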
def request_message_txt(self):
"""
This is a text rendering of the request (headers and body) we would send
across. It is never actually used in the code, but exists for debugging
purposes -- you can call `print attempt.request_message_txt()` on the
console and get a readable rendering of the request that would be sent,
without actually sending anything.
"""
headers, body = self.create_request()
header_txt = "\n".join(
"{}: {}".format(h, v) for h, v in sorted(headers.items())
)
body_txt = json.dumps(body, indent=2, sort_keys=True, ensure_ascii=False).encode('utf-8')
return header_txt + "\n\n" + body_txt
def send_request(self, copy_id_photo_from=None):
"""
Assembles a submission to Software Secure and sends it via HTTPS.
Keyword Arguments:
copy_id_photo_from (SoftwareSecurePhotoVerification): If provided, re-send the ID photo
data from this attempt. This is used for reverification, in which new face photos
are sent with previously-submitted ID photos.
Returns:
request.Response
"""
# If AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING is True, we want to
# skip posting anything to Software Secure. We actually don't even
# create the message because that would require encryption and message
# signing that rely on settings.VERIFY_STUDENT values that aren't set
# in dev. So we just pretend like we successfully posted
if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
fake_response = requests.Response()
fake_response.status_code = 200
return fake_response
headers, body = self.create_request(copy_id_photo_from=copy_id_photo_from)
response = requests.post(
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_URL"],
headers=headers,
data=json.dumps(body, indent=2, sort_keys=True, ensure_ascii=False).encode('utf-8'),
verify=False
)
log.info("Sent request to Software Secure for receipt ID %s.", self.receipt_id)
if copy_id_photo_from is not None:
log.info(
(
"Software Secure attempt with receipt ID %s used the same photo ID "
"data as the receipt with ID %s"
),
self.receipt_id, copy_id_photo_from.receipt_id
)
log.debug("Headers:\n{}\n\n".format(headers))
log.debug("Body:\n{}\n\n".format(body))
log.debug("Return code: {}".format(response.status_code))
log.debug("Return message:\n\n{}\n\n".format(response.text))
return response
@classmethod
def verification_status_for_user(cls, user, course_id, user_enrollment_mode):
"""
Returns the verification status for use in grade report.
"""
if user_enrollment_mode not in CourseMode.VERIFIED_MODES:
return 'N/A'
user_is_verified = cls.user_is_verified(user)
if not user_is_verified:
return 'Not ID Verified'
else:
return 'ID Verified'
class VerificationDeadline(TimeStampedModel):
"""
Represent a verification deadline for a particular course.
The verification deadline is the datetime after which
users are no longer allowed to submit photos for initial verification
in a course.
Note that this is NOT the same as the "upgrade" deadline, after
which a user is no longer allowed to upgrade to a verified enrollment.
If no verification deadline record exists for a course,
then that course does not have a deadline. This means that users
can submit photos at any time.
"""
class Meta(object):
app_label = "verify_student"
course_key = CourseKeyField(
max_length=255,
db_index=True,
unique=True,
help_text=ugettext_lazy(u"The course for which this deadline applies"),
)
deadline = models.DateTimeField(
help_text=ugettext_lazy(
u"The datetime after which users are no longer allowed "
u"to submit photos for verification."
)
)
# The system prefers to set this automatically based on default settings. But
# if the field is set manually we want a way to indicate that so we don't
# overwrite the manual setting of the field.
deadline_is_explicit = models.BooleanField(default=False)
# Maintain a history of changes to deadlines for auditing purposes
history = HistoricalRecords()
ALL_DEADLINES_CACHE_KEY = "verify_student.all_verification_deadlines"
@classmethod
def set_deadline(cls, course_key, deadline, is_explicit=False):
"""
Configure the verification deadline for a course.
If `deadline` is `None`, then the course will have no verification
deadline. In this case, users will be able to verify for the course
at any time.
Arguments:
course_key (CourseKey): Identifier for the course.
deadline (datetime or None): The verification deadline.
"""
if deadline is None:
VerificationDeadline.objects.filter(course_key=course_key).delete()
else:
record, created = VerificationDeadline.objects.get_or_create(
course_key=course_key,
defaults={"deadline": deadline, "deadline_is_explicit": is_explicit}
)
if not created:
record.deadline = deadline
record.deadline_is_explicit = is_explicit
record.save()
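# Illustrative usage (added; assumes `course_key` is a CourseKey and
# `deadline` an aware datetime):
#   VerificationDeadline.set_deadline(course_key, deadline)  # create/update
#   VerificationDeadline.set_deadline(course_key, None)      # remove deadline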
@classmethod
def deadlines_for_courses(cls, course_keys):
"""
Retrieve verification deadlines for particular courses.
Arguments:
course_keys (list): List of `CourseKey`s.
Returns:
dict: Map of course keys to datetimes (verification deadlines)
"""
all_deadlines = cache.get(cls.ALL_DEADLINES_CACHE_KEY)
if all_deadlines is None:
all_deadlines = {
deadline.course_key: deadline.deadline
for deadline in VerificationDeadline.objects.all()
}
cache.set(cls.ALL_DEADLINES_CACHE_KEY, all_deadlines)
return {
course_key: all_deadlines[course_key]
for course_key in course_keys
if course_key in all_deadlines
}
@classmethod
def deadline_for_course(cls, course_key):
"""
Retrieve the verification deadline for a particular course.
Arguments:
course_key (CourseKey): The identifier for the course.
Returns:
datetime or None
"""
try:
deadline = cls.objects.get(course_key=course_key)
return deadline.deadline
except cls.DoesNotExist:
return None
@receiver(models.signals.post_save, sender=VerificationDeadline)
@receiver(models.signals.post_delete, sender=VerificationDeadline)
def invalidate_deadline_caches(sender, **kwargs): # pylint: disable=unused-argument
"""Invalidate the cached verification deadline information. """
cache.delete(VerificationDeadline.ALL_DEADLINES_CACHE_KEY)
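# Added note: after this receiver clears ALL_DEADLINES_CACHE_KEY,
# deadlines_for_courses() repopulates the cache lazily on its next call.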
class VerificationCheckpoint(models.Model):
"""Represents a point at which a user is asked to re-verify his/her
identity.
Each checkpoint is uniquely identified by a
(course_id, checkpoint_location) tuple.
"""
course_id = CourseKeyField(max_length=255, db_index=True)
checkpoint_location = models.CharField(max_length=255)
photo_verification = models.ManyToManyField(SoftwareSecurePhotoVerification)
class Meta(object):
app_label = "verify_student"
unique_together = ('course_id', 'checkpoint_location')
def __unicode__(self):
"""
Unicode representation of the checkpoint.
"""
return u"{checkpoint} in {course}".format(
checkpoint=self.checkpoint_name,
course=self.course_id
)
@lazy
def checkpoint_name(self):
"""Lazy method for getting checkpoint name of reverification block.
Return location of the checkpoint if no related assessment found in
database.
"""
checkpoint_key = UsageKey.from_string(self.checkpoint_location)
try:
checkpoint_name = modulestore().get_item(checkpoint_key).related_assessment
except ItemNotFoundError:
log.warning(
u"Verification checkpoint block with location '%s' and course id '%s' "
u"not found in database.", self.checkpoint_location, unicode(self.course_id)
)
checkpoint_name = self.checkpoint_location
return checkpoint_name
def add_verification_attempt(self, verification_attempt):
"""Add the verification attempt in M2M relation of photo_verification.
Arguments:
verification_attempt(object): SoftwareSecurePhotoVerification object
Returns:
None
"""
self.photo_verification.add(verification_attempt) # pylint: disable=no-member
def get_user_latest_status(self, user_id):
"""Get the status of the latest checkpoint attempt of the given user.
Args:
user_id(str): Id of user
Returns:
VerificationStatus object if any is found, else None
"""
try:
return self.checkpoint_status.filter(user_id=user_id).latest()
except ObjectDoesNotExist:
return None
@classmethod
def get_or_create_verification_checkpoint(cls, course_id, checkpoint_location):
"""
Get or create the verification checkpoint for given 'course_id' and
checkpoint name.
Arguments:
course_id (CourseKey): CourseKey
checkpoint_location (str): Verification checkpoint location
Raises:
IntegrityError if create fails due to concurrent create.
Returns:
VerificationCheckpoint object (existing or newly created)
"""
with transaction.atomic():
checkpoint, __ = cls.objects.get_or_create(course_id=course_id, checkpoint_location=checkpoint_location)
return checkpoint
class VerificationStatus(models.Model):
"""This model is an append-only table that represents user status changes
during the verification process.
A verification status represents a user’s progress through the verification
process for a particular checkpoint.
"""
SUBMITTED_STATUS = "submitted"
APPROVED_STATUS = "approved"
DENIED_STATUS = "denied"
ERROR_STATUS = "error"
VERIFICATION_STATUS_CHOICES = (
(SUBMITTED_STATUS, SUBMITTED_STATUS),
(APPROVED_STATUS, APPROVED_STATUS),
(DENIED_STATUS, DENIED_STATUS),
(ERROR_STATUS, ERROR_STATUS)
)
checkpoint = models.ForeignKey(VerificationCheckpoint, related_name="checkpoint_status")
user = models.ForeignKey(User)
status = models.CharField(choices=VERIFICATION_STATUS_CHOICES, db_index=True, max_length=32)
timestamp = models.DateTimeField(auto_now_add=True)
response = models.TextField(null=True, blank=True)
error = models.TextField(null=True, blank=True)
class Meta(object):
app_label = "verify_student"
get_latest_by = "timestamp"
verbose_name = "Verification Status"
verbose_name_plural = "Verification Statuses"
@classmethod
def add_verification_status(cls, checkpoint, user, status):
"""Create new verification status object.
Arguments:
checkpoint(VerificationCheckpoint): VerificationCheckpoint object
user(User): user object
status(str): Status from VERIFICATION_STATUS_CHOICES
Returns:
None
"""
cls.objects.create(checkpoint=checkpoint, user=user, status=status)
@classmethod
def add_status_from_checkpoints(cls, checkpoints, user, status):
"""Create new verification status objects for a user against the given
checkpoints.
Arguments:
checkpoints(list): list of VerificationCheckpoint objects
user(User): user object
status(str): Status from VERIFICATION_STATUS_CHOICES
Returns:
None
"""
for checkpoint in checkpoints:
cls.objects.create(checkpoint=checkpoint, user=user, status=status)
@classmethod
def get_user_status_at_checkpoint(cls, user, course_key, location):
"""
Get the user's latest status at the checkpoint.
Arguments:
user (User): The user whose status we are retrieving.
course_key (CourseKey): The identifier for the course.
location (UsageKey): The location of the checkpoint in the course.
Returns:
unicode or None
"""
try:
return cls.objects.filter(
user=user,
checkpoint__course_id=course_key,
checkpoint__checkpoint_location=unicode(location),
).latest().status
except cls.DoesNotExist:
return None
@classmethod
def get_user_attempts(cls, user_id, course_key, checkpoint_location):
"""
Get re-verification attempts against a user for a given 'checkpoint'
and 'course_id'.
Arguments:
user_id (str): User Id string
course_key (str): A CourseKey of a course
checkpoint_location (str): Verification checkpoint location
Returns:
Count of re-verification attempts
"""
return cls.objects.filter(
user_id=user_id,
checkpoint__course_id=course_key,
checkpoint__checkpoint_location=checkpoint_location,
status=cls.SUBMITTED_STATUS
).count()
@classmethod
def get_location_id(cls, photo_verification):
"""Get the location ID of reverification XBlock.
Args:
photo_verification(object): SoftwareSecurePhotoVerification object
Return:
Location ID of the XBlock if any, else an empty string
"""
try:
verification_status = cls.objects.filter(checkpoint__photo_verification=photo_verification).latest()
return verification_status.checkpoint.checkpoint_location
except cls.DoesNotExist:
return ""
@classmethod
def get_all_checkpoints(cls, user_id, course_key):
"""Return dict of all the checkpoints with their status.
Args:
user_id(int): Id of user.
course_key(unicode): Unicode of course key
Returns:
dict: {checkpoint:status}
"""
all_checks_points = cls.objects.filter(
user_id=user_id, checkpoint__course_id=course_key
)
check_points = {}
for check in all_checks_points:
check_points[check.checkpoint.checkpoint_location] = check.status
return check_points
@classmethod
def cache_key_name(cls, user_id, course_key):
"""Return the name of the key to use to cache the current configuration
Args:
user_id(int): Id of user.
course_key(unicode): Unicode of course key
Returns:
Unicode cache key
"""
return u"verification.{}.{}".format(user_id, unicode(course_key))
@receiver(models.signals.post_save, sender=VerificationStatus)
@receiver(models.signals.post_delete, sender=VerificationStatus)
def invalidate_verification_status_cache(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name
"""Invalidate the cache of VerificationStatus model. """
cache_key = VerificationStatus.cache_key_name(
instance.user.id,
unicode(instance.checkpoint.course_id)
)
cache.delete(cache_key)
# DEPRECATED: this feature has been permanently enabled.
# Once the application code has been updated in production,
# this table can be safely deleted.
class InCourseReverificationConfiguration(ConfigurationModel):
"""Configure in-course re-verification.
Enable or disable in-course re-verification feature.
When this flag is disabled, the "in-course re-verification" feature
will be disabled.
When the flag is enabled, the "in-course re-verification" feature
will be enabled.
"""
pass
class IcrvStatusEmailsConfiguration(ConfigurationModel):
"""Toggle in-course reverification (ICRV) status emails
Disabled by default. When disabled, ICRV status emails will not be sent.
When enabled, ICRV status emails are sent.
"""
pass
class SkippedReverification(models.Model):
"""Model for tracking skipped Reverification of a user against a specific
course.
If a user skipped a reverification checkpoint for a specific course, then
that user will no longer see the reverification link for that course.
"""
user = models.ForeignKey(User)
course_id = CourseKeyField(max_length=255, db_index=True)
checkpoint = models.ForeignKey(VerificationCheckpoint, related_name="skipped_checkpoint")
created_at = models.DateTimeField(auto_now_add=True)
class Meta(object):
app_label = "verify_student"
unique_together = (('user', 'course_id'),)
@classmethod
@transaction.atomic
def add_skipped_reverification_attempt(cls, checkpoint, user_id, course_id):
"""Create skipped reverification object.
Arguments:
checkpoint(VerificationCheckpoint): VerificationCheckpoint object
user_id(str): User Id of currently logged in user
course_id(CourseKey): CourseKey
Returns:
None
"""
cls.objects.create(checkpoint=checkpoint, user_id=user_id, course_id=course_id)
@classmethod
def check_user_skipped_reverification_exists(cls, user_id, course_id):
"""Check existence of a user's skipped re-verification attempt for a
specific course.
Arguments:
user_id(str): user id
course_id(CourseKey): CourseKey
Returns:
Boolean
"""
has_skipped = cls.objects.filter(user_id=user_id, course_id=course_id).exists()
return has_skipped
@classmethod
def cache_key_name(cls, user_id, course_key):
"""Return the name of the key to use to cache the current configuration
Arguments:
user(User): user object
course_key(CourseKey): CourseKey
Returns:
string: cache key name
"""
return u"skipped_reverification.{}.{}".format(user_id, unicode(course_key))
@receiver(models.signals.post_save, sender=SkippedReverification)
@receiver(models.signals.post_delete, sender=SkippedReverification)
def invalidate_skipped_verification_cache(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name
"""Invalidate the cache of skipped verification model. """
cache_key = SkippedReverification.cache_key_name(
instance.user.id,
unicode(instance.course_id)
)
cache.delete(cache_key)
| 0.001793 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
class crm_team(osv.Model):
_name = "crm.team"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Sales Team"
_order = "name"
_period_number = 5
def _get_default_team_id(self, cr, uid, context=None, user_id=None):
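# Added note: the default team is resolved in order -- a team the user
# leads or is a member of, then context['default_team_id'], then the
# 'sales_team.team_sales_department' XML id.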
if context is None:
context = {}
if user_id is None:
user_id = uid
team_ids = self.search(cr, uid, ['|', ('user_id', '=', user_id), ('member_ids', 'in', user_id)], limit=1, context=context)
team_id = team_ids[0] if team_ids else False
if not team_id and context.get('default_team_id'):
team_id = context['default_team_id']
if not team_id:
team_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'sales_team.team_sales_department')
return team_id
_columns = {
'name': fields.char('Sales Team', size=64, required=True, translate=True),
'code': fields.char('Code', size=8),
'active': fields.boolean('Active', help="If the active field is set to "\
"false, it will allow you to hide the sales team without removing it."),
'company_id': fields.many2one('res.company', 'Company'),
'user_id': fields.many2one('res.users', 'Team Leader'),
'member_ids': fields.one2many('res.users', 'sale_team_id', 'Team Members'),
'reply_to': fields.char('Reply-To', size=64, help="The email address put in the 'Reply-To' of all emails sent by Odoo about cases in this sales team"),
'working_hours': fields.float('Working Hours', digits=(16, 2)),
'color': fields.integer('Color Index'),
}
_defaults = {
'active': 1,
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'crm.team', context=context),
}
_sql_constraints = [
('code_uniq', 'unique (code)', 'The code of the sales team must be unique !')
]
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
context['mail_create_nosubscribe'] = True
return super(crm_team, self).create(cr, uid, values, context=context)
class res_partner(osv.Model):
_inherit = 'res.partner'
_columns = {
'team_id': fields.many2one('crm.team', 'Sales Team', oldname='section_id'),
}
class res_users(osv.Model):
_inherit = 'res.users'
_columns = {
'sale_team_id': fields.many2one('crm.team','Sales Team')
}
| 0.005714 |
from sympy.core.assumptions import StdFactKB
from sympy.core import S, Pow, Symbol
from sympy.core.expr import AtomicExpr
from sympy.core.compatibility import range
from sympy import diff as df, sqrt, ImmutableMatrix as Matrix
from sympy.vector.coordsysrect import CoordSysCartesian
from sympy.vector.basisdependent import (BasisDependent, BasisDependentAdd,
BasisDependentMul, BasisDependentZero)
from sympy.vector.dyadic import BaseDyadic, Dyadic, DyadicAdd
class Vector(BasisDependent):
"""
Super class for all Vector classes.
Ideally, neither this class nor any of its subclasses should be
instantiated by the user.
"""
is_Vector = True
_op_priority = 12.0
@property
def components(self):
"""
Returns the components of this vector in the form of a
Python dictionary mapping BaseVector instances to the
corresponding measure numbers.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> C = CoordSysCartesian('C')
>>> v = 3*C.i + 4*C.j + 5*C.k
>>> v.components
{C.i: 3, C.j: 4, C.k: 5}
"""
# The '_components' attribute is defined according to the
# subclass of Vector the instance belongs to.
return self._components
def magnitude(self):
"""
Returns the magnitude of this vector.
"""
return sqrt(self & self)
def normalize(self):
"""
Returns the normalized version of this vector.
"""
return self / self.magnitude()
def dot(self, other):
"""
Returns the dot product of this Vector, either with another
Vector, or a Dyadic, or a Del operator.
If 'other' is a Vector, returns the dot product scalar (SymPy
expression).
If 'other' is a Dyadic, the dot product is returned as a Vector.
If 'other' is an instance of Del, returns the directional
derivative operator as a Python function. If this function is
applied to a scalar expression, it returns the directional
derivative of the scalar field with respect to this Vector.
Parameters
==========
other: Vector/Dyadic/Del
The Vector or Dyadic we are dotting with, or a Del operator.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> C = CoordSysCartesian('C')
>>> C.i.dot(C.j)
0
>>> C.i & C.i
1
>>> v = 3*C.i + 4*C.j + 5*C.k
>>> v.dot(C.k)
5
>>> (C.i & C.delop)(C.x*C.y*C.z)
C.y*C.z
>>> d = C.i.outer(C.i)
>>> C.i.dot(d)
C.i
"""
from sympy.vector.functions import express
# Check special cases
if isinstance(other, Dyadic):
if isinstance(self, VectorZero):
return Vector.zero
outvec = Vector.zero
for k, v in other.components.items():
vect_dot = k.args[0].dot(self)
outvec += vect_dot * v * k.args[1]
return outvec
from sympy.vector.deloperator import Del
if not isinstance(other, Vector) and not isinstance(other, Del):
raise TypeError(str(other) + " is not a vector, dyadic or " +
"del operator")
# Check if the other is a del operator
if isinstance(other, Del):
def directional_derivative(field):
field = express(field, other.system, variables=True)
out = self.dot(other._i) * df(field, other._x)
out += self.dot(other._j) * df(field, other._y)
out += self.dot(other._k) * df(field, other._z)
if out == 0 and isinstance(field, Vector):
out = Vector.zero
return out
return directional_derivative
if isinstance(self, VectorZero) or isinstance(other, VectorZero):
return S(0)
v1 = express(self, other._sys)
v2 = express(other, other._sys)
dotproduct = S(0)
for x in other._sys.base_vectors():
dotproduct += (v1.components.get(x, 0) *
v2.components.get(x, 0))
return dotproduct
def __and__(self, other):
return self.dot(other)
__and__.__doc__ = dot.__doc__
def cross(self, other):
"""
Returns the cross product of this Vector with another Vector or
Dyadic instance.
The cross product is a Vector, if 'other' is a Vector. If 'other'
is a Dyadic, this returns a Dyadic instance.
Parameters
==========
other: Vector/Dyadic
The Vector or Dyadic we are crossing with.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> C = CoordSysCartesian('C')
>>> C.i.cross(C.j)
C.k
>>> C.i ^ C.i
0
>>> v = 3*C.i + 4*C.j + 5*C.k
>>> v ^ C.i
5*C.j + (-4)*C.k
>>> d = C.i.outer(C.i)
>>> C.j.cross(d)
(-1)*(C.k|C.i)
"""
# Check special cases
if isinstance(other, Dyadic):
if isinstance(self, VectorZero):
return Dyadic.zero
outdyad = Dyadic.zero
for k, v in other.components.items():
cross_product = self.cross(k.args[0])
outer = cross_product.outer(k.args[1])
outdyad += v * outer
return outdyad
elif not isinstance(other, Vector):
raise TypeError(str(other) + " is not a vector")
elif (isinstance(self, VectorZero) or
isinstance(other, VectorZero)):
return Vector.zero
# Compute cross product
def _det(mat):
"""This is needed as a little method for to find the determinant
of a list in python.
SymPy's Matrix won't take in Vector, so need a custom function.
The user shouldn't be calling this.
"""
return (mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] *
mat[2][1]) +
mat[0][1] * (mat[1][2] * mat[2][0] - mat[1][0] *
mat[2][2]) +
mat[0][2] * (mat[1][0] * mat[2][1] - mat[1][1] *
mat[2][0]))
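# Added note: for the formal matrix [[i, j, k], [a1, a2, a3], [b1, b2, b3]]
# _det expands along the first row,
#   i*(a2*b3 - a3*b2) + j*(a3*b1 - a1*b3) + k*(a1*b2 - a2*b1),
# which is exactly the component form of the cross product.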
outvec = Vector.zero
for system, vect in other.separate().items():
tempi = system.i
tempj = system.j
tempk = system.k
tempm = [[tempi, tempj, tempk],
[self & tempi, self & tempj, self & tempk],
[vect & tempi, vect & tempj, vect & tempk]]
outvec += _det(tempm)
return outvec
def __xor__(self, other):
return self.cross(other)
__xor__.__doc__ = cross.__doc__
def outer(self, other):
"""
Returns the outer product of this vector with another, in the
form of a Dyadic instance.
Parameters
==========
other : Vector
The Vector with respect to which the outer product is to
be computed.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> N = CoordSysCartesian('N')
>>> N.i.outer(N.j)
(N.i|N.j)
"""
# Handle the special cases
if not isinstance(other, Vector):
raise TypeError("Invalid operand for outer product")
elif (isinstance(self, VectorZero) or
isinstance(other, VectorZero)):
return Dyadic.zero
# Iterate over components of both the vectors to generate
# the required Dyadic instance
args = []
for k1, v1 in self.components.items():
for k2, v2 in other.components.items():
args.append((v1 * v2) * BaseDyadic(k1, k2))
return DyadicAdd(*args)
def projection(self, other, scalar=False):
"""
Returns the vector or scalar projection of the 'other' on 'self'.
Examples
========
>>> from sympy.vector.coordsysrect import CoordSysCartesian
>>> from sympy.vector.vector import Vector, BaseVector
>>> C = CoordSysCartesian('C')
>>> i, j, k = C.base_vectors()
>>> v1 = i + j + k
>>> v2 = 3*i + 4*j
>>> v1.projection(v2)
7/3*C.i + 7/3*C.j + 7/3*C.k
>>> v1.projection(v2, scalar=True)
7/3
"""
if self.equals(Vector.zero):
return S.Zero if scalar else Vector.zero
if scalar:
return self.dot(other) / self.dot(self)
else:
return self.dot(other) / self.dot(self) * self
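# Added note: this is the standard projection formula
#   proj_self(other) = (self . other / self . self) * self,
# with the scalar variant omitting the trailing "* self".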
def __or__(self, other):
return self.outer(other)
__or__.__doc__ = outer.__doc__
def to_matrix(self, system):
"""
Returns the matrix form of this vector with respect to the
specified coordinate system.
Parameters
==========
system : CoordSysCartesian
The system wrt which the matrix form is to be computed
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> C = CoordSysCartesian('C')
>>> from sympy.abc import a, b, c
>>> v = a*C.i + b*C.j + c*C.k
>>> v.to_matrix(C)
Matrix([
[a],
[b],
[c]])
"""
return Matrix([self.dot(unit_vec) for unit_vec in
system.base_vectors()])
def separate(self):
"""
The constituents of this vector in different coordinate systems,
as per its definition.
Returns a dict mapping each CoordSysCartesian to the corresponding
constituent Vector.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> R1 = CoordSysCartesian('R1')
>>> R2 = CoordSysCartesian('R2')
>>> v = R1.i + R2.i
>>> v.separate() == {R1: R1.i, R2: R2.i}
True
"""
parts = {}
for vect, measure in self.components.items():
parts[vect.system] = (parts.get(vect.system, Vector.zero) +
vect * measure)
return parts
class BaseVector(Vector, AtomicExpr):
"""
Class to denote a base vector.
Unicode pretty forms in Python 2 should use the prefix ``u``.
"""
def __new__(cls, name, index, system, pretty_str, latex_str):
name = str(name)
pretty_str = str(pretty_str)
latex_str = str(latex_str)
# Verify arguments
if index not in range(0, 3):
raise ValueError("index must be 0, 1 or 2")
if not isinstance(system, CoordSysCartesian):
raise TypeError("system should be a CoordSysCartesian")
# Initialize an object
obj = super(BaseVector, cls).__new__(cls, Symbol(name), S(index),
system, Symbol(pretty_str),
Symbol(latex_str))
# Assign important attributes
obj._base_instance = obj
obj._components = {obj: S(1)}
obj._measure_number = S(1)
obj._name = name
obj._pretty_form = u'' + pretty_str
obj._latex_form = latex_str
obj._system = system
assumptions = {'commutative': True}
obj._assumptions = StdFactKB(assumptions)
# This attr is used for re-expression to one of the systems
# involved in the definition of the Vector. Applies to
# VectorMul and VectorAdd too.
obj._sys = system
return obj
@property
def system(self):
return self._system
def __str__(self, printer=None):
return self._name
@property
def free_symbols(self):
return {self}
__repr__ = __str__
_sympystr = __str__
class VectorAdd(BasisDependentAdd, Vector):
"""
Class to denote sum of Vector instances.
"""
def __new__(cls, *args, **options):
obj = BasisDependentAdd.__new__(cls, *args, **options)
return obj
def __str__(self, printer=None):
ret_str = ''
items = list(self.separate().items())
items.sort(key=lambda x: x[0].__str__())
for system, vect in items:
base_vects = system.base_vectors()
for x in base_vects:
if x in vect.components:
temp_vect = self.components[x] * x
ret_str += temp_vect.__str__(printer) + " + "
return ret_str[:-3]
__repr__ = __str__
_sympystr = __str__
class VectorMul(BasisDependentMul, Vector):
"""
Class to denote products of scalars and BaseVectors.
"""
def __new__(cls, *args, **options):
obj = BasisDependentMul.__new__(cls, *args, **options)
return obj
@property
def base_vector(self):
""" The BaseVector involved in the product. """
return self._base_instance
@property
def measure_number(self):
""" The scalar expression involved in the defition of
this VectorMul.
"""
return self._measure_number
class VectorZero(BasisDependentZero, Vector):
"""
Class to denote a zero vector
"""
_op_priority = 12.1
_pretty_form = u'0'
_latex_form = r'\mathbf{\hat{0}}'
def __new__(cls):
obj = BasisDependentZero.__new__(cls)
return obj
def _vect_div(one, other):
""" Helper for division involving vectors. """
if isinstance(one, Vector) and isinstance(other, Vector):
raise TypeError("Cannot divide two vectors")
elif isinstance(one, Vector):
if other == S.Zero:
raise ValueError("Cannot divide a vector by zero")
return VectorMul(one, Pow(other, S.NegativeOne))
else:
raise TypeError("Invalid division involving a vector")
Vector._expr_type = Vector
Vector._mul_func = VectorMul
Vector._add_func = VectorAdd
Vector._zero_func = VectorZero
Vector._base_func = BaseVector
Vector._div_helper = _vect_div
Vector.zero = VectorZero()
| 0.00014 |
#!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_certificatemanagementprofile
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of CertificateManagementProfile Avi RESTful Object
description:
- This module is used to configure CertificateManagementProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
name:
description:
- Name of the certificate management profile.
required: true
script_params:
description:
- List of customparams.
script_path:
description:
- Script_path of certificatemanagementprofile.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create CertificateManagementProfile object
avi_certificatemanagementprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_certificatemanagementprofile
"""
RETURN = '''
obj:
description: CertificateManagementProfile (api/certificatemanagementprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
name=dict(type='str', required=True),
script_params=dict(type='list',),
script_path=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'certificatemanagementprofile',
set([]))
if __name__ == '__main__':
main()
| 0.001322 |
# -*- coding: utf-8 -*-
"""Dump username, per-student anonymous id, and per-course anonymous id triples as CSV.
Give instructors easy access to the mapping from anonymized IDs to user IDs
with a simple Django management command to generate a CSV mapping. To run, use
the following:
./manage.py lms anonymized_id_mapping COURSE_ID
"""
import csv
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from student.models import anonymous_id_for_user
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class Command(BaseCommand):
"""Add our handler to the space where django-admin looks up commands."""
# TODO: revisit now that rake has been deprecated
# It appears that with the way Rake invokes these commands, we can't
# have more than one arg passed through...annoying.
args = ("course_id", )
help = """Export a CSV mapping usernames to anonymized ids
Exports a CSV document mapping each username in the specified course to
the anonymized, unique user ID.
"""
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("Usage: unique_id_mapping %s" %
" ".join(("<%s>" % arg for arg in Command.args)))
course_key = SlashSeparatedCourseKey.from_deprecated_string(args[0])
# Generate the output filename from the course ID.
# Change slashes to dashes first, and then append .csv extension.
output_filename = course_key.to_deprecated_string().replace('/', '-') + ".csv"
# Figure out which students are enrolled in the course
students = User.objects.filter(courseenrollment__course_id=course_key)
if len(students) == 0:
self.stdout.write("No students enrolled in %s" % course_key.to_deprecated_string())
return
# Write mapping to output file in CSV format with a simple header
try:
with open(output_filename, 'wb') as output_file:
csv_writer = csv.writer(output_file)
csv_writer.writerow((
"User ID",
"Per-Student anonymized user ID",
"Per-course anonymized user id"
))
for student in students:
csv_writer.writerow((
student.id,
anonymous_id_for_user(student, None),
anonymous_id_for_user(student, course_key)
))
except IOError:
raise CommandError("Error writing to file: %s" % output_filename)
| 0.001513 |
# -*- coding: utf8 -*-
from pycraft.common.util import names
class MetaData:
class Key:
FLAGS = 0
AIR = 1
NAMETAG = 2
SHOW_NAMETAG = 3
SILENT = 4
POTION_COLOR = 7
POTION_AMBIENT = 8
UNKNOWN_12 = 12
UNKNOWN_14 = 14
NO_AI = 15
PLAYER_FLAGS = 16
PLAYER_BED_POSITION = 17
UNKNOWN_18 = 18
UNKNOWN_19 = 19
UNKNOWN_20 = 20
UNKNOWN_21 = 21
UNKNOWN_16 = 48 # 16 + 32
UNKNOWN_17 = 49 # 17 + 32
UNKNOWN_17_2 = 50
UNKNOWN_17_3 = 51
class DataType:
BYTE = 0
SHORT = 1
INT = 2
FLOAT = 3
STRING = 4
SLOT = 5
POS = 6
ROTATION = 7
LONG = 8
TYPE_MAP = {
Key.NAMETAG : DataType.STRING,
Key.PLAYER_BED_POSITION : DataType.POS,
Key.FLAGS : DataType.BYTE,
Key.AIR : DataType.SHORT,
Key.SHOW_NAMETAG : DataType.BYTE,
Key.SILENT : DataType.BYTE,
Key.NO_AI : DataType.BYTE,
Key.POTION_COLOR : DataType.INT,
Key.POTION_AMBIENT : DataType.BYTE,
Key.PLAYER_FLAGS : DataType.BYTE,
Key.UNKNOWN_12 : DataType.BYTE,
Key.UNKNOWN_14 : DataType.BYTE,
Key.UNKNOWN_16 : DataType.SHORT,
Key.UNKNOWN_17 : DataType.ROTATION,
Key.UNKNOWN_17_2 : DataType.BYTE,
Key.UNKNOWN_17_3 : DataType.SHORT,
Key.UNKNOWN_18 : DataType.BYTE,
Key.UNKNOWN_19 : DataType.BYTE,
Key.UNKNOWN_20 : DataType.BYTE,
Key.UNKNOWN_21 : DataType.BYTE,
}
KEY_NAMES = names(Key)
def __init__(self, meta=None):
if meta is None:
self._values = {
self.Key.FLAGS : 0,
self.Key.AIR : 300,
self.Key.NAMETAG : '',
self.Key.SHOW_NAMETAG : 0,
self.Key.SILENT : 0,
self.Key.NO_AI : 0,
}
else:
self._values = dict(meta._values)
def clone(self):
o = self.__class__.__new__(self.__class__)
o._values = dict(self._values)
return o
def __str__(self):
return str(dict(
(self.KEY_NAMES[k], v) for k,v in self._values.items()))
def set(self, key, value, data_type=None):
# TODO: revisit this logic
if key == 17:
if data_type == self.DataType.ROTATION:
key = self.Key.UNKNOWN_17
if data_type == self.DataType.BYTE:
key = self.Key.UNKNOWN_17_2
if data_type == self.DataType.SHORT:
key = self.Key.UNKNOWN_17_3
if key == 16 and data_type == self.DataType.SHORT:
key = self.Key.UNKNOWN_16
assert data_type == None or data_type == self.TYPE_MAP[key]
self._values[key] = value
def get(self, key):
return (self.TYPE_MAP[key], self._values[key])
def keys(self):
return self._values.keys()
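# Minimal usage sketch (added, not part of the original module):
#   meta = MetaData()
#   meta.set(MetaData.Key.NAMETAG, 'Steve')
#   meta.get(MetaData.Key.NAMETAG)  # -> (MetaData.DataType.STRING, 'Steve')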
| 0.010702 |
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
import visvis as vv
def axis(command, axes=None):
""" axis(command, axes=None)
Convenience function to set axis properties. Note that all functionality
can also be applied via the properties of the Axis object.
Parameters
----------
command : string
The setting command to apply. See below.
axes : Axes instance
The axes to apply the setting to. Uses the current axes by default.
Possible string commands
------------------------
* off: hide the axis (Axes.axis.visible = False)
* on: show the axis (Axes.axis.visible = True)
* equal: make a circle be displayed circular (Axes.daspectAuto = False)
* auto: change the range for each dimension independently (Axes.daspectAuto = True)
* tight: show all data (Axes.SetLimits())
* ij: flip the y-axis (make second element of Axes.daspect negative)
* xy: restore the default y-axis orientation (make all elements of Axes.daspect positive)
If you want to set an Axes' limits, use Axes.SetLimits(xlim, ylim, zlim).
"""
# Get axes
if axes is None:
axes = vv.gca()
if command == 'off':
axes.axis.visible = 0
elif command == 'on':
axes.axis.visible = 1
elif command == 'equal':
axes.daspectAuto = 0
elif command == 'auto':
axes.daspectAuto = 1
elif command == 'tight':
axes.SetLimits()
elif command == 'ij':
da = [abs(tmp) for tmp in axes.daspect]
axes.daspect = da[0], -abs(da[1]), da[2]
elif command == 'xy':
da = [abs(tmp) for tmp in axes.daspect]
axes.daspect = da[0], abs(da[1]), da[2]
else:
raise ValueError('Unknown command in vv.axis().')
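# Minimal usage sketch (added, not part of the original module):
#   import visvis as vv
#   vv.plot([1, 2, 3])
#   vv.axis('tight')  # show all data in the current axes
#   vv.axis('off')    # hide the axis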
| 0.006359 |
import math
import numpy as np
import functools
memoize = functools.lru_cache()
def save_array_as_image(array, path, mode):
from PIL import Image
height, width = array.shape
image = Image.frombuffer(mode, (width, height), np.ascontiguousarray(array).data, 'raw', mode, 0, 1)
image.save(path)
def read_image(path):
from PIL import Image
image = Image.open(path)
return image
def point(x, y, z=1, w=1):
return np.array([x, y, z, w], dtype=np.float32).T
def color(r, g, b, a=1):
return np.array([r, g, b, a], dtype=np.float32).T
def rotate_left(p):
return point(-p[1], p[0], 0)
def normalize(vector):
return vector / math.sqrt(sum(vector * vector))
def make_color_grammar():
import re
g = { }
g['digit'] = r'[0-9a-fA-F]'
g['ddigit'] = r'(?:{digit}{{2}})'.format(**g)
g['hex_color'] = r'^#?(?:(?P<double>{ddigit}{{3,4}})|(?P<single>{digit}{{3,4}}))$'.format(**g)
for key, value in g.items():
g[key] = re.compile(value)
return g
color_grammar = make_color_grammar()
def parse_color(string):
g = color_grammar
m = g['hex_color'].match(string)
if m is None:
return None
single = m.group('single')
if single is not None:
R, G, B = single[0], single[1], single[2]
A = 'f' if len(single) == 3 else single[3]
return tuple(int(2*v, 16) for v in (R, G, B, A))
double = m.group('double')
if double is not None:
R, G, B = double[0:2], double[2:4], double[4:6]
A = 'ff' if len(double) == 6 else double[6:8]
return tuple(int(v, 16) for v in (R, G, B, A))
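# Examples (added comment), following the grammar above:
#   parse_color('#fff')      -> (255, 255, 255, 255)  # short form, alpha defaults to 'f'
#   parse_color('#11223344') -> (17, 34, 51, 68)      # long form with explicit alpha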
def render_mesh(mesh, tile_shape=(16, 16), sample_rate=4):
from .Pixel import FloatPixel
from .TileCache import TileCache
from .capi import fill_micropolygon_mesh, generate_numpy_begin
cache = TileCache(tile_shape, sample_rate, FloatPixel)
mesh_bounds = mesh.outer_bounds
mesh_rows, mesh_columns = mesh.buffer.shape
for tile in cache.get_tiles_for_bounds(mesh_bounds):
tile_rows, tile_columns = tile.buffer.shape
mesh_buffer_ptr = generate_numpy_begin(mesh.buffer)
mesh_bounds_ptr = generate_numpy_begin(mesh.bounds)
coordinate_image_ptr = generate_numpy_begin(tile.coordinate_image)
tile_bounds = tile.bounds
tile_buffer_ptr = generate_numpy_begin(tile.buffer)
fill_micropolygon_mesh(
mesh_rows, mesh_columns,
mesh_buffer_ptr,
mesh_bounds_ptr,
tile_rows, tile_columns,
tile_bounds,
coordinate_image_ptr,
tile_buffer_ptr
)
return cache
def read_texture(path):
from PIL import Image
from .Pixel import FloatPixel
image = Image.open(path)
if image.mode != 'RGBA':
image = image.convert('RGBA')
out = np.array(image).astype(np.float32) / 255.
out = np.squeeze(out.view(FloatPixel))
return out
def n_wise(seq, n):
from itertools import islice, tee
iters = [ islice(g, i, None) for i, g in enumerate(tee(iter(seq), n)) ]
yield from zip(*iters)
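# Example (added comment): n_wise yields a sliding window of width n, e.g.
#   list(n_wise('abcd', 2)) -> [('a', 'b'), ('b', 'c'), ('c', 'd')]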
def points_are_close(p0, p1, tol=1e-5):
return np.linalg.norm(p1 - p0) < tol
| 0.003698 |
# Copyright 2012, Tim Bielawa <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import datetime
import sys
import time
from termios import tcflush, TCIFLUSH
from ansible.errors import *
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
''' pauses execution for a length of time, or until input is received '''
PAUSE_TYPES = ['seconds', 'minutes', 'prompt', '']
BYPASS_HOST_LOOP = True
def run(self, tmp=None, task_vars=dict()):
''' run the pause action module '''
duration_unit = 'minutes'
prompt = None
seconds = None
result = dict(
changed = False,
rc = 0,
stderr = '',
stdout = '',
start = None,
stop = None,
delta = None,
)
# FIXME: not sure if we can get this info directly like this anymore?
#hosts = ', '.join(self.runner.host_set)
# If 'args' is empty, then this is the default prompted pause
if self._task.args is None or len(self._task.args.keys()) == 0:
pause_type = 'prompt'
#prompt = "[%s]\nPress enter to continue:\n" % hosts
prompt = "[%s]\nPress enter to continue:\n" % self._task.get_name().strip()
# Do 'minutes' or 'seconds' keys exist in 'args'?
elif 'minutes' in self._task.args or 'seconds' in self._task.args:
try:
if 'minutes' in self._task.args:
pause_type = 'minutes'
# The time() command operates in seconds so we need to
# recalculate for minutes=X values.
seconds = int(self._task.args['minutes']) * 60
else:
pause_type = 'seconds'
seconds = int(self._task.args['seconds'])
duration_unit = 'seconds'
except ValueError, e:
return dict(failed=True, msg="non-integer value given for prompt duration:\n%s" % str(e))
# Is 'prompt' a key in 'args'?
elif 'prompt' in self._task.args:
pause_type = 'prompt'
#prompt = "[%s]\n%s:\n" % (hosts, self._task.args['prompt'])
prompt = "[%s]\n%s:\n" % (self._task.get_name().strip(), self._task.args['prompt'])
# I have no idea what you're trying to do. But it's so wrong.
else:
return dict(failed=True, msg="invalid pause type given. must be one of: %s" % ", ".join(self.PAUSE_TYPES))
#vv("created 'pause' ActionModule: pause_type=%s, duration_unit=%s, calculated_seconds=%s, prompt=%s" % \
# (self.pause_type, self.duration_unit, self.seconds, self.prompt))
########################################################################
# Begin the hard work!
start = time.time()
result['start'] = str(datetime.datetime.now())
# FIXME: this is all very broken right now, as prompting from the worker side
# is not really going to be supported, and actions marked as BYPASS_HOST_LOOP
# probably should not be run through the executor engine at all. Also, ctrl+c
# is now captured on the parent thread, so it can't be caught here via the
# KeyboardInterrupt exception.
try:
if not pause_type == 'prompt':
print "(^C-c = continue early, ^C-a = abort)"
#print("[%s]\nPausing for %s seconds" % (hosts, seconds))
print("[%s]\nPausing for %s seconds" % (self._task.get_name().strip(), seconds))
time.sleep(seconds)
else:
# Clear out any unflushed buffered input which would
# otherwise be consumed by raw_input() prematurely.
#tcflush(sys.stdin, TCIFLUSH)
result['user_input'] = raw_input(prompt.encode(sys.stdout.encoding))
except KeyboardInterrupt:
while True:
print '\nAction? (a)bort/(c)ontinue: '
c = getch()
if c == 'c':
# continue playbook evaluation
break
elif c == 'a':
# abort further playbook evaluation
raise AnsibleError('user requested abort!')
finally:
duration = time.time() - start
result['stop'] = str(datetime.datetime.now())
result['delta'] = int(duration)
if duration_unit == 'minutes':
duration = round(duration / 60.0, 2)
else:
duration = round(duration, 2)
result['stdout'] = "Paused for %s %s" % (duration, duration_unit)
return result
| 0.007797 |
#! /usr/bin/env python
"""Show file statistics by extension."""
from __future__ import print_function
import os
import sys
class Stats:
def __init__(self):
self.stats = {}
def statargs(self, args):
for arg in args:
if os.path.isdir(arg):
self.statdir(arg)
elif os.path.isfile(arg):
self.statfile(arg)
else:
sys.stderr.write("Can't find %s\n" % arg)
self.addstats("<???>", "unknown", 1)
def statdir(self, dir):
self.addstats("<dir>", "dirs", 1)
try:
names = sorted(os.listdir(dir))
except os.error as err:
sys.stderr.write("Can't list %s: %s\n" % (dir, err))
self.addstats("<dir>", "unlistable", 1)
return
for name in names:
if name.startswith(".#"):
continue # Skip CVS temp files
if name.endswith("~"):
continue  # Skip Emacs backup files
full = os.path.join(dir, name)
if os.path.islink(full):
self.addstats("<lnk>", "links", 1)
elif os.path.isdir(full):
self.statdir(full)
else:
self.statfile(full)
def statfile(self, filename):
head, ext = os.path.splitext(filename)
head, base = os.path.split(filename)
if ext == base:
ext = "" # E.g. .cvsignore is deemed not to have an extension
ext = os.path.normcase(ext)
if not ext:
ext = "<none>"
self.addstats(ext, "files", 1)
try:
f = open(filename, "rb")
except IOError as err:
sys.stderr.write("Can't open %s: %s\n" % (filename, err))
self.addstats(ext, "unopenable", 1)
return
data = f.read()
f.close()
self.addstats(ext, "bytes", len(data))
if b'\0' in data:
self.addstats(ext, "binary", 1)
return
if not data:
self.addstats(ext, "empty", 1)
#self.addstats(ext, "chars", len(data))
lines = data.splitlines()
self.addstats(ext, "lines", len(lines))
del lines
words = data.split()
self.addstats(ext, "words", len(words))
def addstats(self, ext, key, n):
d = self.stats.setdefault(ext, {})
d[key] = d.get(key, 0) + n
def report(self):
exts = sorted(self.stats.keys())
# Get the column keys
columns = {}
for ext in exts:
columns.update(self.stats[ext])
cols = sorted(columns.keys())
colwidth = {}
colwidth["ext"] = max([len(ext) for ext in exts])
minwidth = 6
self.stats["TOTAL"] = {}
for col in cols:
total = 0
cw = max(minwidth, len(col))
for ext in exts:
value = self.stats[ext].get(col)
if value is None:
w = 0
else:
w = len("%d" % value)
total += value
cw = max(cw, w)
cw = max(cw, len(str(total)))
colwidth[col] = cw
self.stats["TOTAL"][col] = total
exts.append("TOTAL")
for ext in exts:
self.stats[ext]["ext"] = ext
cols.insert(0, "ext")
def printheader():
for col in cols:
print("%*s" % (colwidth[col], col), end=" ")
print()
printheader()
for ext in exts:
for col in cols:
value = self.stats[ext].get(col, "")
print("%*s" % (colwidth[col], value), end=" ")
print()
printheader() # Another header at the bottom
def main():
args = sys.argv[1:]
if not args:
args = [os.curdir]
s = Stats()
s.statargs(args)
s.report()
if __name__ == "__main__":
main()
| 0.002281 |
"""
pip._vendor is for vendoring dependencies of pip to prevent needing pip to
depend on something external.
Files inside of pip._vendor should be considered immutable and should only be
updated to versions from upstream.
"""
from __future__ import absolute_import
import glob
import os.path
import sys
# Downstream redistributors which have debundled our dependencies should also
# patch this value to be true. This will trigger the additional patching
# to cause things like "six" to be available as pip.
DEBUNDLED = False
# By default, look in this directory for a bunch of .whl files which we will
# add to the beginning of sys.path before attempting to import anything. This
# is done to support downstream re-distributors like Debian and Fedora who
# wish to create their own Wheels for our dependencies to aid in debundling.
WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
# Define a small helper function to alias our vendored modules to the real ones
# if the vendored ones do not exist. This idea of this was taken from
# https://github.com/kennethreitz/requests/pull/2567.
def vendored(modulename):
vendored_name = "{0}.{1}".format(__name__, modulename)
try:
__import__(vendored_name, globals(), locals(), level=0)
except ImportError:
try:
__import__(modulename, globals(), locals(), level=0)
except ImportError:
# We can just silently allow import failures to pass here. If we
# got to this point it means that ``import pip._vendor.whatever``
# failed and so did ``import whatever``. Since we're importing this
# upfront in an attempt to alias imports, not erroring here will
# just mean we get a regular import error whenever pip *actually*
# tries to import one of these modules to use it, which actually
# gives us a better error message than we would have otherwise
# gotten.
pass
else:
sys.modules[vendored_name] = sys.modules[modulename]
base, head = vendored_name.rsplit(".", 1)
setattr(sys.modules[base], head, sys.modules[modulename])
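# Illustrative effect (added comment): after vendored("six") on a debundled
# install, ``import pip._vendor.six`` resolves to the system-wide ``six``
# module registered in sys.modules above.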
# If we're operating in a debundled setup, then we want to go ahead and trigger
# the aliasing of our vendored libraries as well as looking for wheels to add
# to our sys.path. This will cause all of this code to be a no-op typically
# however downstream redistributors can enable it in a consistent way across
# all platforms.
if DEBUNDLED:
# Actually look inside of WHEEL_DIR to find .whl files and add them to the
# front of our sys.path.
sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
# Actually alias all of our vendored dependencies.
vendored("cachecontrol")
vendored("colorama")
vendored("distlib")
vendored("html5lib")
vendored("lockfile")
vendored("six")
vendored("six.moves")
vendored("six.moves.urllib")
vendored("packaging")
vendored("packaging.version")
vendored("packaging.specifiers")
vendored("pkg_resources")
vendored("progress")
vendored("retrying")
vendored("requests")
vendored("requests.packages")
vendored("requests.packages.urllib3")
vendored("requests.packages.urllib3._collections")
vendored("requests.packages.urllib3.connection")
vendored("requests.packages.urllib3.connectionpool")
vendored("requests.packages.urllib3.contrib")
vendored("requests.packages.urllib3.contrib.ntlmpool")
vendored("requests.packages.urllib3.contrib.pyopenssl")
vendored("requests.packages.urllib3.exceptions")
vendored("requests.packages.urllib3.fields")
vendored("requests.packages.urllib3.filepost")
vendored("requests.packages.urllib3.packages")
vendored("requests.packages.urllib3.packages.ordered_dict")
vendored("requests.packages.urllib3.packages.six")
vendored("requests.packages.urllib3.packages.ssl_match_hostname")
vendored("requests.packages.urllib3.packages.ssl_match_hostname."
"_implementation")
vendored("requests.packages.urllib3.poolmanager")
vendored("requests.packages.urllib3.request")
vendored("requests.packages.urllib3.response")
vendored("requests.packages.urllib3.util")
vendored("requests.packages.urllib3.util.connection")
vendored("requests.packages.urllib3.util.request")
vendored("requests.packages.urllib3.util.response")
vendored("requests.packages.urllib3.util.retry")
vendored("requests.packages.urllib3.util.ssl_")
vendored("requests.packages.urllib3.util.timeout")
vendored("requests.packages.urllib3.util.url")
| 0 |
# -*- encoding: utf-8 -*-
# Pilas engine - A video game framework.
#
# Copyright 2010 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
import pilas
from pilas.actores import Actor
class Mapa(Actor):
"""Representa mapas creados a partir de imagenes mas pequeñas.
Este actor te permite crear escenarios tipo ``tiles``, una técnica
de contrucción de escenarios muy popular en los videojuegos.
Puedes crear un actor a partir de una grilla, e indicando cada
uno los bloques o simplemente usando un programa externo llamado
**tiled** (ver http://www.mapeditor.org).
Por ejemplo, para crear un mapa desde un archivo del programa
**tiled** puedes escribir:
>>> mapa = pilas.actores.Mapa('untitled2.tmx')
"""
def __init__(self, grilla_o_mapa=None, x=0, y=0, restitucion=0.56):
Actor.__init__(self, 'invisible.png', x, y)
self.restitucion = restitucion
self.figuras = []
self.bloques = []
if not grilla_o_mapa:
grilla_o_mapa = grilla = pilas.imagenes.cargar_grilla("grillas/plataformas_10_10.png", 10, 10)
self.grilla_o_mapa = grilla_o_mapa
if isinstance(grilla_o_mapa, str):
self._cargar_mapa(grilla_o_mapa)
else:
self.grilla = grilla_o_mapa
self._ancho_cuadro = grilla_o_mapa.cuadro_ancho
self._alto_cuadro = grilla_o_mapa.cuadro_alto
def _cargar_mapa(self, archivo):
"Carga el escenario desde un archivo .tmz (del programa tiled)."
archivo = pilas.utils.obtener_ruta_al_recurso(archivo)
# Carga los nodos principales.
nodo = pilas.utils.xmlreader.makeRootNode(archivo)
nodo_mapa = nodo.getChild('map')
nodo_tileset = nodo_mapa.getChild('tileset')
# Cantidad de bloques en el mapa.
self.columnas = int(nodo_mapa.getAttributeValue('width'))
self.filas = int(nodo_mapa.getAttributeValue('height'))
# Atributos de la imagen asociada al mapa.
self._ruta = nodo_tileset.getChild('image').getAttributeValue('source')
self._ruta = pilas.utils.obtener_ruta_al_recurso(self._ruta)
self._ancho_imagen = int(nodo_tileset.getChild('image').getAttributeValue('width'))
self._alto_imagen = int(nodo_tileset.getChild('image').getAttributeValue('height'))
self._ancho_cuadro = int(nodo_tileset.getAttributeValue('tilewidth'))
self._alto_cuadro = int(nodo_tileset.getAttributeValue('tileheight'))
# Carga la grilla de imagenes desde el mapa.
self.grilla = pilas.imagenes.cargar_grilla(self._ruta,
self._ancho_imagen / self._ancho_cuadro,
self._alto_imagen / self._alto_cuadro)
# Carga las capas del mapa.
layers = nodo.getChild('map').getChildren('layer')
if len(layers) == 0:
raise Exception("Debe tener al menos una capa (layer).")
# La capa 0 (inferior) define los bloques no-solidos.
self._crear_bloques(layers[0], solidos=False)
# El resto de las capas definen bloques solidos
for layer in layers[1:]:
self._crear_bloques(layer, solidos=True)
def _crear_bloques(self, capa, solidos):
"Genera actores que representan los bloques del escenario."
datos = capa.getChild('data').getData()
# Convierte todo el mapa en una matriz de numeros.
bloques = [[int(x) for x in x.split(',') if x] for x in datos.split()]
for (y, fila) in enumerate(bloques):
for (x, bloque) in enumerate(fila):
if bloque:
self.pintar_bloque(y, x, bloque -1, solidos)
def pintar_bloque(self, fila, columna, indice, es_bloque_solido=False):
nuevo_bloque = pilas.actores.Actor('invisible.png')
nuevo_bloque.imagen = self.grilla
nuevo_bloque.imagen.definir_cuadro(indice)
nuevo_bloque.izquierda = columna * self._ancho_cuadro - 320
nuevo_bloque.arriba = -fila * self._alto_cuadro + 240
self.bloques.append(nuevo_bloque)
if es_bloque_solido:
figura = pilas.fisica.Rectangulo(nuevo_bloque.izquierda + self._ancho_cuadro / 2,
nuevo_bloque.arriba - self._alto_cuadro / 2,
self._ancho_cuadro, self._alto_cuadro, dinamica=False,
restitucion=self.restitucion)
self.figuras.append(figura)
def reiniciar(self):
self._eliminar_bloques()
if isinstance(self.grilla_o_mapa, str):
self._cargar_mapa(self.grilla_o_mapa)
def eliminar(self):
self._eliminar_bloques()
def _eliminar_bloques(self):
for b in self.bloques:
b.eliminar()
for f in self.figuras:
f.eliminar()
| 0.003485 |
from setuptools import setup, find_packages
setup(
name = "django-report-scaffold",
version = "0.1.4",
author = "David Burke",
author_email = "[email protected]",
description = ("Create streamlined and flexible reporting tools for your end uesrs. Report scaffold is not a drop in application but a framework for creating reporting tools. Think of it like django admin."),
license = "BSD",
keywords = "django report",
url = "https://github.com/burke-software/django-report-scaffold",
packages=find_packages(),
include_package_data=True,
test_suite='setuptest.setuptest.SetupTestSuite',
tests_require=(
'django-setuptest',
),
classifiers=[
"Development Status :: 5 - Production/Stable",
'Environment :: Web Environment',
'Framework :: Django',
'Programming Language :: Python',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
"License :: OSI Approved :: BSD License",
],
install_requires=[
'django',
'django-report-utils',
'django-widget-tweaks',
]
)
| 0.014847 |
"""
Iterator based sre token scanner
"""
import re
from re import VERBOSE, MULTILINE, DOTALL
import sre_parse
import sre_compile
import sre_constants
from sre_constants import BRANCH, SUBPATTERN
__all__ = ['Scanner', 'pattern']
FLAGS = (VERBOSE | MULTILINE | DOTALL)
class Scanner(object):
def __init__(self, lexicon, flags=FLAGS):
self.actions = [None]
# Combine phrases into a compound pattern
s = sre_parse.Pattern()
s.flags = flags
p = []
for idx, token in enumerate(lexicon):
phrase = token.pattern
try:
subpattern = sre_parse.SubPattern(s,
[(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))])
except sre_constants.error:
raise
p.append(subpattern)
self.actions.append(token)
s.groups = len(p) + 1 # NOTE(guido): Added to make SRE validation work
p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
self.scanner = sre_compile.compile(p)
def iterscan(self, string, idx=0, context=None):
"""
Yield match, end_idx for each match
"""
match = self.scanner.scanner(string, idx).match
actions = self.actions
lastend = idx
end = len(string)
while True:
m = match()
if m is None:
break
matchbegin, matchend = m.span()
if lastend == matchend:
break
action = actions[m.lastindex]
if action is not None:
rval, next_pos = action(m, context)
if next_pos is not None and next_pos != matchend:
# "fast forward" the scanner
matchend = next_pos
match = self.scanner.scanner(string, matchend).match
yield rval, matchend
lastend = matchend
def pattern(pattern, flags=FLAGS):
def decorator(fn):
fn.pattern = pattern
fn.regex = re.compile(pattern, flags)
return fn
return decorator | 0.001916 |
"""
Installs and configures quantum
"""
import logging
import os
import uuid
from packstack.installer import utils
from packstack.installer import validators
from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile
# Controller object will be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OS-QUANTUM"
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
global controller
controller = controllerObject
logging.debug("Adding OpenStack Quantum configuration")
conf_params = {
"QUANTUM" : [
{"CMD_OPTION" : "quantum-server-host",
"USAGE" : "The IP addresses of the server on which to install the Quantum server",
"PROMPT" : "Enter the IP address of the Quantum server",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ip, validators.validate_ssh],
"DEFAULT_VALUE" : utils.get_localhost_ip(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_QUANTUM_SERVER_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "quantum-use-namespaces",
"USAGE" : "Enable network namespaces for Quantum",
"PROMPT" : "Should Quantum use network namespaces?",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "y",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_QUANTUM_USE_NAMESPACES",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "quantum-ks-password",
"USAGE" : "The password to use for Quantum to authenticate with Keystone",
"PROMPT" : "Enter the password for Quantum Keystone access",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_QUANTUM_KS_PW",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : True,
"CONDITION" : False },
{"CMD_OPTION" : "quantum-db-password",
"USAGE" : "The password to use for Quantum to access DB",
"PROMPT" : "Enter the password for Quantum DB access",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_QUANTUM_DB_PW",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : True,
"CONDITION" : False },
{"CMD_OPTION" : "quantum-l3-hosts",
"USAGE" : "A comma separated list of IP addresses on which to install Quantum L3 agent",
"PROMPT" : "Enter a comma separated list of IP addresses on which to install the Quantum L3 agent",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_multi_ssh],
"DEFAULT_VALUE" : utils.get_localhost_ip(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_QUANTUM_L3_HOSTS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "quantum-l3-ext-bridge",
"USAGE" : "The name of the bridge that the Quantum L3 agent will use for external traffic",
"PROMPT" : "Enter the name of the bridge that the Quantum L3 agent will use for external traffic",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : "br-ex",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_QUANTUM_L3_EXT_BRIDGE",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "quantum-dhcp-hosts",
"USAGE" : "A comma separated list of IP addresses on which to install Quantum DHCP plugin",
"PROMPT" : "Enter a comma separated list of IP addresses on which to install Quantum DHCP plugin",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_multi_ssh],
"DEFAULT_VALUE" : utils.get_localhost_ip(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_QUANTUM_DHCP_HOSTS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "quantum-l2-plugin",
"USAGE" : "The name of the L2 plugin to be used with Quantum",
"PROMPT" : "Enter the name of the L2 plugin to be used with Quantum",
"OPTION_LIST" : ["linuxbridge", "openvswitch"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "openvswitch",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_QUANTUM_L2_PLUGIN",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "quantum-metadata-hosts",
"USAGE" : "A comma separated list of IP addresses on which to install Quantum metadata agent",
"PROMPT" : "Enter a comma separated list of IP addresses on which to install the Quantum metadata agent",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_multi_ssh],
"DEFAULT_VALUE" : utils.get_localhost_ip(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_QUANTUM_METADATA_HOSTS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "quantum-metadata-pw",
"USAGE" : "A comma separated list of IP addresses on which to install Quantum metadata agent",
"PROMPT" : "Enter a comma separated list of IP addresses on which to install the Quantum metadata agent",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_QUANTUM_METADATA_PW",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : True,
"CONDITION" : False },
],
"QUANTUM_LB_PLUGIN" : [
{"CMD_OPTION" : "quantum-lb-tenant-network-type",
"USAGE" : "The type of network to allocate for tenant networks",
"PROMPT" : "Enter the type of network to allocate for tenant networks",
"OPTION_LIST" : ["local", "vlan"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "local",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_QUANTUM_LB_TENANT_NETWORK_TYPE",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "quantum-lb-vlan-ranges",
"USAGE" : "A comma separated list of VLAN ranges for the Quantum linuxbridge plugin",
"PROMPT" : "Enter a comma separated list of VLAN ranges for the Quantum linuxbridge plugin",
"OPTION_LIST" : [],
"VALIDATORS" : [],
"DEFAULT_VALUE" : "",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_QUANTUM_LB_VLAN_RANGES",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "quantum-lb-interface-mappings",
"USAGE" : "A comma separated list of interface mappings for the Quantum linuxbridge plugin",
"PROMPT" : "Enter a comma separated list of interface mappings for the Quantum linuxbridge plugin",
"OPTION_LIST" : [],
"VALIDATORS" : [],
"DEFAULT_VALUE" : "",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_QUANTUM_LB_INTERFACE_MAPPINGS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
],
"QUANTUM_OVS_PLUGIN" : [
{"CMD_OPTION" : "quantum-ovs-tenant-network-type",
"USAGE" : "Type of network to allocate for tenant networks",
"PROMPT" : "Enter the type of network to allocate for tenant networks",
"OPTION_LIST" : ["local", "vlan", "gre"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "local",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_QUANTUM_OVS_TENANT_NETWORK_TYPE",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "quantum-ovs-vlan-ranges",
"USAGE" : "A comma separated list of VLAN ranges for the Quantum openvswitch plugin",
"PROMPT" : "Enter a comma separated list of VLAN ranges for the Quantum openvswitch plugin",
"OPTION_LIST" : [],
"VALIDATORS" : [],
"DEFAULT_VALUE" : "",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_QUANTUM_OVS_VLAN_RANGES",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "quantum-ovs-bridge-mappings",
"USAGE" : "A comma separated list of bridge mappings for the Quantum openvswitch plugin",
"PROMPT" : "Enter a comma separated list of bridge mappings for the Quantum openvswitch plugin",
"OPTION_LIST" : [],
"VALIDATORS" : [],
"DEFAULT_VALUE" : "physnet1:1000:2000",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_QUANTUM_OVS_BRIDGE_MAPPINGS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
],
}
conf_groups = [
{ "GROUP_NAME" : "QUANTUM",
"DESCRIPTION" : "Quantum config",
"PRE_CONDITION" : "CONFIG_QUANTUM_INSTALL",
"PRE_CONDITION_MATCH" : "y",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True },
{ "GROUP_NAME" : "QUANTUM_LB_PLUGIN",
"DESCRIPTION" : "Quantum LB plugin config",
"PRE_CONDITION" : "CONFIG_QUANTUM_L2_PLUGIN",
"PRE_CONDITION_MATCH" : "linuxbridge",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True },
{ "GROUP_NAME" : "QUANTUM_OVS_PLUGIN",
"DESCRIPTION" : "Quantum OVS plugin config",
"PRE_CONDITION" : "CONFIG_QUANTUM_L2_PLUGIN",
"PRE_CONDITION_MATCH" : "openvswitch",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True },
]
for group in conf_groups:
paramList = conf_params[group["GROUP_NAME"]]
controller.addGroup(group, paramList)
def getInterfaceDriver():
if controller.CONF["CONFIG_QUANTUM_L2_PLUGIN"] == "openvswitch":
return 'quantum.agent.linux.interface.OVSInterfaceDriver'
elif controller.CONF['CONFIG_QUANTUM_L2_PLUGIN'] == 'linuxbridge':
return 'quantum.agent.linux.interface.BridgeInterfaceDriver'
def initSequences(controller):
if controller.CONF['CONFIG_QUANTUM_INSTALL'] != 'y':
return
if controller.CONF['CONFIG_QUANTUM_USE_NAMESPACES'] == 'y':
controller.CONF['CONFIG_QUANTUM_USE_NAMESPACES'] = 'True'
else:
controller.CONF['CONFIG_QUANTUM_USE_NAMESPACES'] = 'False'
global api_hosts, l3_hosts, dhcp_hosts, compute_hosts, meta_hosts, q_hosts
api_hosts = set(controller.CONF['CONFIG_QUANTUM_SERVER_HOST'].split(','))
l3_hosts = set(controller.CONF['CONFIG_QUANTUM_L3_HOSTS'].split(','))
dhcp_hosts = set(controller.CONF['CONFIG_QUANTUM_DHCP_HOSTS'].split(','))
meta_hosts = set(controller.CONF['CONFIG_QUANTUM_METADATA_HOSTS'].split(','))
compute_hosts = set(controller.CONF['CONFIG_NOVA_COMPUTE_HOSTS'].split(','))
q_hosts = api_hosts | l3_hosts | dhcp_hosts | compute_hosts | meta_hosts
quantum_steps = [
{'title': 'Adding Quantum API manifest entries', 'functions':[createManifest]},
{'title': 'Adding Quantum Keystone manifest entries', 'functions':[createKeystoneManifest]},
{'title': 'Adding Quantum L3 manifest entries', 'functions':[createL3Manifests]},
{'title': 'Adding Quantum L2 Agent manifest entries', 'functions':[createL2AgentManifests]},
{'title': 'Adding Quantum DHCP Agent manifest entries', 'functions':[createDHCPManifests]},
{'title': 'Adding Quantum Metadata Agent manifest entries', 'functions':[createMetadataManifests]},
]
controller.addSequence("Installing OpenStack Quantum", [], [], quantum_steps)
def createManifest(config):
global q_hosts
for host in q_hosts:
if host in api_hosts:
controller.CONF['CONFIG_QUANTUM_SERVER_ENABLE'] = 'true'
else:
controller.CONF['CONFIG_QUANTUM_SERVER_ENABLE'] = 'false'
manifest_file = "%s_quantum.pp" % (host,)
manifest_data = getManifestTemplate("quantum.pp")
appendManifestFile(manifest_file, manifest_data, 'quantum')
# Set up any l2 plugin configs we need anywhere we install quantum
# XXX I am not completely sure about this, but it seems necessary
if controller.CONF['CONFIG_QUANTUM_L2_PLUGIN'] == 'openvswitch':
manifest_data = getManifestTemplate("quantum_ovs_plugin.pp")
appendManifestFile(manifest_file, manifest_data, 'quantum')
elif controller.CONF['CONFIG_QUANTUM_L2_PLUGIN'] == 'linuxbridge':
# Eventually linuxbridge module will need to spearate plugin/agent functionality
pass
def createKeystoneManifest(config):
manifestfile = "%s_keystone.pp"%controller.CONF['CONFIG_KEYSTONE_HOST']
manifestdata = getManifestTemplate("keystone_quantum.pp")
appendManifestFile(manifestfile, manifestdata)
def createL3Manifests(config):
global l3_hosts
for host in l3_hosts:
controller.CONF['CONFIG_QUANTUM_L3_HOST'] = host
controller.CONF['CONFIG_QUANTUM_L3_INTERFACE_DRIVER'] = getInterfaceDriver()
manifestdata = getManifestTemplate("quantum_l3.pp")
manifestfile = "%s_quantum.pp" % (host,)
appendManifestFile(manifestfile, manifestdata + '\n')
if controller.CONF['CONFIG_QUANTUM_L2_PLUGIN'] == 'openvswitch' and controller.CONF['CONFIG_QUANTUM_L3_EXT_BRIDGE']:
controller.CONF['CONFIG_QUANTUM_OVS_BRIDGE'] = controller.CONF['CONFIG_QUANTUM_L3_EXT_BRIDGE']
manifestdata = getManifestTemplate('quantum_ovs_bridge.pp')
appendManifestFile(manifestfile, manifestdata + '\n')
def createDHCPManifests(config):
global dhcp_hosts
for host in dhcp_hosts:
controller.CONF["CONFIG_QUANTUM_DHCP_HOST"] = host
controller.CONF['CONFIG_QUANTUM_DHCP_INTERFACE_DRIVER'] = getInterfaceDriver()
manifestdata = getManifestTemplate("quantum_dhcp.pp")
manifestfile = "%s_quantum.pp" % (host,)
appendManifestFile(manifestfile, manifestdata + "\n")
def createL2AgentManifests(config):
global compute_hosts, dhcp_host, l3_hosts
if controller.CONF["CONFIG_QUANTUM_L2_PLUGIN"] == "openvswitch":
host_var = 'CONFIG_QUANTUM_OVS_HOST'
template_name = 'quantum_ovs_agent.pp'
elif controller.CONF["CONFIG_QUANTUM_L2_PLUGIN"] == "linuxbridge":
host_var = 'CONFIG_QUANTUM_LB_HOST'
template_name = 'quantum_lb_agent.pp'
else:
raise KeyError("Unknown layer2 agent")
# Install l2 agents on every compute host in addition to any hosts listed
# specifically for the l2 agent
for host in compute_hosts | dhcp_hosts | l3_hosts:
controller.CONF[host_var] = host
manifestdata = getManifestTemplate(template_name)
manifestfile = "%s_quantum.pp" % (host,)
appendManifestFile(manifestfile, manifestdata + "\n")
def createMetadataManifests(config):
global meta_hosts
for host in meta_hosts:
controller.CONF['CONFIG_QUANTUM_METADATA_HOST'] = host
manifestdata = getManifestTemplate('quantum_metadata.pp')
manifestfile = "%s_quantum.pp" % (host,)
appendManifestFile(manifestfile, manifestdata + "\n")
| 0.015196 |
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Andi Albrecht, [email protected]
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""SQL Lexer"""
# This code is based on the SqlLexer in pygments.
# http://pygments.org/
# It's separated from the rest of pygments to increase performance
# and to allow some customizations.
import re
import sys
from sqlparse import tokens
from sqlparse.keywords import KEYWORDS, KEYWORDS_COMMON
from cStringIO import StringIO
class include(str):
pass
class combined(tuple):
"""Indicates a state combined from multiple states."""
def __new__(cls, *args):
return tuple.__new__(cls, args)
def __init__(self, *args):
# tuple.__init__ doesn't do anything
pass
def is_keyword(value):
test = value.upper()
return KEYWORDS_COMMON.get(test, KEYWORDS.get(test, tokens.Name)), value
def apply_filters(stream, filters, lexer=None):
"""
Use this method to apply an iterable of filters to
a stream. If lexer is given it's forwarded to the
filter, otherwise the filter receives `None`.
"""
def _apply(filter_, stream):
for token in filter_.filter(lexer, stream):
yield token
for filter_ in filters:
stream = _apply(filter_, stream)
return stream
class LexerMeta(type):
"""
Metaclass for Lexer, creates the self._tokens attribute from
self.tokens on the first instantiation.
"""
def _process_state(cls, unprocessed, processed, state):
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokenlist = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokenlist.extend(cls._process_state(
unprocessed, processed, str(tdef)))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = re.compile(tdef[0], rflags).match
except Exception, err:
raise ValueError(("uncompilable regex %r in state"
" %r of %r: %s"
% (tdef[0], state, cls, err)))
assert type(tdef[1]) is tokens._TokenType or callable(tdef[1]), \
('token type must be simple type or callable, not %r'
% (tdef[1],))
if len(tdef) == 2:
new_state = None
else:
tdef2 = tdef[2]
if isinstance(tdef2, str):
# an existing state
if tdef2 == '#pop':
new_state = -1
elif tdef2 in unprocessed:
new_state = (tdef2,)
elif tdef2 == '#push':
new_state = tdef2
elif tdef2[:5] == '#pop:':
new_state = -int(tdef2[5:])
else:
assert False, 'unknown new state %r' % tdef2
elif isinstance(tdef2, combined):
# combine a new state from existing ones
new_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in tdef2:
assert istate != state, \
'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[new_state] = itokens
new_state = (new_state,)
elif isinstance(tdef2, tuple):
# push more than one state
for state in tdef2:
assert (state in unprocessed or
state in ('#pop', '#push')), \
'unknown new state ' + state
new_state = tdef2
else:
assert False, 'unknown new state def %r' % tdef2
tokenlist.append((rex, tdef[1], new_state))
return tokenlist
def process_tokendef(cls):
cls._all_tokens = {}
cls._tmpname = 0
processed = cls._all_tokens[cls.__name__] = {}
#tokendefs = tokendefs or cls.tokens[name]
for state in cls.tokens.keys():
cls._process_state(cls.tokens, processed, state)
return processed
def __call__(cls, *args, **kwds):
if not hasattr(cls, '_tokens'):
cls._all_tokens = {}
cls._tmpname = 0
if hasattr(cls, 'token_variants') and cls.token_variants:
# don't process yet
pass
else:
cls._tokens = cls.process_tokendef()
return type.__call__(cls, *args, **kwds)
class Lexer(object):
__metaclass__ = LexerMeta
encoding = 'utf-8'
stripall = False
stripnl = False
tabsize = 0
flags = re.IGNORECASE | re.UNICODE
tokens = {
'root': [
(r'(--|# ).*?(\r\n|\r|\n)', tokens.Comment.Single),
# $ matches *before* newline, therefore we have two patterns
# to match Comment.Single
(r'(--|# ).*?$', tokens.Comment.Single),
(r'(\r\n|\r|\n)', tokens.Newline),
(r'\s+', tokens.Whitespace),
(r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
(r':=', tokens.Assignment),
(r'::', tokens.Punctuation),
(r'[*]', tokens.Wildcard),
(r'CASE\b', tokens.Keyword), # extended CASE(foo)
(r"`(``|[^`])*`", tokens.Name),
(r"´(´´|[^´])*´", tokens.Name),
(r'\$([^\W\d]\w*)?\$', tokens.Name.Builtin),
(r'\?{1}', tokens.Name.Placeholder),
(r'%\(\w+\)s', tokens.Name.Placeholder),
(r'%s', tokens.Name.Placeholder),
(r'[$:?]\w+', tokens.Name.Placeholder),
# FIXME(andi): VALUES shouldn't be listed here
# see https://github.com/andialbrecht/sqlparse/pull/64
(r'VALUES', tokens.Keyword),
(r'(@|##|#)[^\W\d_]\w+', tokens.Name),
# IN is special, it may be followed by a parenthesis, but
# is never a functino, see issue183
(r'in\b(?=[ (])?', tokens.Keyword),
(r'[^\W\d_]\w*(?=[.(])', tokens.Name), # see issue39
(r'[-]?0x[0-9a-fA-F]+', tokens.Number.Hexadecimal),
(r'[-]?[0-9]*(\.[0-9]+)?[eE][-]?[0-9]+', tokens.Number.Float),
(r'[-]?[0-9]*\.[0-9]+', tokens.Number.Float),
(r'[-]?[0-9]+', tokens.Number.Integer),
(r"'(''|\\\\|\\'|[^'])*'", tokens.String.Single),
# not a real string literal in ANSI SQL:
(r'(""|".*?[^\\]")', tokens.String.Symbol),
# sqlite names can be escaped with [square brackets]. left bracket
# cannot be preceded by word character or a right bracket --
# otherwise it's probably an array index
(r'(?<![\w\])])(\[[^\]]+\])', tokens.Name),
(r'((LEFT\s+|RIGHT\s+|FULL\s+)?(INNER\s+|OUTER\s+|STRAIGHT\s+)?|(CROSS\s+|NATURAL\s+)?)?JOIN\b', tokens.Keyword),
(r'END(\s+IF|\s+LOOP)?\b', tokens.Keyword),
(r'NOT NULL\b', tokens.Keyword),
(r'CREATE(\s+OR\s+REPLACE)?\b', tokens.Keyword.DDL),
(r'DOUBLE\s+PRECISION\b', tokens.Name.Builtin),
(r'(?<=\.)[^\W\d_]\w*', tokens.Name),
(r'[^\W\d]\w*', is_keyword),
(r'[;:()\[\],\.]', tokens.Punctuation),
(r'[<>=~!]+', tokens.Operator.Comparison),
(r'[+/@#%^&|`?^-]+', tokens.Operator),
],
'multiline-comments': [
(r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
(r'\*/', tokens.Comment.Multiline, '#pop'),
(r'[^/\*]+', tokens.Comment.Multiline),
(r'[/*]', tokens.Comment.Multiline),
]}
def __init__(self):
self.filters = []
def add_filter(self, filter_, **options):
from sqlparse.filters import Filter
if not isinstance(filter_, Filter):
filter_ = filter_(**options)
self.filters.append(filter_)
def _decode(self, text):
if sys.version_info[0] == 3:
if isinstance(text, str):
return text
if self.encoding == 'guess':
try:
text = text.decode('utf-8')
if text.startswith(u'\ufeff'):
text = text[len(u'\ufeff'):]
except UnicodeDecodeError:
text = text.decode('latin1')
else:
try:
text = text.decode(self.encoding)
except UnicodeDecodeError:
text = text.decode('unicode-escape')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
return text
def get_tokens(self, text, unfiltered=False):
"""
Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
is bypassed even if filters are defined.
Also preprocess the text, i.e. expand tabs and strip it if
wanted and applies registered filters.
"""
if isinstance(text, basestring):
if self.stripall:
text = text.strip()
elif self.stripnl:
text = text.strip('\n')
if sys.version_info[0] < 3 and isinstance(text, unicode):
text = StringIO(text.encode('utf-8'))
self.encoding = 'utf-8'
else:
text = StringIO(text)
def streamer():
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
stream = streamer()
if not unfiltered:
stream = apply_filters(stream, self.filters, self)
return stream
def get_tokens_unprocessed(self, stream, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
``stack`` is the inital stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens # see __call__, pylint:disable=E1101
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
known_names = {}
text = stream.read()
text = self._decode(text)
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
value = m.group()
if value in known_names:
yield pos, known_names[value], value
elif type(action) is tokens._TokenType:
yield pos, action, value
elif hasattr(action, '__call__'):
ttype, value = action(value)
known_names[value] = ttype
yield pos, ttype, value
else:
for item in action(self, m):
yield item
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
elif (
# Ugly hack - multiline-comments
# are not stackable
state != 'multiline-comments'
or not statestack
or statestack[-1] != 'multiline-comments'
):
statestack.append(state)
elif isinstance(new_state, int):
# pop
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
pos += 1
statestack = ['root']
statetokens = tokendefs['root']
yield pos, tokens.Text, u'\n'
continue
yield pos, tokens.Error, text[pos]
pos += 1
except IndexError:
break
def tokenize(sql, encoding=None):
"""Tokenize sql.
Tokenize *sql* using the :class:`Lexer` and return a 2-tuple stream
of ``(token type, value)`` items.
"""
lexer = Lexer()
if encoding is not None:
lexer.encoding = encoding
return lexer.get_tokens(sql)
| 0.000146 |
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_pd
version_added: "1.4"
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
image:
description:
- the source image to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
snapshot:
description:
- the source snapshot to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
required: false
default: null
aliases: []
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
disk_type:
version_added: "1.9"
description:
- type of disk provisioned
required: false
default: "pd-standard"
choices: ["pd-standard", "pd-ssd"]
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
author: "Eric Johnson (@erjohnso) <[email protected]>"
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, ResourceInUseError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def main():
module = AnsibleModule(
argument_spec = dict(
detach_only = dict(type='bool'),
instance_name = dict(),
mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
name = dict(required=True),
size_gb = dict(default=10),
disk_type = dict(default='pd-standard'),
image = dict(),
snapshot = dict(),
state = dict(default='present'),
zone = dict(default='us-central1-b'),
service_account_email = dict(),
pem_file = dict(),
credentials_file = dict(),
project_id = dict(),
)
)
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.17.0+) is required for this module')
gce = gce_connect(module)
detach_only = module.params.get('detach_only')
instance_name = module.params.get('instance_name')
mode = module.params.get('mode')
name = module.params.get('name')
size_gb = module.params.get('size_gb')
disk_type = module.params.get('disk_type')
image = module.params.get('image')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
zone = module.params.get('zone')
if detach_only and not instance_name:
module.fail_json(
msg='Must specify an instance name when detaching a disk',
changed=False)
disk = inst = None
changed = is_attached = False
json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type }
if detach_only:
json_output['detach_only'] = True
json_output['detached_from_instance'] = instance_name
if instance_name:
# user wants to attach/detach from an existing instance
try:
inst = gce.ex_get_node(instance_name, zone)
# is the disk attached?
for d in inst.extra['disks']:
if d['deviceName'] == name:
is_attached = True
json_output['attached_mode'] = d['mode']
json_output['attached_to_instance'] = inst.name
except:
pass
# find disk if it already exists
try:
disk = gce.ex_get_volume(name)
json_output['size_gb'] = int(disk.size)
except ResourceNotFoundError:
pass
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# user wants a disk to exist. If "instance_name" is supplied the user
# also wants it attached
if state in ['active', 'present']:
if not size_gb:
module.fail_json(msg="Must supply a size_gb", changed=False)
try:
size_gb = int(round(float(size_gb)))
if size_gb < 1:
raise Exception
except:
module.fail_json(msg="Must supply a size_gb larger than 1 GB",
changed=False)
if instance_name and inst is None:
module.fail_json(msg='Instance %s does not exist in zone %s' % (
instance_name, zone), changed=False)
if not disk:
if image is not None and snapshot is not None:
module.fail_json(
msg='Cannot give both image (%s) and snapshot (%s)' % (
image, snapshot), changed=False)
lc_image = None
lc_snapshot = None
if image is not None:
lc_image = gce.ex_get_image(image)
elif snapshot is not None:
lc_snapshot = gce.ex_get_snapshot(snapshot)
try:
disk = gce.create_volume(
size_gb, name, location=zone, image=lc_image,
snapshot=lc_snapshot, ex_disk_type=disk_type)
except ResourceExistsError:
pass
except QuotaExceededError:
module.fail_json(msg='Requested disk size exceeds quota',
changed=False)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['size_gb'] = size_gb
if image is not None:
json_output['image'] = image
if snapshot is not None:
json_output['snapshot'] = snapshot
changed = True
if inst and not is_attached:
try:
gce.attach_volume(inst, disk, device=name, ex_mode=mode)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['attached_to_instance'] = inst.name
json_output['attached_mode'] = mode
changed = True
# user wants to delete a disk (or perhaps just detach it).
if state in ['absent', 'deleted'] and disk:
if inst and is_attached:
try:
gce.detach_volume(disk, ex_node=inst)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
if not detach_only:
try:
gce.destroy_volume(disk)
except ResourceInUseError as e:
module.fail_json(msg=str(e.value), changed=False)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
json_output['changed'] = changed
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
| 0.004965 |
__date__ = "3 june 2010"
__author__ = '[email protected]'
class ParamCardWriter(object):
header = \
"""######################################################################\n""" + \
"""## PARAM_CARD AUTOMATICALY GENERATED BY THE UFO #####################\n""" + \
"""######################################################################\n"""
def __init__(self, filename, list_of_parameters=None):
"""write a valid param_card.dat"""
if not list_of_parameters:
from parameters import all_parameters
list_of_parameters = [param for param in all_parameters if \
param.nature=='external']
self.fsock = open(filename, 'w')
self.fsock.write(self.header)
self.write_card(list_of_parameters)
def write_card(self, all_ext_param):
""" """
# list all lhablock
all_lhablock = set([param.lhablock for param in all_ext_param])
# ordonate lhablock alphabeticaly
list(all_lhablock).sort()
for lhablock in all_lhablock:
self.write_block(lhablock)
[self.write_param(param, lhablock) for param in all_ext_param if \
param.lhablock == lhablock]
def write_block(self, name):
""" write a comment for a block"""
self.fsock.writelines(
"""\n###################################""" + \
"""\n## INFORMATION FOR %s""" % name.upper() +\
"""\n###################################\n"""
)
if name!='DECAY':
self.fsock.write("""Block %s \n""" % name)
def write_param(self, param, lhablock):
lhacode=' '.join(['%3s' % key for key in param.lhacode])
if lhablock != 'DECAY':
text = """ %s %e # %s \n""" % (lhacode, param.value, param.name )
else:
text = '''DECAY %s %e \n''' % (lhacode, param.value)
self.fsock.write(text)
if '__main__' == __name__:
ParamCardWriter('./param_card.dat')
print 'done'
| 0.020143 |
import csv
import sys
from os.path import basename
import pulp
import tablib
from collections import OrderedDict
from scipy.optimize import linprog
class LinearProgramming():
"""docstring for LinearProgramming"""
def __init__(self, inputFile):
with open(inputFile) as csvFile:
csvreader = csv.reader(csvFile, delimiter=',')
data = [map(float, data) for data in csvreader]
# Create an instance of an OrderedDictionary for the Z_dic_func
self.Z_dic_func = OrderedDict()
# N dimensional array that contains holds all info from csv
self.prob_matrix = data
# Initialize the linear problem model and set it to a maximization problem
self.lp_prob = pulp.LpProblem("LP Problem for " + basename(inputFile), pulp.LpMaximize)
# Get unit profit array from prob_matrix
self.unit_profit = self.prob_matrix[-1:][0]
self.var_coeffs = []
# Fill up dictionary with non-negativity conditions and corresponding unit_profit
for i in range(len(self.unit_profit)):
key = "X{0}".format((i + 1))
var = pulp.LpVariable(key, lowBound=0, cat='Continuous')
self.Z_dic_func[key] = (var, self.unit_profit[i])
self.var_coeffs.append(var)
# ADD objective function Z
@property
def ObjectFunc(self):
self.lp_prob += sum(val * key for key, val in self.Z_dic_func.itervalues()), "Z"
return self.lp_prob.objective
@property
def probVars(self):
return self.lp_prob.variables()
# ADD Constraints
@property
def Constraints(self):
no_constraints = len(self.prob_matrix) - 1
for i in range(no_constraints):
curr_constraint = self.prob_matrix[i]
constraint_len = len(curr_constraint) - 1
sum_of_constraints = sum(x * coeff for x,
coeff in zip(curr_constraint[:constraint_len],
self.var_coeffs))
self.lp_prob += sum_of_constraints <= curr_constraint[constraint_len], "E%d" % (i + 1)
return self.lp_prob.constraints
@property
def SolveProb(self):
self.lp_prob.solve()
prob_results = {
'prob_status': pulp.LpStatus[self.lp_prob.status],
'objective_val': round(pulp.value(self.lp_prob.objective), 2),
'prob_vars': self.lp_prob.variables()
}
return prob_results
def slack_and_iterations(self):
A_ub = []
b_ub = []
c = [-1 * self.unit_profit[i] for i in range(len(self.unit_profit))]
for A in self.prob_matrix[:-1]:
A_ub.append(A[:-1])
b_ub.append(A[-1:][0])
res = linprog(c, A_ub, b_ub, method='simplex', options={"disp": False})
slack = []
if len(res.slack) != 0:
total_no_vars = len(self.var_coeffs) + len(res.slack)
slack = zip(["X" + str(i) for i in range(len(self.var_coeffs) + 1, total_no_vars + 1)], res.slack)
result = {
"slack": slack,
"nit": res.nit
}
return result
@property
def genTable(self):
dataset = tablib.Dataset()
dataset.headers = ["Equipment/Product"] + ["P" + str(i + 1) for i in range(len(self.unit_profit))]
dataset.headers.append("Available Resources")
no_constraints = len(self.prob_matrix) - 1
for i in range(no_constraints):
dataset.append(["E" + str(i + 1)] + self.prob_matrix[i])
dataset.append(["Unit Profit ($)"] + [self.unit_profit[i] for i in range(len(self.unit_profit))] + [" "])
return dataset.html
def __main(argv):
# if argv == []:
# print "Please provide a file: HW1.py <inputfile>"
# return
# inputFile = argv[0]
#
lp = LinearProgramming("./test_data/lp_example.csv")
lp.ObjectFunc
lp.Constraints
lp.SolveProb
print "Status of our Problem: ", pulp.LpStatus[lp.lp_prob.status]
for product in lp.lp_prob.variables():
print "We need to produce {0} of Product {1} to maximize profit".format(product.varValue, product.name)
print "Maximum profit company can earn is {}".format(pulp.value(lp.lp_prob.objective))
print lp.slack_and_iterations()
if __name__ == '__main__':
__main(sys.argv[1:])
| 0.002739 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import six
import pyrax
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
au = pyrax.autoscale
def safe_int(val, allow_zero=True):
"""
This function converts the six.moves.input values to integers. It handles
invalid entries, and optionally forbids values of zero.
"""
try:
ret = int(val)
except ValueError:
print("Sorry, '%s' is not a valid integer." % val)
return False
if not allow_zero and ret == 0:
print("Please enter a non-zero integer.")
return False
return ret
# Get the current scaling groups
sgs = au.list()
if not sgs:
print("There are no scaling groups defined.")
exit()
print()
print("Available Scaling Groups:")
for pos, sg in enumerate(sgs):
print("%s - %s" % (pos, sg.name))
intanswer = -1
while intanswer < 0:
answer = six.moves.input("Enter the number of the scaling group: ")
if not answer:
print("Nothing entered; exiting.")
exit()
intanswer = safe_int(answer)
if intanswer is False:
intanswer = -1
continue
if not 0 <= intanswer < len(sgs):
print("The number '%s' does not correspond to any scaling group." % answer)
intanswer = -1
policies = sg.list_policies()
if not policies:
print("There are no policies defined for this scaling group.")
exit()
for pos, policy in enumerate(policies):
print("%s - %s" % (pos, policy.name))
answer = six.moves.input("Enter the number of the policy to delete: ")
if not answer:
print("Nothing entered; exiting.")
exit()
intanswer = safe_int(answer)
if not 0 <= intanswer < len(policies):
print("The number '%s' does not correspond to any policy." % answer)
exit()
policy = policies[intanswer]
policy.delete()
print("Policy '%s' has been deleted." % policy.name)
| 0.001137 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'DisplayFile'
db.delete_table('submission_displayfile')
def backwards(self, orm):
# Adding model 'DisplayFile'
db.create_table('submission_displayfile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('display_type', self.gf('django.db.models.fields.CharField')(max_length=10)),
('display_obj', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('fhash', self.gf('django.db.models.fields.CharField')(max_length=32)),
))
db.send_create_signal('submission', ['DisplayFile'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filestorage.fileset': {
'Meta': {'object_name': 'FileSet'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'repo_path': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'submission.license': {
'Meta': {'object_name': 'License'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'text_template': ('django.db.models.fields.TextField', [], {})
},
'submission.module': {
'Meta': {'object_name': 'Module'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'submission.revision': {
'Meta': {'ordering': "['date_created']", 'object_name': 'Revision'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_html': ('django.db.models.fields.TextField', [], {}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'entry': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['submission.Submission']"}),
'hash_id': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_displayed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'item_code': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'item_highlighted_code': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'item_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'modules_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['submission.Module']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '155'}),
'sub_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['submission.License']", 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['tagging.Tag']", 'through': "orm['submission.TagCreation']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'update_reason': ('django.db.models.fields.CharField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'validation_hash': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
},
'submission.submission': {
'Meta': {'object_name': 'Submission'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fileset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filestorage.FileSet']", 'null': 'True', 'blank': 'True'}),
'frozen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inspired_by': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'inspired_by_rel_+'", 'null': 'True', 'to': "orm['submission.Submission']"}),
'sub_type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'submission.tagcreation': {
'Meta': {'object_name': 'TagCreation'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['submission.Revision']"}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tagging.Tag']"})
},
'submission.zipfile': {
'Meta': {'object_name': 'ZipFile'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'raw_zip_file': ('django.db.models.fields.files.FileField', [], {'max_length': '1024'}),
'zip_hash': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'tagging.tag': {
'Meta': {'object_name': 'Tag'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'tag_type': ('django.db.models.fields.TextField', [], {'default': "'regular'", 'max_length': '10'})
}
}
    complete_apps = ['submission']
| 0.007928 |
#!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extra output writers for MapReduce."""
__all__ = [
"PostgresOutputWriter"
]
import pg8000
from mapreduce.output_writers import OutputWriter, _get_params
from mapreduce import context
from mapreduce import errors
class PostgresOutputWriter(OutputWriter):
"""A customized output writer for Postgres database.
To use this output writer, the output of your mapper should be well-formatted SQL queries, which
will be executed in batch against the Postgres server that specified by your output_writer parameters.
An example of the parameters below:
...
output_writer_spec="mapreduce.third_party.custom_output_writers.PostgresOutputWriter",
params = {
"input_reader": {
...
},
"output_writer": {
"host": "127.0.0.1",
"port": 5432,
"database": "example_db",
"user": "postgres_user1",
"password": "kjkjegkajgklejkjak"
}
},
"""
def __init__(self, host=None, port=None, database=None, user=None, password=None): # pylint: disable=W0231
self.host = host
self.port = port
self.database = database
self.user = user
self.password = password
@classmethod
def create(cls, mr_spec, shard_number, shard_attempt, _writer_state=None):
mapper_spec = mr_spec.mapper
params = _get_params(mapper_spec)
return cls(host=params.get('host'),
port=params.get('port'),
database=params.get('database'),
user=params.get('user'),
password=params.get('password'))
def write(self, data):
ctx = context.get()
pg_pool = ctx.get_pool('postgres_pool')
if not pg_pool:
pg_pool = _PostgresPool(ctx=ctx,
host=self.host,
port=self.port,
database=self.database,
user=self.user,
password=self.password)
ctx.register_pool('postgres_pool', pg_pool)
pg_pool.append(data)
def to_json(self):
return {
"host": self.host,
"port": self.port,
"database": self.database,
"user": self.user,
"password": self.password
}
@classmethod
def from_json(cls, state):
return cls(host=state.get('host'),
port=state.get('port'),
database=state.get('database'),
user=state.get('user'),
password=state.get('password'))
@classmethod
def validate(cls, mapper_spec):
required_params = ["host", "port", "database", "user", "password"]
if mapper_spec.output_writer_class() != cls:
raise errors.BadWriterParamsError("Output writer class mismatch")
params = _get_params(mapper_spec)
if not all([arg in params for arg in required_params]):
raise errors.BadWriterParamsError("Output writer requires parameters [{}]".format(', '.join(required_params)))
if not isinstance(params.get("port"), int):
raise errors.BadWriterParamsError("Parameter 'port' must be integer.")
@classmethod
def init_job(cls, mapreduce_state):
pass
def finalize(self, ctx, shard_state):
pass
@classmethod
def finalize_job(cls, mapreduce_state):
pass
@classmethod
def get_filenames(cls, mapreduce_state):
return []
class _PostgresPool(context.Pool):
"""A mutation pool that accumulate writes of PostgresOutputWriter."""
PG_POOL_SIZE = 200
def __init__(self, ctx=None, host=None, port=None, database=None, user=None, password=None):
self._queries = []
self._size = 0
self._ctx = ctx
self._conn = pg8000.connect(host=host, port=port, database=database,
user=user, password=password, ssl=True)
def append(self, query):
self._queries.append(query)
self._size += 1
if self._size > self.PG_POOL_SIZE:
self.flush()
def flush(self):
if self._queries:
cur = self._conn.cursor()
for query in self._queries:
cur.execute(query)
cur.close()
self._conn.commit()
self._queries = []
self._size = 0
def __enter__(self):
return self
def __exit__(self, atype, value, traceback):
self.flush()
self._conn.close()
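# --- Example (not part of the original module) -------------------------------
# A minimal sketch of a mapper whose yielded values feed PostgresOutputWriter.
# The table and column names below are hypothetical; the writer only requires
# that every value yielded by the mapper is a complete SQL statement.
def example_sql_mapper(entity):
    """Yields one INSERT statement for each mapped datastore entity."""
    yield ("INSERT INTO events (entity_key, payload) VALUES ('%s', '%s')"
           % (entity.key().name(), entity.payload))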
| 0.008225 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import tempfile
import unittest
from pants.backend.jvm.subsystems.shader import Shader, Shading
from pants.java.distribution.distribution import DistributionLocator
from pants.java.executor import SubprocessExecutor
from pants.util.contextutil import open_zip
from pants.util.dirutil import safe_delete
from pants_test.subsystem.subsystem_util import subsystem_instance
class ShaderTest(unittest.TestCase):
def setUp(self):
self.jarjar = '/not/really/jarjar.jar'
with subsystem_instance(DistributionLocator):
executor = SubprocessExecutor(DistributionLocator.cached())
self.shader = Shader(jarjar_classpath=[self.jarjar], executor=executor)
self.output_jar = '/not/really/shaded.jar'
def populate_input_jar(self, *entries):
fd, input_jar_path = tempfile.mkstemp()
os.close(fd)
self.addCleanup(safe_delete, input_jar_path)
with open_zip(input_jar_path, 'w') as jar:
for entry in entries:
jar.writestr(entry, '0xCAFEBABE')
return input_jar_path
def test_assemble_default_rules(self):
input_jar = self.populate_input_jar('org/pantsbuild/tools/fake/Main.class',
'com/google/common/base/Function.class')
rules = self.shader.assemble_binary_rules('org.pantsbuild.tools.fake.Main', input_jar)
self.assertEqual(Shader.exclude_package('org.pantsbuild.tools.fake'), rules[0])
self.assertIn(Shader.exclude_package('javax.annotation'), rules[1:-1])
self.assertEqual(Shader.shade_package('com.google.common.base'), rules[-1])
def test_assemble_default_rules_default_package(self):
input_jar = self.populate_input_jar('main.class', 'com/google/common/base/Function.class')
rules = self.shader.assemble_binary_rules('main', input_jar)
self.assertEqual(Shader.exclude_package(), rules[0])
self.assertIn(Shader.exclude_package('javax.annotation'), rules[1:-1])
self.assertEqual(Shader.shade_package('com.google.common.base'), rules[-1])
def test_assemble_custom_rules(self):
input_jar = self.populate_input_jar('main.class')
rules = self.shader.assemble_binary_rules('main', input_jar,
custom_rules=[Shader.shade_class('bob'),
Shader.exclude_class('fred')])
self.assertEqual(Shader.shade_class('bob'), rules[0])
self.assertEqual(Shader.exclude_class('fred'), rules[1])
self.assertEqual(Shader.exclude_package(), rules[2])
self.assertIn(Shader.exclude_package('javax.annotation'), rules[3:])
def test_runner_command(self):
input_jar = self.populate_input_jar('main.class', 'com/google/common/base/Function.class')
custom_rules = [Shader.exclude_package('log4j', recursive=True)]
with self.shader.binary_shader(self.output_jar, 'main', input_jar,
custom_rules=custom_rules) as shader:
command = shader.command
self.assertTrue(command.pop(0).endswith('java'))
jar_or_cp = command.pop(0)
self.assertIn(jar_or_cp, {'-cp', 'classpath', '-jar'})
self.assertEqual(self.jarjar, os.path.abspath(command.pop(0)))
if jar_or_cp != '-jar':
# We don't really care what the name of the jarjar main class is - shader.command[2]
command.pop(0)
self.assertEqual('process', command.pop(0))
rules_file = command.pop(0)
self.assertTrue(os.path.exists(rules_file))
with open(rules_file) as fp:
lines = fp.read().splitlines()
self.assertEqual('rule log4j.** log4j.@1', lines[0]) # The custom rule.
self.assertEqual('rule * @1', lines[1]) # Exclude main's package.
self.assertIn('rule javax.annotation.* javax.annotation.@1', lines) # Exclude system.
self.assertEqual('rule com.google.common.base.* {}com.google.common.base.@1'
.format(Shading.SHADE_PREFIX), lines[-1]) # Shade the rest.
self.assertEqual(input_jar, command.pop(0))
self.assertEqual(self.output_jar, command.pop(0))
def test_sanitize_package_name(self):
def assert_sanitize(name, sanitized):
self.assertEqual(sanitized, Shading.Relocate._sanitize_package_name(name))
assert_sanitize('hello', 'hello')
assert_sanitize('hello.goodbye', 'hello.goodbye')
assert_sanitize('.hello.goodbye', 'hello.goodbye')
assert_sanitize('hello.goodbye.', 'hello.goodbye')
assert_sanitize('123', '_123')
assert_sanitize('123.456', '_123._456')
assert_sanitize('123.v2', '_123.v2')
assert_sanitize('hello-goodbye', 'hello_goodbye')
assert_sanitize('hello-/.goodbye.?', 'hello__.goodbye._')
assert_sanitize('one.two..three....four.', 'one.two.three.four')
def test_infer_shaded_pattern(self):
def assert_inference(from_pattern, prefix, to_pattern):
result = ''.join(Shading.Relocate._infer_shaded_pattern_iter(from_pattern, prefix))
self.assertEqual(to_pattern, result)
assert_inference('com.foo.bar.Main', None, 'com.foo.bar.Main')
assert_inference('com.foo.bar.', None, 'com.foo.bar.')
assert_inference('com.foo.bar.', '__prefix__.', '__prefix__.com.foo.bar.')
assert_inference('com.*.bar.', None, '[email protected].')
assert_inference('com.*.bar.*.', None, '[email protected].@2.')
assert_inference('com.*.bar.**', None, '[email protected].@2')
assert_inference('*', None, '@1')
assert_inference('**', None, '@1')
assert_inference('**', '__prefix__.', '__prefix__.@1')
def test_shading_exclude(self):
def assert_exclude(from_pattern, to_pattern):
self.assertEqual((from_pattern, to_pattern), Shading.Exclude.new(from_pattern).rule())
assert_exclude('com.foo.bar.Main', 'com.foo.bar.Main')
assert_exclude('com.foo.bar.**', 'com.foo.bar.@1')
assert_exclude('com.*.bar.**', '[email protected].@2')
def test_shading_exclude_package(self):
self.assertEqual(('com.foo.bar.**', 'com.foo.bar.@1'),
Shading.ExcludePackage.new('com.foo.bar').rule())
self.assertEqual(('com.foo.bar.*', 'com.foo.bar.@1'),
Shading.ExcludePackage.new('com.foo.bar', recursive=False).rule())
def test_relocate(self):
self.assertEqual(('com.foo.bar.**', '{}com.foo.bar.@1'.format(Shading.SHADE_PREFIX)),
Shading.Relocate.new(from_pattern='com.foo.bar.**').rule())
self.assertEqual(('com.foo.bar.**', '{}com.foo.bar.@1'.format('__my_prefix__.')),
Shading.Relocate.new(from_pattern='com.foo.bar.**',
shade_prefix='__my_prefix__.').rule())
self.assertEqual(('com.foo.bar.**', 'org.biz.baz.@1'),
Shading.Relocate.new(from_pattern='com.foo.bar.**',
shade_prefix='__my_prefix__.',
shade_pattern='org.biz.baz.@1').rule())
def test_relocate_package(self):
self.assertEqual(('com.foo.bar.**', '{}com.foo.bar.@1'.format(Shading.SHADE_PREFIX)),
Shading.RelocatePackage.new('com.foo.bar').rule())
self.assertEqual(('com.foo.bar.*', '{}com.foo.bar.@1'.format(Shading.SHADE_PREFIX)),
Shading.RelocatePackage.new('com.foo.bar', recursive=False).rule())
self.assertEqual(('com.foo.bar.**', '__p__.com.foo.bar.@1'),
Shading.RelocatePackage.new('com.foo.bar', shade_prefix='__p__.').rule())
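# Note (not part of the original tests): the rules file that test_runner_command
# inspects uses plain jarjar syntax, one rule per line, e.g.
#   rule log4j.** log4j.@1
#   rule com.google.common.base.* <SHADE_PREFIX>com.google.common.base.@1
# where <SHADE_PREFIX> stands for whatever prefix Shading.SHADE_PREFIX defines.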
| 0.007642 |
# Copyright (c) 2011, Roger Lew [see LICENSE.txt]
# This software is funded in part by NIH Grant P20 RR016454.
"""
Implementation of Gleason's (1999) non-iterative upper quantile
studentized range (q distribution) approximation in Python.
According to Gleason this method should be more accurate than the
AS190 FORTRAN algorithm of Lund and Lund (1983) and works from .5
<= p <= .999 (The AS190 only works from .9 <= p <= .99).
It is more efficient than the Copenhaver & Holland (1988) algorithm
(used by R's _qtukey_ function), although it requires storing the A
table in memory.
see:
Gleason, J. R. (1999). An accurate, non-iterative approximation
for studentized range quantiles. Computational Statistics &
Data Analysis, (31), 147-158.
Gleason, J. R. (1998). A table of quantile points of the
Studentized range distribution.
http://www.stata.com/stb/stb46/dm64/sturng.pdf
"""
import math
import scipy.stats
import numpy as np
from scipy.optimize import fminbound
inf = np.inf
__version__ = '0.2.3'
# changelog
# 0.1 - initial release
# 0.1.1 - vectorized
# 0.2 - psturng added
# 0.2.1 - T, R generation script relegated to make_tbls.py
# 0.2.2
# - select_points refactored for performance to select_ps and
# select_vs
# - pysturng tester added.
# 0.2.3 - uses np.inf and np.isinf
# Gleason's table was derived using least square estimation on the tabled
# r values for combinations of p and v. In total there are 206
# estimates over p-values of .5, .75, .9, .95, .975, .99, .995,
# and .999, and over v (degrees of freedom) of (1) - 20, 24, 30, 40,
# 60, 120, and inf. Combinations with p < .9 don't have coefficients
# for v = 1, hence the parentheses. These coefficients allow us to
# form f-hat. f-hat with the inverse t transform of tinv(p,v) yields
# a fairly accurate estimate of the studentized range distribution
# across a wide range of values. According to Gleason this method
# should be more accurate than algorithm AS190 of Lund and Lund (1983)
# and work across a wider range of values (The AS190 only works
# from .9 <= p <= .99). R's qtukey algorithm was used to add tables
# at .675, .8, and .85. These aid approximations when p < .9.
#
# The code that generated this table is called make_tbls.py and is
# located in version control.
A = {(0.1, 2.0): [-2.2485085243379075, -1.5641014278923464, 0.55942294426816752, -0.060006608853883377],
(0.1, 3.0): [-2.2061105943901564, -1.8415406600571855, 0.61880788039834955, -0.062217093661209831],
(0.1, 4.0): [-2.1686691786678178, -2.008196172372553, 0.65010084431947401, -0.06289005500114471],
(0.1, 5.0): [-2.145077200277393, -2.112454843879346, 0.66701240582821342, -0.062993502233654797],
(0.1, 6.0): [-2.0896098049743155, -2.2400004934286497, 0.70088523391700142, -0.065907568563272748],
(0.1, 7.0): [-2.0689296655661584, -2.3078445479584873, 0.71577374609418909, -0.067081034249350552],
(0.1, 8.0): [-2.0064956480711262, -2.437400413087452, 0.76297532367415266, -0.072805518121505458],
(0.1, 9.0): [-2.3269477513436061, -2.0469494712773089, 0.60662518717720593, -0.054887108437009016],
(0.1, 10.0): [-2.514024350177229, -1.8261187841127482, 0.51674358077906746, -0.044590425150963633],
(0.1, 11.0): [-2.5130181309130828, -1.8371718595995694, 0.51336701694862252, -0.043761825829092445],
(0.1, 12.0): [-2.5203508109278823, -1.8355687130611862, 0.5063486549107169, -0.042646205063108261],
(0.1, 13.0): [-2.5142536438310477, -1.8496969402776282, 0.50616991367764153, -0.042378379905665363],
(0.1, 14.0): [-2.3924634153781352, -2.013859173066078, 0.56421893251638688, -0.048716888109540266],
(0.1, 15.0): [-2.3573552940582574, -2.0576676976224362, 0.57424068771143233, -0.049367487649225841],
(0.1, 16.0): [-2.3046427483044871, -2.1295959138627993, 0.59778272657680553, -0.051864829216301617],
(0.1, 17.0): [-2.2230551072316125, -2.2472837435427127, 0.64255758243215211, -0.057186665209197643],
(0.1, 18.0): [-2.3912859179716897, -2.0350604070641269, 0.55924788749333332, -0.047729331835226464],
(0.1, 19.0): [-2.4169773092220623, -2.0048217969339146, 0.54493039319748915, -0.045991241346224065],
(0.1, 20.0): [-2.4264087194660751, -1.9916614057049267, 0.53583555139648154, -0.04463049934517662],
(0.1, 24.0): [-2.3969903132061869, -2.0252941869225345, 0.53428382141200137, -0.043116495567779786],
(0.1, 30.0): [-2.2509922780354623, -2.2309248956124894, 0.60748041324937263, -0.051427415888817322],
(0.1, 40.0): [-2.1310090183854946, -2.3908466074610564, 0.65844375382323217, -0.05676653804036895],
(0.1, 60.0): [-1.9240060179027036, -2.6685751031012233, 0.75678826647453024, -0.067938584352398995],
(0.1, 120.0): [-1.9814895487030182, -2.5962051736978373, 0.71793969041292693, -0.063126863201511618],
(0.1, inf): [-1.913410267066703, -2.6947367328724732, 0.74742335122750592, -0.06660897234304515],
(0.5, 2.0): [-0.88295935738770648, -0.1083576698911433, 0.035214966839394388, -0.0028576288978276461],
(0.5, 3.0): [-0.89085829205846834, -0.10255696422201063, 0.033613638666631696, -0.0027101699918520737],
(0.5, 4.0): [-0.89627345339338116, -0.099072524607668286, 0.032657774808907684, -0.0026219007698204916],
(0.5, 5.0): [-0.89959145511941052, -0.097272836582026817, 0.032236187675182958, -0.0025911555217019663],
(0.5, 6.0): [-0.89959428735702474, -0.098176292411106647, 0.032590766960226995, -0.0026319890073613164],
(0.5, 7.0): [-0.90131491102863937, -0.097135907620296544, 0.032304124993269533, -0.0026057965808244125],
(0.5, 8.0): [-0.90292500599432901, -0.096047500971337962, 0.032030946615574568, -0.0025848748659053891],
(0.5, 9.0): [-0.90385598607803697, -0.095390771554571888, 0.031832651111105899, -0.0025656060219315991],
(0.5, 10.0): [-0.90562524936125388, -0.093954488089771915, 0.031414451048323286, -0.0025257834705432031],
(0.5, 11.0): [-0.90420347371173826, -0.095851656370277288, 0.0321150356209743, -0.0026055056400093451],
(0.5, 12.0): [-0.90585973471757664, -0.094449306296728028, 0.031705945923210958, -0.0025673330195780191],
(0.5, 13.0): [-0.90555437067293054, -0.094792991050780248, 0.031826594964571089, -0.0025807109129488545],
(0.5, 14.0): [-0.90652756604388762, -0.093792156994564738, 0.031468966328889042, -0.0025395175361083741],
(0.5, 15.0): [-0.90642323700400085, -0.094173017520487984, 0.031657517378893905, -0.0025659271829033877],
(0.5, 16.0): [-0.90716338636685234, -0.093785178083820434, 0.031630091949657997, -0.0025701459247416637],
(0.5, 17.0): [-0.90790133816769714, -0.093001147638638884, 0.031376863944487084, -0.002545143621663892],
(0.5, 18.0): [-0.9077432927051563, -0.093343516378180599, 0.031518139662395313, -0.0025613906133277178],
(0.5, 19.0): [-0.90789499456490286, -0.09316964789456067, 0.031440782366342901, -0.0025498353345867453],
(0.5, 20.0): [-0.90842707861030725, -0.092696016476608592, 0.031296040311388329, -0.0025346963982742186],
(0.5, 24.0): [-0.9083281347135469, -0.092959308144970776, 0.031464063190077093, -0.0025611384271086285],
(0.5, 30.0): [-0.90857624050016828, -0.093043139391980514, 0.031578791729341332, -0.0025766595412777147],
(0.5, 40.0): [-0.91034085045438684, -0.091978035738914568, 0.031451631000052639, -0.0025791418103733297],
(0.5, 60.0): [-0.91084356681030032, -0.091452675572423425, 0.031333147984820044, -0.0025669786958144843],
(0.5, 120.0): [-0.90963649561463833, -0.093414563261352349, 0.032215602703677425, -0.0026704024780441257],
(0.5, inf): [-0.91077157500981665, -0.092899220350334571, 0.032230422399363315, -0.0026696941964372916],
(0.675, 2.0): [-0.67231521026565144, -0.097083624030663451, 0.027991378901661649, -0.0021425184069845558],
(0.675, 3.0): [-0.65661724764645824, -0.08147195494632696, 0.02345732427073333, -0.0017448570400999351],
(0.675, 4.0): [-0.65045677697461124, -0.071419073399450431, 0.020741962576852499, -0.0015171262565892491],
(0.675, 5.0): [-0.64718875357808325, -0.064720611425218344, 0.019053450246546449, -0.0013836232986228711],
(0.675, 6.0): [-0.64523003702018655, -0.059926313672731824, 0.017918997181483924, -0.0012992250285556828],
(0.675, 7.0): [-0.64403313148478836, -0.056248191513784476, 0.017091446791293721, -0.0012406558789511822],
(0.675, 8.0): [-0.64325095865764359, -0.053352543126426684, 0.016471879286491072, -0.0011991839050964099],
(0.675, 9.0): [-0.64271152754911653, -0.051023769620449078, 0.01599799600547195, -0.0011693637984597086],
(0.675, 10.0): [-0.64232244408502626, -0.049118327462884373, 0.015629704966568955, -0.0011477775513952285],
(0.675, 11.0): [-0.64203897854353564, -0.047524627960277892, 0.015334801262767227, -0.0011315057284007177],
(0.675, 12.0): [-0.64180344973512771, -0.046205907576003291, 0.015108290595438166, -0.0011207364514518488],
(0.675, 13.0): [-0.64162086456823342, -0.045076099336874231, 0.0149226565346125, -0.0011126140690497352],
(0.675, 14.0): [-0.64146906480198984, -0.044108523550512715, 0.014772954218646743, -0.0011069708562369386],
(0.675, 15.0): [-0.64133915151966603, -0.043273370927039825, 0.014651691599222836, -0.0011032216539514398],
(0.675, 16.0): [-0.64123237842752079, -0.042538925012463868, 0.014549992487506169, -0.0011005633864334021],
(0.675, 17.0): [-0.64113034037536609, -0.041905699463005854, 0.014470805560767184, -0.0010995286436738471],
(0.675, 18.0): [-0.64104137391561256, -0.041343885546229336, 0.014404563657113593, -0.0010991304223377683],
(0.675, 19.0): [-0.64096064882827297, -0.04084569291139839, 0.014350159655133801, -0.0010993656711121901],
(0.675, 20.0): [-0.64088647405089572, -0.040402175957178085, 0.014305769823654429, -0.0011001304776712105],
(0.675, 24.0): [-0.64063763965937837, -0.039034716348048545, 0.014196703837251648, -0.0011061961945598175],
(0.675, 30.0): [-0.64034987716294889, -0.037749651156941719, 0.014147040999127263, -0.0011188251352919833],
(0.675, 40.0): [-0.6399990514713938, -0.036583307574857803, 0.014172070700846548, -0.0011391004138624943],
(0.675, 60.0): [-0.63955586202430248, -0.035576938958184395, 0.014287299153378865, -0.0011675811805794236],
(0.675, 120.0): [-0.63899242674778622, -0.034763757512388853, 0.014500726912982405, -0.0012028491454427466],
(0.675, inf): [-0.63832682579247613, -0.034101476695520404, 0.014780921043580184, -0.0012366204114216408],
(0.75, 2.0): [-0.60684073638504454, -0.096375192078057031, 0.026567529471304554, -0.0019963228971914488],
(0.75, 3.0): [-0.57986144519102656, -0.078570292718034881, 0.021280637925009449, -0.0015329306898533772],
(0.75, 4.0): [-0.56820771686193594, -0.0668113563896649, 0.018065284051059189, -0.0012641485481533648],
(0.75, 5.0): [-0.56175292435740221, -0.058864526929603825, 0.016046735025708799, -0.0011052560286524044],
(0.75, 6.0): [-0.55773449282066356, -0.053136923269827351, 0.014684258167069347, -0.0010042826823561605],
(0.75, 7.0): [-0.55509524598867332, -0.048752649191139405, 0.013696566605823626, -0.00093482210003133898],
(0.75, 8.0): [-0.55324993686191515, -0.045305558708724644, 0.012959681992062138, -0.00088583541601696021],
(0.75, 9.0): [-0.55189259054026196, -0.042539819902381634, 0.012398791106424769, -0.00085083962241435827],
(0.75, 10.0): [-0.55085384656956893, -0.040281425755686585, 0.01196442242722482, -0.00082560322161492677],
(0.75, 11.0): [-0.55003198103541273, -0.038410176100193948, 0.011623294239447784, -0.00080732975034320073],
(0.75, 12.0): [-0.54936541596319177, -0.036838543267887103, 0.011351822637895701, -0.0007940703654926442],
(0.75, 13.0): [-0.54881015972753833, -0.035506710625568455, 0.011134691307865171, -0.0007846360016355809],
(0.75, 14.0): [-0.54834094346071949, -0.034364790609906569, 0.010958873929274728, -0.00077796645357008291],
(0.75, 15.0): [-0.54793602418304255, -0.033379237455748029, 0.010816140998057593, -0.00077344175064785099],
(0.75, 16.0): [-0.54758347689728037, -0.032520569145898917, 0.010699240399358219, -0.00077050847328596678],
(0.75, 17.0): [-0.54727115963795303, -0.031769277192927527, 0.010603749751170481, -0.0007688642392748113],
(0.75, 18.0): [-0.54699351808826535, -0.031105476267880995, 0.010524669113016114, -0.00076810656837464093],
(0.75, 19.0): [-0.54674357626419079, -0.030516967201954001, 0.010459478822937069, -0.00076808652582440037],
(0.75, 20.0): [-0.54651728378950126, -0.029992319199769232, 0.010405694998386575, -0.0007686417223966138],
(0.75, 24.0): [-0.54578309546828363, -0.028372628574010936, 0.010269939602271542, -0.00077427370647261838],
(0.75, 30.0): [-0.54501246434397554, -0.026834887880579802, 0.010195603314317611, -0.00078648615954105515],
(0.75, 40.0): [-0.54418127442022624, -0.025413224488871379, 0.010196455193836855, -0.00080610785749523739],
(0.75, 60.0): [-0.543265189207915, -0.024141961069146383, 0.010285001019536088, -0.00083332193364294587],
(0.75, 120.0): [-0.54224757817994806, -0.023039071833948214, 0.010463365295636302, -0.00086612828539477918],
(0.75, inf): [-0.54114579815367159, -0.02206592527426093, 0.01070374099737127, -0.00089726564005122183],
(0.8, 2.0): [-0.56895274046831146, -0.096326255190541957, 0.025815915364208686, -0.0019136561019354845],
(0.8, 3.0): [-0.5336038380862278, -0.077585191014876181, 0.020184759265389905, -0.0014242746007323785],
(0.8, 4.0): [-0.51780274285934258, -0.064987738443608709, 0.016713309796866204, -0.001135379856633562],
(0.8, 5.0): [-0.50894361222268403, -0.056379186603362705, 0.014511270339773345, -0.00096225604117493205],
(0.8, 6.0): [-0.50335153028630408, -0.050168860294790812, 0.01302807093593626, -0.00085269812692536306],
(0.8, 7.0): [-0.49960934380896432, -0.045417333787806033, 0.011955593330247398, -0.00077759605604250882],
(0.8, 8.0): [-0.49694518248979763, -0.041689151516021969, 0.011158986677273709, -0.00072497430103953366],
(0.8, 9.0): [-0.4949559974898507, -0.038702217132906024, 0.010554360004521268, -0.0006875213117164109],
(0.8, 10.0): [-0.49341407910162483, -0.036266788741325398, 0.010087354421936092, -0.00066060835062865602],
(0.8, 11.0): [-0.49218129312493897, -0.034252403643273498, 0.0097218584838579536, -0.00064123459335201907],
(0.8, 12.0): [-0.49117223957112183, -0.032563269730499021, 0.0094318583096021404, -0.00062725253852419032],
(0.8, 13.0): [-0.49032781145131277, -0.031132495018324432, 0.0091999762562792898, -0.0006172944366003854],
(0.8, 14.0): [-0.48961049628464259, -0.029906921170494854, 0.009012451847823854, -0.00061026211968669543],
(0.8, 15.0): [-0.48899069793054922, -0.028849609914548158, 0.0088602820002619594, -0.00060548991575179055],
(0.8, 16.0): [-0.48844921216636505, -0.027929790075266154, 0.00873599263877896, -0.00060242119796859379],
(0.8, 17.0): [-0.48797119683309537, -0.027123634910159868, 0.0086338139869481887, -0.00060061821593399998],
(0.8, 18.0): [-0.48754596864745836, -0.026411968723496961, 0.0085493196604705755, -0.00059977083160833624],
(0.8, 19.0): [-0.48716341805691843, -0.025781422230819986, 0.0084796655915025769, -0.00059970031758323466],
(0.8, 20.0): [-0.48681739197185547, -0.025219629852198749, 0.0084221844254287765, -0.00060023212822886711],
(0.8, 24.0): [-0.48570639629281365, -0.023480608772518948, 0.008274490561114187, -0.000605681105792215],
(0.8, 30.0): [-0.48455867067770253, -0.021824655071720423, 0.0081888502974720567, -0.00061762126933785633],
(0.8, 40.0): [-0.48335478729267423, -0.020279958998363389, 0.0081765095914194709, -0.00063657117129829635],
(0.8, 60.0): [-0.48207351944996679, -0.018875344346672228, 0.0082473997191472338, -0.00066242478479277243],
(0.8, 120.0): [-0.48070356185330182, -0.017621686995755746, 0.0084009638803223801, -0.00069300383808949318],
(0.8, inf): [-0.47926687718713606, -0.016476575352367202, 0.0086097059646591811, -0.00072160843492730911],
(0.85, 2.0): [-0.53366806986381743, -0.098288178252723263, 0.026002333446289064, -0.0019567144268844896],
(0.85, 3.0): [-0.48995919239619989, -0.077312722648418056, 0.019368984865418108, -0.0013449670192265796],
(0.85, 4.0): [-0.46956079162382858, -0.063818518513946695, 0.015581608910696544, -0.0010264315084377606],
(0.85, 5.0): [-0.45790853796153624, -0.054680511194530226, 0.013229852432203093, -0.00084248430847535898],
(0.85, 6.0): [-0.4505070841695738, -0.048050936682873302, 0.011636407582714191, -0.00072491480033529815],
(0.85, 7.0): [-0.44548337477336181, -0.042996612516383016, 0.010493052959891263, -0.00064528784792153239],
(0.85, 8.0): [-0.44186624932664148, -0.039040005821657585, 0.0096479530794160544, -0.00058990874360967567],
(0.85, 9.0): [-0.43914118689812259, -0.035875693030752713, 0.0090088804130628187, -0.00055071480339399694],
(0.85, 10.0): [-0.43701255390953769, -0.033300997407157376, 0.0085172159355344848, -0.00052272770799695464],
(0.85, 11.0): [-0.43530109064899053, -0.031174742038490313, 0.0081335619868386066, -0.00050268353809787927],
(0.85, 12.0): [-0.43389220376610071, -0.02939618314990838, 0.007830626267772851, -0.00048836431712678222],
(0.85, 13.0): [-0.43271026958463166, -0.027890759135246888, 0.0075886916668632936, -0.00047819339710596971],
(0.85, 14.0): [-0.43170230265007209, -0.026604156062396189, 0.0073939099688705547, -0.00047109996854335419],
(0.85, 15.0): [-0.43083160459377423, -0.025494228911600785, 0.0072358738657550868, -0.00046630677052262481],
(0.85, 16.0): [-0.4300699280587239, -0.024529612608808794, 0.0071069227026219683, -0.00046323869860941791],
(0.85, 17.0): [-0.42939734931902857, -0.023685025616054269, 0.0070011541609695891, -0.00046147954942994158],
(0.85, 18.0): [-0.42879829041505324, -0.022940655682782165, 0.006914006369119409, -0.00046070877994711774],
(0.85, 19.0): [-0.42826119448419875, -0.022280181781634649, 0.0068417746905826433, -0.00046066841214091982],
(0.85, 20.0): [-0.42777654887094479, -0.021690909076747832, 0.0067817408643717969, -0.00046118620289068032],
(0.85, 24.0): [-0.42622450033640852, -0.019869646711890065, 0.0066276799593494029, -0.00046668820637553747],
(0.85, 30.0): [-0.42463810443233418, -0.018130114737381745, 0.0065344613060499164, -0.00047835583417510423],
(0.85, 40.0): [-0.42299917804589382, -0.016498222901308417, 0.0065120558343578407, -0.00049656043685325469],
(0.85, 60.0): [-0.42129387265810464, -0.014992121475265813, 0.0065657795990087635, -0.00052069705640687698],
(0.85, 120.0): [-0.41951580476366368, -0.013615722489371183, 0.0066923911275726814, -0.00054846911649167492],
(0.85, inf): [-0.41768751825428968, -0.012327525092266726, 0.0068664920569562592, -0.00057403720261753539],
(0.9, 1.0): [-0.65851063279096722, -0.126716242078905, 0.036318801917603061, -0.002901283222928193],
(0.9, 2.0): [-0.50391945369829139, -0.096996108021146235, 0.024726437623473398, -0.0017901399938303017],
(0.9, 3.0): [-0.44799791843058734, -0.077180370333307199, 0.018584042055594469, -0.0012647038118363408],
(0.9, 4.0): [-0.42164091756145167, -0.063427071006287514, 0.014732203755741392, -0.00094904174117957688],
(0.9, 5.0): [-0.40686856251221754, -0.053361940054842398, 0.012041802076025801, -0.00072960198292410612],
(0.9, 6.0): [-0.39669926026535285, -0.046951517438004242, 0.010546647213094956, -0.00062621198002366064],
(0.9, 7.0): [-0.39006553675807426, -0.04169480606532109, 0.0093687546601737195, -0.00054648695713273862],
(0.9, 8.0): [-0.38570205067061908, -0.037083910859179794, 0.0083233218526375836, -0.00047177586974035451],
(0.9, 9.0): [-0.38190737267892938, -0.034004585655388865, 0.0077531991574119183, -0.00044306547308527872],
(0.9, 10.0): [-0.37893272918125737, -0.031394677600916979, 0.0072596802503533536, -0.0004160518834299966],
(0.9, 11.0): [-0.37692512492705132, -0.028780793403136471, 0.0066937909049060379, -0.00037420010136784526],
(0.9, 12.0): [-0.37506345200129187, -0.026956483290567372, 0.0064147730707776523, -0.00036595383207062906],
(0.9, 13.0): [-0.37339516122383209, -0.02543949524844704, 0.0061760656530197187, -0.00035678737379179527],
(0.9, 14.0): [-0.37216979891087842, -0.02396347606956644, 0.0059263234465969641, -0.0003439784452550796],
(0.9, 15.0): [-0.371209456600122, -0.022696132732654414, 0.0057521677184623147, -0.00033961108561770848],
(0.9, 16.0): [-0.36958924377983338, -0.022227885445863002, 0.0057691706799383926, -0.00035042762538099682],
(0.9, 17.0): [-0.36884224719083203, -0.021146977888668726, 0.0055957928269732716, -0.00034283810412697531],
(0.9, 18.0): [-0.36803087186793326, -0.020337731477576542, 0.0054655378095212759, -0.00033452966946535248],
(0.9, 19.0): [-0.3676700404163355, -0.019370115848857467, 0.0053249296207149655, -0.00032975528909580403],
(0.9, 20.0): [-0.36642276267188811, -0.019344251412284838, 0.0054454968582897528, -0.00034868111677540948],
(0.9, 24.0): [-0.36450650753755193, -0.017284255499990679, 0.0052337500059176749, -0.00034898202845747288],
(0.9, 30.0): [-0.36251868940168608, -0.015358560437631397, 0.0050914299956134786, -0.00035574528891633978],
(0.9, 40.0): [-0.36008886676510943, -0.014016835682905486, 0.0051930835959111514, -0.00038798316011984165],
(0.9, 60.0): [-0.35825590690268061, -0.011991568926537646, 0.0050632208542414191, -0.00039090198974493085],
(0.9, 120.0): [-0.35543612237284411, -0.011074403997811812, 0.0053504570752765162, -0.00043647137428074178],
(0.9, inf): [-0.35311806343057167, -0.0096254020092145353, 0.0054548591208177181, -0.00045343916634968493],
(0.95, 1.0): [-0.65330318136020071, -0.12638310760474375, 0.035987535130769424, -0.0028562665467665315],
(0.95, 2.0): [-0.47225160417826934, -0.10182570362271424, 0.025846563499059158, -0.0019096769058043243],
(0.95, 3.0): [-0.4056635555586528, -0.077067172693350297, 0.017789909647225533, -0.001182961668735774],
(0.95, 4.0): [-0.37041675177340955, -0.063815687118939465, 0.014115210247737845, -0.00089996098435117598],
(0.95, 5.0): [-0.35152398291152309, -0.052156502640669317, 0.010753738086401853, -0.0005986841939451575],
(0.95, 6.0): [-0.33806730015201264, -0.045668399809578597, 0.0093168898952878162, -0.00051369719615782102],
(0.95, 7.0): [-0.32924041072104465, -0.040019601775490091, 0.0080051199552865163, -0.00042054536135868043],
(0.95, 8.0): [-0.32289030266989077, -0.035575345931670443, 0.0070509089344694669, -0.00035980773304803576],
(0.95, 9.0): [-0.31767304201477375, -0.032464945930165703, 0.0064755950437272143, -0.0003316676253661824],
(0.95, 10.0): [-0.31424318064708656, -0.029133461621153, 0.0057437449431074795, -0.00027894252261209191],
(0.95, 11.0): [-0.31113589620384974, -0.02685115250591049, 0.0053517905282942889, -0.00026155954116874666],
(0.95, 12.0): [-0.30848983612414582, -0.025043238019239168, 0.0050661675913488829, -0.00025017202909614005],
(0.95, 13.0): [-0.3059212907410393, -0.023863874699213077, 0.0049618051135807322, -0.00025665425781125703],
(0.95, 14.0): [-0.30449676902720035, -0.021983976741572344, 0.0045740513735751968, -0.00022881166323945914],
(0.95, 15.0): [-0.30264908294481396, -0.02104880307520084, 0.0044866571614804382, -0.00023187587597844057],
(0.95, 16.0): [-0.30118294463097917, -0.020160231061926728, 0.0044170780759056859, -0.00023733502359045826],
(0.95, 17.0): [-0.30020013353427744, -0.018959271614471574, 0.0041925333038202285, -0.00022274025630789767],
(0.95, 18.0): [-0.29857886556874402, -0.018664437456802001, 0.0042557787632833697, -0.00023758868868853716],
(0.95, 19.0): [-0.29796289236978263, -0.017632218552317589, 0.0040792779937959866, -0.00022753271474613109],
(0.95, 20.0): [-0.29681506554838077, -0.017302563243037392, 0.0041188426221428964, -0.00023913038468772782],
(0.95, 24.0): [-0.29403146911167666, -0.015332330986025032, 0.0039292170319163728, -0.00024003445648641732],
(0.95, 30.0): [-0.29080775563775879, -0.013844059210779323, 0.0039279165616059892, -0.00026085104496801666],
(0.95, 40.0): [-0.28821583032805109, -0.011894686715666892, 0.0038202623278839982, -0.00026933325102031252],
(0.95, 60.0): [-0.28525636737751447, -0.010235910558409797, 0.0038147029777580001, -0.00028598362144178959],
(0.95, 120.0): [-0.28241065885026539, -0.0086103836327305026, 0.0038450612886908714, -0.00030206053671559411],
(0.95, inf): [-0.27885570064169296, -0.0078122455524849222, 0.0041798538053623453, -0.0003469494881774609],
(0.975, 1.0): [-0.65203598304297983, -0.12608944279227957, 0.035710038757117347, -0.0028116024425349053],
(0.975, 2.0): [-0.46371891130382281, -0.096954458319996509, 0.023958312519912289, -0.0017124565391080503],
(0.975, 3.0): [-0.38265282195259875, -0.076782539231612282, 0.017405078796142955, -0.0011610853687902553],
(0.975, 4.0): [-0.34051193158878401, -0.063652342734671602, 0.013528310336964293, -0.00083644708934990761],
(0.975, 5.0): [-0.31777655705536484, -0.051694686914334619, 0.010115807205265859, -0.00054517465344192009],
(0.975, 6.0): [-0.30177149019958716, -0.044806697631189059, 0.008483551848413786, -0.00042827853925009264],
(0.975, 7.0): [-0.29046972313293562, -0.039732822689098744, 0.007435356037378946, -0.00037562928283350671],
(0.975, 8.0): [-0.28309484007368141, -0.034764904940713388, 0.0062932513694928518, -0.00029339243611357956],
(0.975, 9.0): [-0.27711707948119785, -0.031210465194810709, 0.0055576244284178435, -0.00024663798208895803],
(0.975, 10.0): [-0.27249203448553611, -0.028259756468251584, 0.00499112012528406, -0.00021535380417035389],
(0.975, 11.0): [-0.26848515860011007, -0.026146703336893323, 0.0046557767110634073, -0.00020400628148271448],
(0.975, 12.0): [-0.26499921540008192, -0.024522931106167097, 0.0044259624958665278, -0.00019855685376441687],
(0.975, 13.0): [-0.2625023751891592, -0.022785875653297854, 0.004150277321193792, -0.00018801223218078264],
(0.975, 14.0): [-0.26038552414321758, -0.021303509859738341, 0.0039195608280464681, -0.00017826200169385824],
(0.975, 15.0): [-0.25801244886414665, -0.020505508012402567, 0.0038754868932712929, -0.00018588907991739744],
(0.975, 16.0): [-0.25685316062360508, -0.018888418269740373, 0.0035453092842317293, -0.00016235770674204116],
(0.975, 17.0): [-0.25501132271353549, -0.018362951972357794, 0.0035653933105288631, -0.00017470353354992729],
(0.975, 18.0): [-0.25325045404452656, -0.017993537285026156, 0.0036035867405376691, -0.00018635492166426884],
(0.975, 19.0): [-0.25236899494677928, -0.016948921372207198, 0.0034138931781330802, -0.00017462253414687881],
(0.975, 20.0): [-0.25134498025027691, -0.016249564498874988, 0.0033197284005334333, -0.00017098091103245596],
(0.975, 24.0): [-0.24768690797476625, -0.014668160763513996, 0.0032850791186852558, -0.00019013480716844995],
(0.975, 30.0): [-0.24420834707522676, -0.012911171716272752, 0.0031977676700968051, -0.00020114907914487053],
(0.975, 40.0): [-0.24105725356215926, -0.010836526056169627, 0.0030231303550754159, -0.00020128696343148667],
(0.975, 60.0): [-0.23732082703955223, -0.0095442727157385391, 0.0031432904473555259, -0.00023062224109383941],
(0.975, 120.0): [-0.23358581879594578, -0.0081281259918709343, 0.0031877298679120094, -0.00024496230446851501],
(0.975, inf): [-0.23004105093119268, -0.0067112585174133573, 0.0032760251638919435, -0.00026244001319462992],
(0.99, 1.0): [-0.65154119422706203, -0.1266603927572312, 0.03607480609672048, -0.0028668112687608113],
(0.99, 2.0): [-0.45463403324378804, -0.098701236234527367, 0.024412715761684689, -0.0017613772919362193],
(0.99, 3.0): [-0.36402060051035778, -0.079244959193729148, 0.017838124021360584, -0.00119080116484847],
(0.99, 4.0): [-0.31903506063953818, -0.061060740682445241, 0.012093154962939612, -0.00067268347188443093],
(0.99, 5.0): [-0.28917014580689182, -0.052940780099313689, 0.010231009146279354, -0.00057178339184615239],
(0.99, 6.0): [-0.27283240161179012, -0.042505435573209085, 0.0072753401118264534, -0.00031314034710725922],
(0.99, 7.0): [-0.25773968720546719, -0.039384214480463406, 0.0069120882597286867, -0.00032994068754356204],
(0.99, 8.0): [-0.24913629282433833, -0.033831567178432859, 0.0055516244725724185, -0.00022570786249671376],
(0.99, 9.0): [-0.24252380896373404, -0.029488280751457097, 0.0045215453527922998, -0.00014424552929022646],
(0.99, 10.0): [-0.23654349556639986, -0.02705600214566789, 0.0041627255469343632, -0.00013804427029504753],
(0.99, 11.0): [-0.23187404969432468, -0.024803662094970855, 0.0037885852786822475, -0.00012334999287725012],
(0.99, 12.0): [-0.22749929386320905, -0.023655085290534145, 0.0037845051889055896, -0.00014785715789924055],
(0.99, 13.0): [-0.22458989143485605, -0.021688394892771506, 0.0034075294601425251, -0.00012436961982044268],
(0.99, 14.0): [-0.22197623872225777, -0.020188830700102918, 0.0031648685865587473, -0.00011320740119998819],
(0.99, 15.0): [-0.2193924323730066, -0.019327469111698265, 0.0031295453754886576, -0.00012373072900083014],
(0.99, 16.0): [-0.21739436875855705, -0.018215854969324128, 0.0029638341057222645, -0.00011714667871412003],
(0.99, 17.0): [-0.21548926805467686, -0.017447822179412719, 0.0028994805120482812, -0.00012001887015183794],
(0.99, 18.0): [-0.21365014687077843, -0.01688869353338961, 0.0028778031289216546, -0.00012591199104792711],
(0.99, 19.0): [-0.21236653761262406, -0.016057151563612645, 0.0027571468998022017, -0.00012049196593780046],
(0.99, 20.0): [-0.21092693178421842, -0.015641706950956638, 0.0027765989877361293, -0.00013084915163086915],
(0.99, 24.0): [-0.20681960327410207, -0.013804298040271909, 0.0026308276736585674, -0.0001355061502101814],
(0.99, 30.0): [-0.20271691131071576, -0.01206095288359876, 0.0025426138004198909, -0.00014589047959047533],
(0.99, 40.0): [-0.19833098054449289, -0.010714533963740719, 0.0025985992420317597, -0.0001688279944262007],
(0.99, 60.0): [-0.19406768821236584, -0.0093297106482013985, 0.0026521518387539584, -0.00018884874193665104],
(0.99, 120.0): [-0.19010213174677365, -0.0075958207221300924, 0.0025660823297025633, -0.00018906475172834352],
(0.99, inf): [-0.18602070255787137, -0.0062121155165363188, 0.0026328293420766593, -0.00020453366529867131],
(0.995, 1.0): [-0.65135583544951825, -0.1266868999507193, 0.036067522182457165, -0.0028654516958844922],
(0.995, 2.0): [-0.45229774013072793, -0.09869462954369547, 0.024381858599368908, -0.0017594734553033394],
(0.995, 3.0): [-0.35935765236429706, -0.076650408326671915, 0.016823026893528978, -0.0010835134496404637],
(0.995, 4.0): [-0.30704474720931169, -0.063093047731613019, 0.012771683306774929, -0.00075852491621809955],
(0.995, 5.0): [-0.27582551740863454, -0.052533353137885791, 0.0097776009845174372, -0.00051338031756399129],
(0.995, 6.0): [-0.25657971464398704, -0.043424914996692286, 0.0074324147435969991, -0.00034105188850494067],
(0.995, 7.0): [-0.24090407819707738, -0.039591604712200287, 0.0068848429451020387, -0.00034737131709273414],
(0.995, 8.0): [-0.23089540800827862, -0.034353305816361958, 0.0056009527629820111, -0.00024389336976992433],
(0.995, 9.0): [-0.22322694848310584, -0.030294770709722547, 0.0046751239747245543, -0.00017437479314218922],
(0.995, 10.0): [-0.21722684126671632, -0.026993563560163809, 0.0039811592710905491, -0.00013135281785826703],
(0.995, 11.0): [-0.21171635822852911, -0.025156193618212551, 0.0037507759652964205, -0.00012959836685175671],
(0.995, 12.0): [-0.20745332165849167, -0.023318819535607219, 0.0034935020002058903, -0.00012642826898405916],
(0.995, 13.0): [-0.20426054591612508, -0.021189796175249527, 0.003031472176128759, -9.0497733877531618e-05],
(0.995, 14.0): [-0.20113536905578902, -0.020011536696623061, 0.0029215880889956729, -9.571527213951222e-05],
(0.995, 15.0): [-0.19855601561006403, -0.018808533734002542, 0.0027608859956002344, -9.2472995256929217e-05],
(0.995, 16.0): [-0.19619157579534008, -0.017970461530551096, 0.0027113719105000371, -9.9864874982890861e-05],
(0.995, 17.0): [-0.19428015140726104, -0.017009762497670704, 0.0025833389598201345, -9.6137545738061124e-05],
(0.995, 18.0): [-0.19243180236773033, -0.01631617252107519, 0.0025227443561618621, -9.8067580523432881e-05],
(0.995, 19.0): [-0.19061294393069844, -0.01586226613672222, 0.0025207005902641781, -0.00010466151274918466],
(0.995, 20.0): [-0.18946302696580328, -0.014975796567260896, 0.0023700506576419867, -9.5507779057884629e-05],
(0.995, 24.0): [-0.18444251428695257, -0.013770955893918012, 0.0024579445553339903, -0.00012688402863358003],
(0.995, 30.0): [-0.18009742499570078, -0.011831341846559026, 0.0022801125189390046, -0.00012536249967254906],
(0.995, 40.0): [-0.17562721880943261, -0.010157142650455463, 0.0022121943861923474, -0.000134542652873434],
(0.995, 60.0): [-0.17084630673594547, -0.0090224965852754805, 0.0023435529965815565, -0.00016240306777440115],
(0.995, 120.0): [-0.16648414081054147, -0.0074792163241677225, 0.0023284585524533607, -0.00017116464012147041],
(0.995, inf): [-0.16213921875452461, -0.0058985998630496144, 0.0022605819363689093, -0.00016896211491119114],
(0.999, 1.0): [-0.65233994072089363, -0.12579427445444219, 0.035830577995679271, -0.0028470555202945564],
(0.999, 2.0): [-0.45050164311326341, -0.098294804380698292, 0.024134463919493736, -0.0017269603956852841],
(0.999, 3.0): [-0.35161741499307819, -0.076801152272374273, 0.016695693063138672, -0.0010661121974071864],
(0.999, 4.0): [-0.29398448788574133, -0.06277319725219685, 0.012454220010543127, -0.00072644165723402445],
(0.999, 5.0): [-0.25725364564365477, -0.053463787584337355, 0.0099664236557431545, -0.00054866039388980659],
(0.999, 6.0): [-0.23674225795168574, -0.040973155890031254, 0.0062599481191736696, -0.00021565734226586692],
(0.999, 7.0): [-0.21840108878983297, -0.037037020271877719, 0.0055908063671900703, -0.00020238790479809623],
(0.999, 8.0): [-0.2057964743918449, -0.032500885103194356, 0.0046441644585661756, -0.00014769592268680274],
(0.999, 9.0): [-0.19604592954882674, -0.029166922919677936, 0.0040644333111949814, -0.00012854052861297006],
(0.999, 10.0): [-0.18857328935948367, -0.026316705703161091, 0.0035897350868809275, -0.00011572282691335702],
(0.999, 11.0): [-0.18207431428535406, -0.024201081944369412, 0.0031647372098056077, -8.1145935982296439e-05],
(0.999, 12.0): [-0.17796358148991101, -0.021054306118620879, 0.0023968085939602055, -1.5907156771296993e-05],
(0.999, 13.0): [-0.17371965962745489, -0.019577162950177709, 0.0022391783473999739, -2.0613023472812558e-05],
(0.999, 14.0): [-0.16905298116759873, -0.01967115985443986, 0.0026495208325889269, -9.1074275220634073e-05],
(0.999, 15.0): [-0.16635662558214312, -0.017903767183469876, 0.0022301322677100496, -5.1956773935885426e-05],
(0.999, 16.0): [-0.16388776549525449, -0.016671918839902419, 0.0020365289602744382, -4.3592447599724942e-05],
(0.999, 17.0): [-0.16131934177990759, -0.015998918405126326, 0.0019990454743285904, -4.8176277491327653e-05],
(0.999, 18.0): [-0.15880633110376571, -0.015830715141055916, 0.0021688405343832091, -8.061825248932771e-05],
(0.999, 19.0): [-0.15644841913314136, -0.015729364721105681, 0.0022981443610378136, -0.00010093672643417343],
(0.999, 20.0): [-0.15516596606222705, -0.014725095968258637, 0.0021117117014292155, -8.8806880297328484e-05],
(0.999, 24.0): [-0.14997437768645827, -0.012755323295476786, 0.0018871651510496939, -8.0896370662414938e-05],
(0.999, 30.0): [-0.14459974882323703, -0.011247323832877647, 0.0018637400643826279, -9.6415323191606741e-05],
(0.999, 40.0): [-0.13933285919392555, -0.0097151769692496587, 0.0018131251876208683, -0.00010452598991994023],
(0.999, 60.0): [-0.13424555343804143, -0.0082163027951669444, 0.0017883427892173382, -0.00011415865110808405],
(0.999, 120.0): [-0.12896119523040372, -0.0070426701112581112, 0.0018472364154226955, -0.00012862202979478294],
(0.999, inf): [-0.12397213562666673, -0.0056901201604149998, 0.0018260689406957129, -0.00013263452567995485]}
# p values that are defined in the A table
p_keys = [.1,.5,.675,.75,.8,.85,.9,.95,.975,.99,.995,.999]
# v values that are defined in the A table
v_keys = range(2, 21) + [24, 30, 40, 60, 120, inf]
def _isfloat(x):
"""
returns True if x is a float,
returns False otherwise
"""
try:
float(x)
except (ValueError, TypeError):
return False
return True
##def _phi(p):
## """returns the pth quantile inverse norm"""
## return scipy.stats.norm.isf(p)
def _phi( p ):
# this function is faster than using scipy.stats.norm.isf(p)
# but the permissiveness of its license isn't explicitly stated.
# using scipy.stats.norm.isf(p) is an acceptable alternative
"""
Modified from the author's original perl code (original comments follow below)
by [email protected]. May 3, 2004.
Lower tail quantile for standard normal distribution function.
This function returns an approximation of the inverse cumulative
standard normal distribution function. I.e., given P, it returns
an approximation to the X satisfying P = Pr{Z <= X} where Z is a
random variable from the standard normal distribution.
The algorithm uses a minimax approximation by rational functions
and the result has a relative error whose absolute value is less
than 1.15e-9.
Author: Peter John Acklam
Time-stamp: 2000-07-19 18:26:14
E-mail: [email protected]
WWW URL: http://home.online.no/~pjacklam
"""
if p <= 0 or p >= 1:
# The original perl code exits here, we'll throw an exception instead
raise ValueError( "Argument to ltqnorm %f must be in open interval (0,1)" % p )
# Coefficients in rational approximations.
a = (-3.969683028665376e+01, 2.209460984245205e+02, \
-2.759285104469687e+02, 1.383577518672690e+02, \
-3.066479806614716e+01, 2.506628277459239e+00)
b = (-5.447609879822406e+01, 1.615858368580409e+02, \
-1.556989798598866e+02, 6.680131188771972e+01, \
-1.328068155288572e+01 )
c = (-7.784894002430293e-03, -3.223964580411365e-01, \
-2.400758277161838e+00, -2.549732539343734e+00, \
4.374664141464968e+00, 2.938163982698783e+00)
d = ( 7.784695709041462e-03, 3.224671290700398e-01, \
2.445134137142996e+00, 3.754408661907416e+00)
# Define break-points.
plow = 0.02425
phigh = 1 - plow
# Rational approximation for lower region:
if p < plow:
q = math.sqrt(-2*math.log(p))
return -(((((c[0]*q+c[1])*q+c[2])*q+c[3])*q+c[4])*q+c[5]) / \
((((d[0]*q+d[1])*q+d[2])*q+d[3])*q+1)
# Rational approximation for upper region:
if phigh < p:
q = math.sqrt(-2*math.log(1-p))
return (((((c[0]*q+c[1])*q+c[2])*q+c[3])*q+c[4])*q+c[5]) / \
((((d[0]*q+d[1])*q+d[2])*q+d[3])*q+1)
# Rational approximation for central region:
q = p - 0.5
r = q*q
return -(((((a[0]*r+a[1])*r+a[2])*r+a[3])*r+a[4])*r+a[5])*q / \
(((((b[0]*r+b[1])*r+b[2])*r+b[3])*r+b[4])*r+1)
def _ptransform(p):
"""function for p-value abcissa transformation"""
return -1. / (1. + 1.5 * _phi((1. + p)/2.))
def _func(a, p, r, v):
"""
calculates f-hat for the coefficients in a, probability p,
sample mean difference r, and degrees of freedom v.
"""
# eq. 2.3
f = a[0]*math.log(r-1.) + \
a[1]*math.log(r-1.)**2 + \
a[2]*math.log(r-1.)**3 + \
a[3]*math.log(r-1.)**4
# eq. 2.7 and 2.8 corrections
if r == 3:
f += -0.002 / (1. + 12. * _phi(p)**2)
if v <= 4.364:
f += 1./517. - 1./(312.*(v,1e38)[np.isinf(v)])
else:
f += 1./(191.*(v,1e38)[np.isinf(v)])
return -f
def _select_ps(p):
# There are more generic ways of doing this but profiling
# revealed that selecting these points is one of the slow
# things that is easy to change. This is about 11 times
# faster than the generic algorithm it is replacing.
#
# it is possible that different break points could yield
# better estimates, but the function this is refactoring
# just used linear distance.
"""returns the points to use for interpolating p"""
if p >= .99:
return .990, .995, .999
elif p >= .975:
return .975, .990, .995
elif p >= .95:
return .950, .975, .990
elif p >= .9125:
return .900, .950, .975
elif p >= .875:
return .850, .900, .950
elif p >= .825:
return .800, .850, .900
elif p >= .7625:
return .750, .800, .850
elif p >= .675:
return .675, .750, .800
elif p >= .500:
return .500, .675, .750
else:
return .100, .500, .675
def _interpolate_p(p, r, v):
"""
interpolates p based on the values in the A table for the
scalar value of r and the scalar value of v
"""
# interpolate p (v should be in table)
# if .5 < p < .75 use linear interpolation in q
# if p > .75 use quadratic interpolation in log(y + r/v)
# by -1. / (1. + 1.5 * _phi((1. + p)/2.))
# find the 3 closest v values
p0, p1, p2 = _select_ps(p)
try:
y0 = _func(A[(p0, v)], p0, r, v) + 1.
except KeyError:
# report the offending arguments before propagating the failed table lookup
print p, r, v
raise
y1 = _func(A[(p1, v)], p1, r, v) + 1.
y2 = _func(A[(p2, v)], p2, r, v) + 1.
y_log0 = math.log(y0 + float(r)/float(v))
y_log1 = math.log(y1 + float(r)/float(v))
y_log2 = math.log(y2 + float(r)/float(v))
# If p < .85 apply only the ordinate transformation
# if p > .85 apply the ordinate and the abscissa transformation
# In both cases apply quadratic interpolation
if p > .85:
p_t = _ptransform(p)
p0_t = _ptransform(p0)
p1_t = _ptransform(p1)
p2_t = _ptransform(p2)
# calculate derivatives for quadratic interpolation
d2 = 2*((y_log2-y_log1)/(p2_t-p1_t) - \
(y_log1-y_log0)/(p1_t-p0_t))/(p2_t-p0_t)
if (p2+p0)>=(p1+p1):
d1 = (y_log2-y_log1)/(p2_t-p1_t) - 0.5*d2*(p2_t-p1_t)
else:
d1 = (y_log1-y_log0)/(p1_t-p0_t) + 0.5*d2*(p1_t-p0_t)
d0 = y_log1
# interpolate value
y_log = (d2/2.) * (p_t-p1_t)**2. + d1 * (p_t-p1_t) + d0
# transform back to y
y = math.exp(y_log) - float(r)/float(v)
elif p > .5:
# calculate derivatives for quadratic interpolation
d2 = 2*((y_log2-y_log1)/(p2-p1) - \
(y_log1-y_log0)/(p1-p0))/(p2-p0)
if (p2+p0)>=(p1+p1):
d1 = (y_log2-y_log1)/(p2-p1) - 0.5*d2*(p2-p1)
else:
d1 = (y_log1-y_log0)/(p1-p0) + 0.5*d2*(p1-p0)
d0 = y_log1
# interpolate values
y_log = (d2/2.) * (p-p1)**2. + d1 * (p-p1) + d0
# transform back to y
y = math.exp(y_log) - float(r)/float(v)
else:
# linear interpolation in q and p
q0 = math.sqrt(2) * -y0 * \
scipy.stats.t.isf((1.+p0)/2., (v,1e38)[v>1e38])
q1 = math.sqrt(2) * -y1 * \
scipy.stats.t.isf((1.+p1)/2., (v,1e38)[v>1e38])
d1 = (q1-q0)/(p1-p0)
d0 = q0
# interpolate values
q = d1 * (p-p0) + d0
# transform back to y
y = -q / (math.sqrt(2) * \
scipy.stats.t.isf((1.+p)/2., (v,1e38)[v>1e38]))
return y
def _select_vs(v, p):
# This one is about 30 times faster than
# the generic algorithm it is replacing.
"""returns the points to use for interpolating v"""
if v >= 120.:
return 60, 120, inf
elif v >= 60.:
return 40, 60, 120
elif v >= 40.:
return 30, 40, 60
elif v >= 30.:
return 24, 30, 40
elif v >= 24.:
return 20, 24, 30
elif v >= 19.5:
return 19, 20, 24
if p >= .9:
if v < 2.5:
return 1, 2, 3
else:
if v < 3.5:
return 2, 3, 4
vi = int(round(v))
return vi - 1, vi, vi + 1
def _interpolate_v(p, r, v):
"""
interpolates v based on the values in the A table for the
scalar value of r and the scalar value of p
"""
# interpolate v (p should be in table)
# ordinate: y**2
# abscissa: 1./v
# find the 3 closest v values
# only p >= .9 have table values for 1 degree of freedom.
# The boolean is used to index the tuple and append 1 when
# p >= .9
v0, v1, v2 = _select_vs(v, p)
# y = f - 1.
y0_sq = (_func(A[(p,v0)], p, r, v0) + 1.)**2.
y1_sq = (_func(A[(p,v1)], p, r, v1) + 1.)**2.
y2_sq = (_func(A[(p,v2)], p, r, v2) + 1.)**2.
# if v2 is inf set to a big number so interpolation
# calculations will work
if v2 > 1e38: v2 = 1e38
# transform v
v_, v0_, v1_, v2_ = 1./v, 1./v0, 1./v1, 1./v2
# calculate derivatives for quadratic interpolation
d2 = 2.*((y2_sq-y1_sq)/(v2_-v1_) - \
(y0_sq-y1_sq)/(v0_-v1_)) / (v2_-v0_)
if (v2_ + v0_) >= (v1_ + v1_):
d1 = (y2_sq-y1_sq) / (v2_-v1_) - 0.5*d2*(v2_-v1_)
else:
d1 = (y1_sq-y0_sq) / (v1_-v0_) + 0.5*d2*(v1_-v0_)
d0 = y1_sq
# calculate y
y = math.sqrt((d2/2.)*(v_-v1_)**2. + d1*(v_-v1_)+ d0)
return y
def _qsturng(p, r, v):
"""scalar version of qsturng"""
## print 'q',p
# r enters through the q to y transform; here we only need to
# account for when p and/or v are not found in the table.
global A, p_keys, v_keys
if p < .1 or p > .999:
raise ValueError('p must be between .1 and .999')
if p < .9:
if v < 2:
raise ValueError('v must be >= 2 when p < .9')
else:
if v < 1:
raise ValueError('v must be >= 1 when p >= .9')
# The easy case. A tabled value is requested.
if A.has_key((p,v)):
y = _func(A[(p,v)], p, r, v) + 1.
elif p not in p_keys and v not in v_keys+([],[1])[p>=.90]:
# apply bilinear (quadratic) interpolation
#
# p0,v2 + o + p1,v2 + p2,v2
# r2
#
# 1
# - (p,v)
# v x
#
# r1
# p0,v1 + o + p1,v1 + p2,v1
#
#
# p0,v0 + o r0 + p1,v0 + p2,v0
#
# _ptransform(p)
#
# (p1 and v1 may be below or above (p,v). The algorithm
# works in both cases. For diagrammatic simplicity it is
# shown as above)
#
# 1. at v0, v1, and v2 use quadratic interpolation
# to find r0, r1, r2
#
# 2. use r0, r1, r2 and quadratic interpolation
# to find y and (p,v)
# find the 3 closest v values
v0, v1, v2 = _select_vs(v, p)
# find the 3 closest p values
p0, p1, p2 = _select_ps(p)
# calculate r0, r1, and r2
r0_sq = _interpolate_p(p, r, v0)**2
r1_sq = _interpolate_p(p, r, v1)**2
r2_sq = _interpolate_p(p, r, v2)**2
# transform v
v_, v0_, v1_, v2_ = 1./v, 1./v0, 1./v1, 1./v2
# calculate derivatives for quadratic interpolation
d2 = 2.*((r2_sq-r1_sq)/(v2_-v1_) - \
(r0_sq-r1_sq)/(v0_-v1_)) / (v2_-v0_)
if (v2_ + v0_) >= (v1_ + v1_):
d1 = (r2_sq-r1_sq) / (v2_-v1_) - 0.5*d2*(v2_-v1_)
else:
d1 = (r1_sq-r0_sq) / (v1_-v0_) + 0.5*d2*(v1_-v0_)
d0 = r1_sq
# calculate y
y = math.sqrt((d2/2.)*(v_-v1_)**2. + d1*(v_-v1_)+ d0)
elif v not in v_keys+([],[1])[p>=.90]:
y = _interpolate_v(p, r, v)
elif p not in p_keys:
y = _interpolate_p(p, r, v)
return math.sqrt(2) * -y * \
scipy.stats.t.isf((1.+p)/2., (v,1e38)[v>1e38])
# make a qsturng function that will accept list-like objects
_vqsturng = np.vectorize(_qsturng)
_vqsturng.__doc__ = """vector version of qsturng"""
def qsturng(p, r, v):
"""Approximates the quantile p for a studentized range
distribution having v degrees of freedom and r samples
for probability p.
Parameters
----------
p : (scalar, array_like)
The cumulative probability value
p >= .1 and p <=.999
(values under .5 are not recommended)
r : (scalar, array_like)
The number of samples
r >= 2 and r <= 200
(values over 200 are permitted but not recommended)
v : (scalar, array_like)
The sample degrees of freedom
if p >= .9:
v >= 1 and v <= inf
else:
v >= 2 and v <= inf
Returns
-------
q : (scalar, array_like)
approximation of the Studentized Range
"""
if all(map(_isfloat, [p, r, v])):
return _qsturng(p, r, v)
return _vqsturng(p, r, v)
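## Example usage (a minimal sketch, not part of the original source; the
## quoted value is approximate):
##
## q = qsturng(.95, 3, 10)  # .95 quantile for r=3 samples, v=10 dof
## # q is roughly 3.88, the familiar Tukey HSD critical value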
##def _qsturng0(p, r, v):
#### print 'q0',p
## """
## returns a first order approximation of q studentized range
## value. Based on Lund and Lund's (1983) FORTRAN77
## algorithm AS 190.2, Appl. Statist. (1983).
## """
## vmax = 120.
## c = [0.8843, 0.2368, 1.214, 1.208, 1.4142]
##
## t = -_phi(.5+.5*p)
## if (v < vmax):
## t += (t**3. + t) / float(v) / 4.
##
## q = c[0] - c[1] * t
## if (v < vmax):
## q = q - c[2] / float(v) + c[3] * t / float(v)
## q = t * (q * math.log(r - 1.) + c[4])
##
## # apply "bar napkin" correction for when p < .85
## # this is good enough for our intended purpose
## if p < .85:
## q += math.log10(r) * 2.25 * (.85-p)
## return q
def _psturng(q, r, v):
"""scalar version of psturng"""
if q < 0.:
raise ValueError('q should be >= 0')
opt_func = lambda p, r, v : abs(_qsturng(p, r, v) - q)
if v == 1:
if q < _qsturng(.9, r, 1):
return .1
elif q > _qsturng(.999, r, 1):
return .001
return 1. - fminbound(opt_func, .9, .999, args=(r,v))
else:
if q < _qsturng(.1, r, v):
return .9
elif q > _qsturng(.999, r, v):
return .001
return 1. - fminbound(opt_func, .1, .999, args=(r,v))
_vpsturng = np.vectorize(_psturng)
_vpsturng.__doc__ = """vector version of psturng"""
def psturng(q, r, v):
"""Evaluates the probability from 0 to q for a studentized
range having v degrees of freedom and r samples.
Parameters
----------
q : (scalar, array_like)
quantile value of Studentized Range
q >= 0.
r : (scalar, array_like)
The number of samples
r >= 2 and r <= 200
(values over 200 are permitted but not recommended)
v : (scalar, array_like)
The sample degrees of freedom
if p >= .9:
v >= 1 and v <= inf
else:
v >= 2 and v <= inf
Returns
-------
p : (scalar, array_like)
1. - area from zero to q under the Studentized Range
distribution. When v == 1, p is bound between .001
and .1, when v > 1, p is bound between .001 and .9.
Values between .5 and .9 are 1st order approximations.
"""
if all(map(_isfloat, [q, r, v])):
return _psturng(q, r, v)
return _vpsturng(q, r, v)
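## Example usage (a sketch, not part of the original source): psturng is the
## approximate inverse of qsturng in p, so round-tripping a quantile should
## recover roughly 1 - p:
##
## q = qsturng(.95, 3, 10)
## # psturng(q, 3, 10) is roughly .05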
##p, r, v = .9, 10, 20
##print
##print 'p and v interpolation'
##print '\t20\t22\t24'
##print '.75',qsturng(.75, r, 20),qsturng(.75, r, 22),qsturng(.75, r, 24)
##print '.85',qsturng(.85, r, 20),qsturng(.85, r, 22),qsturng(.85, r, 24)
##print '.90',qsturng(.90, r, 20),qsturng(.90, r, 22),qsturng(.90, r, 24)
##print
##print 'p and v interpolation'
##print '\t120\t500\tinf'
##print '.950',qsturng(.95, r, 120),qsturng(.95, r, 500),qsturng(.95, r, inf)
##print '.960',qsturng(.96, r, 120),qsturng(.96, r, 500),qsturng(.96, r, inf)
##print '.975',qsturng(.975, r, 120),qsturng(.975, r, 500),qsturng(.975, r, inf)
##print
##print 'p and v interpolation'
##print '\t40\t50\t60'
##print '.950',qsturng(.95, r, 40),qsturng(.95, r, 50),qsturng(.95, r, 60)
##print '.960',qsturng(.96, r, 40),qsturng(.96, r, 50),qsturng(.96, r, 60)
##print '.975',qsturng(.975, r, 40),qsturng(.975, r, 50),qsturng(.975, r, 60)
##print
##print 'p and v interpolation'
##print '\t20\t22\t24'
##print '.50',qsturng(.5, r, 20),qsturng(.5, r, 22),qsturng(.5, r, 24)
##print '.60',qsturng(.6, r, 20),qsturng(.6, r, 22),qsturng(.6, r, 24)
##print '.75',qsturng(.75, r, 20),qsturng(.75, r, 22),qsturng(.75, r, 24)
| 0.008852 |
#!/usr/bin/env python3
# GTR2 to GSC2013 converter
# Copyright (C) 2014 Ingo Ruhnke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import logging
import rfactortools
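# Example invocation (hypothetical paths; -d must point at an existing GTR2
# installation and -o at the directory that receives the converted mod):
#   python gtr2-to-gsc2013.py -d /opt/GTR2 -o build/converted SomeGTR2Mod/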
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='GTR2 to GSC2013 converter')
parser.add_argument('DIRECTORY', action='store', type=str, nargs=1,
help='directory containing the mod')
parser.add_argument('-o', '--output', metavar='DIR', type=str, required=True,
help="output directory")
parser.add_argument('-d', '--datadir', metavar='DIR', type=str, required=True,
help="GTR2 directory")
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help="be more verbose")
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
conv = rfactortools.GTR2ToGSC2013(args.datadir, args.DIRECTORY[0])
conv.convert_all(args.output)
# EOF #
| 0.001178 |
import logging
import time
from insights.ui.base import Base
from insights.ui.locators import rules_locators
from insights.ui.navigator import Navigator
LOGGER = logging.getLogger('insights_portal')
class Rules(Base):
"""
Identifies contents from Rules page of Insights
"""
def navigate_to_entity(self):
Navigator(self.browser).go_to_rules()
def go_to_filter(self, name=None):
"""
This will select rules filter
"""
        time.sleep(5)  # Added explicit wait as rules cards take time to load
if name is not None:
LOGGER.info("Checking filter: " + name)
            if name == 'Availability':
                self.click(rules_locators['rules.filter.availability'])
            elif name == 'Performance':
                self.click(rules_locators['rules.filter.performance'])
            elif name == 'Stability':
                self.click(rules_locators['rules.filter.stability'])
            elif name == 'Security':
                self.click(rules_locators['rules.filter.security'])
            elif name == 'All':
                self.click(rules_locators['rules.filter.all'])
def get_active_filter_text(self):
return self.find_element(rules_locators['rules.active.filter']).text
def get_rule_card_title(self):
        time.sleep(5)  # Added explicit wait as rule cards take time to load
rules_title = self.find_elements(rules_locators['rules.cards'])
title = []
for rule_title in rules_title:
title.append(rule_title.text)
LOGGER.info(title)
return title
def search_rule(self, search_text='HTTPoxy'):
self.click(rules_locators['rules.search.box'])
self.field_update("rules.search.box", search_text)
self.click(rules_locators['rules.search.icon'])
time.sleep(5) #Wait for search rules
def get_rules_count(self):
rule_blocks = self.find_elements(rules_locators['rules.content.blocks'])
return len(rule_blocks)
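# Hypothetical usage sketch (added for illustration; assumes Base(browser) wires
# up self.browser with a Selenium-style driver, which this module does not show):
#
#   rules_page = Rules(browser)
#   rules_page.navigate_to_entity()
#   rules_page.go_to_filter('Security')
#   rules_page.search_rule('HTTPoxy')
#   print(rules_page.get_rules_count())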
| 0.004455 |
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import sys, os, shutil, glob, py_compile, subprocess, re, zipfile, time, textwrap
from setup import (Command, modules, functions, basenames, __version__,
__appname__)
from setup.build_environment import msvc, MT, RC, is64bit
from setup.installer.windows.wix import WixMixIn
ICU_DIR = os.environ.get('ICU_DIR', r'Q:\icu')
OPENSSL_DIR = os.environ.get('OPENSSL_DIR', r'Q:\openssl')
QT_DIR = os.environ.get('QT_DIR', 'Q:\\Qt\\current')
QT_DLLS = ['Core', 'Gui', 'Network', 'Svg', 'WebKit', 'Xml', 'XmlPatterns']
SW = r'C:\cygwin\home\kovid\sw'
IMAGEMAGICK = os.path.join(SW, 'build',
'ImageMagick-*\\VisualMagick\\bin')
CRT = r'C:\Microsoft.VC90.CRT'
LZMA = r'Q:\easylzma\build\easylzma-0.0.8'
VERSION = re.sub('[a-z]\d+', '', __version__)
WINVER = VERSION+'.0'
machine = 'X64' if is64bit else 'X86'
DESCRIPTIONS = {
'calibre' : 'The main calibre program',
'ebook-viewer' : 'Viewer for all e-book formats',
'lrfviewer' : 'Viewer for LRF files',
'ebook-convert': 'Command line interface to the conversion/news download system',
'ebook-meta' : 'Command line interface for manipulating e-book metadata',
'calibredb' : 'Command line interface to the calibre database',
'calibre-launcher' : 'Utility functions common to all executables',
'calibre-debug' : 'Command line interface for calibre debugging/development',
'calibre-customize' : 'Command line interface to calibre plugin system',
'pdfmanipulate' : 'Command line tool to manipulate PDF files',
'calibre-server': 'Standalone calibre content server',
'calibre-parallel': 'calibre worker process',
'calibre-smtp' : 'Command line interface for sending books via email',
}
def walk(dir):
''' A nice interface to os.walk '''
for record in os.walk(dir):
for f in record[-1]:
yield os.path.join(record[0], f)
class Win32Freeze(Command, WixMixIn):
description = 'Freeze windows calibre installation'
def add_options(self, parser):
parser.add_option('--no-ice', default=False, action='store_true',
help='Disable ICE checks when building MSI (needed when running'
' from cygwin sshd)')
parser.add_option('--msi-compression', '--compress', default='high',
help='Compression when generating installer. Set to none to disable')
parser.add_option('--keep-site', default=False, action='store_true',
help='Keep human readable site.py')
parser.add_option('--verbose', default=0, action="count",
help="Be more verbose")
def run(self, opts):
self.SW = SW
self.opts = opts
self.src_root = self.d(self.SRC)
self.base = self.j(self.d(self.SRC), 'build', 'winfrozen')
self.rc_template = self.j(self.d(self.a(__file__)), 'template.rc')
self.py_ver = ''.join(map(str, sys.version_info[:2]))
self.lib_dir = self.j(self.base, 'Lib')
self.pylib = self.j(self.base, 'pylib.zip')
self.dll_dir = self.j(self.base, 'DLLs')
self.plugins_dir = os.path.join(self.base, 'plugins2')
self.portable_base = self.j(self.d(self.base), 'Calibre Portable')
self.obj_dir = self.j(self.src_root, 'build', 'launcher')
self.initbase()
self.build_launchers()
self.add_plugins()
self.freeze()
self.embed_manifests()
self.install_site_py()
self.archive_lib_dir()
self.remove_CRT_from_manifests()
self.create_installer()
if not is64bit:
self.build_portable()
self.build_portable_installer()
self.sign_installers()
def remove_CRT_from_manifests(self):
'''
The dependency on the CRT is removed from the manifests of all DLLs.
This allows the CRT loaded by the .exe files to be used instead.
'''
search_pat = re.compile(r'(?is)<dependency>.*Microsoft\.VC\d+\.CRT')
repl_pat = re.compile(
r'(?is)<dependency>.*?Microsoft\.VC\d+\.CRT.*?</dependency>')
for dll in (glob.glob(self.j(self.dll_dir, '*.dll')) +
glob.glob(self.j(self.plugins_dir, '*.pyd'))):
bn = self.b(dll)
with open(dll, 'rb') as f:
raw = f.read()
match = search_pat.search(raw)
if match is None:
continue
self.info('Removing CRT dependency from manifest of: %s'%bn)
# Blank out the bytes corresponding to the dependency specification
nraw = repl_pat.sub(lambda m: b' '*len(m.group()), raw)
if len(nraw) != len(raw) or nraw == raw:
raise Exception('Something went wrong with %s'%bn)
with open(dll, 'wb') as f:
f.write(nraw)
def initbase(self):
if self.e(self.base):
shutil.rmtree(self.base)
os.makedirs(self.base)
def add_plugins(self):
self.info('Adding plugins...')
tgt = self.plugins_dir
if os.path.exists(tgt):
shutil.rmtree(tgt)
os.mkdir(tgt)
base = self.j(self.SRC, 'calibre', 'plugins')
for f in glob.glob(self.j(base, '*.pyd')):
            # We don't want the manifests as the manifest in the exe will be
# used instead
shutil.copy2(f, tgt)
def fix_pyd_bootstraps_in(self, folder):
for dirpath, dirnames, filenames in os.walk(folder):
for f in filenames:
name, ext = os.path.splitext(f)
bpy = self.j(dirpath, name + '.py')
if ext == '.pyd' and os.path.exists(bpy):
with open(bpy, 'rb') as f:
raw = f.read().strip()
if (not raw.startswith('def __bootstrap__') or not
raw.endswith('__bootstrap__()')):
raise Exception('The file %r has non'
' bootstrap code'%self.j(dirpath, f))
for ext in ('.py', '.pyc', '.pyo'):
x = self.j(dirpath, name+ext)
if os.path.exists(x):
os.remove(x)
def freeze(self):
shutil.copy2(self.j(self.src_root, 'LICENSE'), self.base)
self.info('Adding CRT')
shutil.copytree(CRT, self.j(self.base, os.path.basename(CRT)))
self.info('Adding resources...')
tgt = self.j(self.base, 'resources')
if os.path.exists(tgt):
shutil.rmtree(tgt)
shutil.copytree(self.j(self.src_root, 'resources'), tgt)
self.info('Adding Qt and python...')
shutil.copytree(r'C:\Python%s\DLLs'%self.py_ver, self.dll_dir,
ignore=shutil.ignore_patterns('msvc*.dll', 'Microsoft.*'))
for x in glob.glob(self.j(OPENSSL_DIR, 'bin', '*.dll')):
shutil.copy2(x, self.dll_dir)
for x in glob.glob(self.j(ICU_DIR, 'source', 'lib', '*.dll')):
shutil.copy2(x, self.dll_dir)
for x in QT_DLLS:
x += '4.dll'
if not x.startswith('phonon'):
x = 'Qt'+x
shutil.copy2(os.path.join(QT_DIR, 'bin', x), self.dll_dir)
shutil.copy2(r'C:\windows\system32\python%s.dll'%self.py_ver,
self.dll_dir)
for x in os.walk(r'C:\Python%s\Lib'%self.py_ver):
for f in x[-1]:
if f.lower().endswith('.dll'):
f = self.j(x[0], f)
shutil.copy2(f, self.dll_dir)
shutil.copy2(
r'C:\Python%(v)s\Lib\site-packages\pywin32_system32\pywintypes%(v)s.dll'
% dict(v=self.py_ver), self.dll_dir)
def ignore_lib(root, items):
ans = []
for x in items:
ext = os.path.splitext(x)[1]
if (not ext and (x in ('demos', 'tests'))) or \
(ext in ('.dll', '.chm', '.htm', '.txt')):
ans.append(x)
return ans
shutil.copytree(r'C:\Python%s\Lib'%self.py_ver, self.lib_dir,
ignore=ignore_lib)
# Fix win32com
sp_dir = self.j(self.lib_dir, 'site-packages')
comext = self.j(sp_dir, 'win32comext')
shutil.copytree(self.j(comext, 'shell'), self.j(sp_dir, 'win32com', 'shell'))
shutil.rmtree(comext)
# Fix PyCrypto and Pillow, removing the bootstrap .py modules that load
# the .pyd modules, since they do not work when in a zip file
for folder in os.listdir(sp_dir):
folder = self.j(sp_dir, folder)
if os.path.isdir(folder):
self.fix_pyd_bootstraps_in(folder)
for pat in (r'PyQt4\uic\port_v3', ):
x = glob.glob(self.j(self.lib_dir, 'site-packages', pat))[0]
shutil.rmtree(x)
self.info('Adding calibre sources...')
for x in glob.glob(self.j(self.SRC, '*')):
shutil.copytree(x, self.j(sp_dir, self.b(x)))
for x in (r'calibre\manual', r'calibre\trac', 'pythonwin'):
deld = self.j(sp_dir, x)
if os.path.exists(deld):
shutil.rmtree(deld)
for x in os.walk(self.j(sp_dir, 'calibre')):
for f in x[-1]:
if not f.endswith('.py'):
os.remove(self.j(x[0], f))
self.info('Byte-compiling all python modules...')
for x in ('test', 'lib2to3', 'distutils'):
shutil.rmtree(self.j(self.lib_dir, x))
for x in os.walk(self.lib_dir):
root = x[0]
for f in x[-1]:
if f.endswith('.py'):
y = self.j(root, f)
rel = os.path.relpath(y, self.lib_dir)
try:
py_compile.compile(y, dfile=rel, doraise=True)
os.remove(y)
except:
self.warn('Failed to byte-compile', y)
pyc, pyo = y+'c', y+'o'
epyc, epyo, epy = map(os.path.exists, (pyc,pyo,y))
if (epyc or epyo) and epy:
os.remove(y)
if epyo and epyc:
os.remove(pyc)
self.info('\nAdding Qt plugins...')
qt_prefix = QT_DIR
plugdir = self.j(qt_prefix, 'plugins')
tdir = self.j(self.base, 'qt_plugins')
for d in ('imageformats', 'codecs', 'iconengines'):
self.info('\t', d)
imfd = os.path.join(plugdir, d)
tg = os.path.join(tdir, d)
if os.path.exists(tg):
shutil.rmtree(tg)
shutil.copytree(imfd, tg)
for dirpath, dirnames, filenames in os.walk(tdir):
for x in filenames:
if not x.endswith('.dll'):
os.remove(self.j(dirpath, x))
print
print 'Adding third party dependencies'
print '\tAdding misc binary deps'
bindir = os.path.join(SW, 'bin')
for x in ('pdftohtml', 'pdfinfo', 'pdftoppm'):
shutil.copy2(os.path.join(bindir, x+'.exe'), self.base)
for pat in ('*.dll',):
for f in glob.glob(os.path.join(bindir, pat)):
ok = True
for ex in ('expatw', 'testplug'):
if ex in f.lower():
ok = False
if not ok:
continue
dest = self.dll_dir
shutil.copy2(f, dest)
for x in ('zlib1.dll', 'libxml2.dll', 'libxslt.dll', 'libexslt.dll'):
msrc = self.j(bindir, x+'.manifest')
if os.path.exists(msrc):
shutil.copy2(msrc, self.dll_dir)
# Copy ImageMagick
impath = glob.glob(IMAGEMAGICK)[-1]
for pat in ('*.dll', '*.xml'):
for f in glob.glob(self.j(impath, pat)):
ok = True
for ex in ('magick++', 'x11.dll', 'xext.dll'):
if ex in f.lower():
ok = False
if not ok:
continue
shutil.copy2(f, self.dll_dir)
def embed_manifests(self):
self.info('Embedding remaining manifests...')
for x in os.walk(self.base):
for f in x[-1]:
base, ext = os.path.splitext(f)
if ext != '.manifest':
continue
dll = self.j(x[0], base)
manifest = self.j(x[0], f)
res = 2
if os.path.splitext(dll)[1] == '.exe':
res = 1
if os.path.exists(dll):
self.run_builder([MT, '-manifest', manifest,
'-outputresource:%s;%d'%(dll,res)])
os.remove(manifest)
def compress(self):
self.info('Compressing app dir using 7-zip')
subprocess.check_call([r'C:\Program Files\7-Zip\7z.exe', 'a', '-r',
'-scsUTF-8', '-sfx', 'winfrozen', 'winfrozen'], cwd=self.base)
def embed_resources(self, module, desc=None, extra_data=None,
product_description=None):
icon_base = self.j(self.src_root, 'icons')
icon_map = {'calibre':'library', 'ebook-viewer':'viewer',
'lrfviewer':'viewer', 'calibre-portable':'library'}
file_type = 'DLL' if module.endswith('.dll') else 'APP'
template = open(self.rc_template, 'rb').read()
bname = self.b(module)
internal_name = os.path.splitext(bname)[0]
icon = icon_map.get(internal_name, 'command-prompt')
if internal_name.startswith('calibre-portable-'):
icon = 'install'
icon = self.j(icon_base, icon+'.ico')
if desc is None:
defdesc = 'A dynamic link library' if file_type == 'DLL' else \
'An executable program'
desc = DESCRIPTIONS.get(internal_name, defdesc)
license = 'GNU GPL v3.0'
def e(val):
return val.replace('"', r'\"')
if product_description is None:
product_description = __appname__ + ' - E-book management'
rc = template.format(
icon=icon,
file_type=e(file_type),
file_version=e(WINVER.replace('.', ',')),
file_version_str=e(WINVER),
file_description=e(desc),
internal_name=e(internal_name),
original_filename=e(bname),
product_version=e(WINVER.replace('.', ',')),
product_version_str=e(__version__),
product_name=e(__appname__),
product_description=e(product_description),
legal_copyright=e(license),
legal_trademarks=e(__appname__ +
' is a registered U.S. trademark number 3,666,525')
)
if extra_data:
rc += '\nextra extra "%s"'%extra_data
tdir = self.obj_dir
rcf = self.j(tdir, bname+'.rc')
with open(rcf, 'wb') as f:
f.write(rc)
res = self.j(tdir, bname + '.res')
cmd = [RC, '/n', '/fo'+res, rcf]
self.run_builder(cmd)
return res
def install_site_py(self):
if not os.path.exists(self.lib_dir):
os.makedirs(self.lib_dir)
shutil.copy2(self.j(self.d(__file__), 'site.py'), self.lib_dir)
y = os.path.join(self.lib_dir, 'site.py')
py_compile.compile(y, dfile='site.py', doraise=True)
if not self.opts.keep_site:
os.remove(y)
def run_builder(self, cmd, show_output=False):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if p.wait() != 0:
self.info('Failed to run builder:')
self.info(*cmd)
self.info(p.stdout.read())
self.info(p.stderr.read())
sys.exit(1)
if show_output:
self.info(p.stdout.read())
self.info(p.stderr.read())
def build_portable_installer(self):
zf = self.a(self.j('dist', 'calibre-portable-%s.zip.lz'%VERSION))
usz = os.path.getsize(zf)
def cc(src, obj):
cflags = '/c /EHsc /MT /W4 /Ox /nologo /D_UNICODE /DUNICODE /DPSAPI_VERSION=1'.split()
cflags.append(r'/I%s\include'%LZMA)
cflags.append('/DUNCOMPRESSED_SIZE=%d'%usz)
if self.newer(obj, [src]):
self.info('Compiling', obj)
cmd = [msvc.cc] + cflags + ['/Fo'+obj, src]
self.run_builder(cmd)
src = self.j(self.src_root, 'setup', 'installer', 'windows',
'portable-installer.cpp')
obj = self.j(self.obj_dir, self.b(src)+'.obj')
xsrc = self.j(self.src_root, 'setup', 'installer', 'windows',
'XUnzip.cpp')
xobj = self.j(self.obj_dir, self.b(xsrc)+'.obj')
cc(src, obj)
cc(xsrc, xobj)
exe = self.j('dist', 'calibre-portable-installer-%s.exe'%VERSION)
if self.newer(exe, [obj, xobj]):
self.info('Linking', exe)
cmd = [msvc.linker] + ['/INCREMENTAL:NO', '/MACHINE:'+machine,
'/LIBPATH:'+self.obj_dir, '/SUBSYSTEM:WINDOWS',
'/LIBPATH:'+(LZMA+r'\lib\Release'),
'/RELEASE', '/MANIFEST', '/MANIFESTUAC:level="asInvoker" uiAccess="false"',
'/ENTRY:wWinMainCRTStartup',
'/OUT:'+exe, self.embed_resources(exe,
desc='Calibre Portable Installer', extra_data=zf,
product_description='Calibre Portable Installer'),
xobj, obj, 'User32.lib', 'Shell32.lib', 'easylzma_s.lib',
'Ole32.lib', 'Shlwapi.lib', 'Kernel32.lib', 'Psapi.lib']
self.run_builder(cmd)
manifest = exe + '.manifest'
with open(manifest, 'r+b') as f:
raw = f.read()
f.seek(0)
f.truncate()
# TODO: Add the windows 8 GUID to the compatibility section
# after windows 8 is released, see:
# http://msdn.microsoft.com/en-us/library/windows/desktop/hh848036(v=vs.85).aspx
raw = raw.replace(b'</assembly>', textwrap.dedent(
b'''\
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<!--The ID below indicates app support for Windows Vista -->
<supportedOS Id="{e2011457-1546-43c5-a5fe-008deee3d3f0}"/>
<!--The ID below indicates app support for Windows 7 -->
<supportedOS Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"/>
</application>
</compatibility>
</assembly>
'''))
f.write(raw)
self.run_builder([MT, '-manifest', manifest,
'-outputresource:%s;1'%exe])
os.remove(manifest)
os.remove(zf)
def build_portable(self):
base = self.portable_base
if os.path.exists(base):
shutil.rmtree(base)
os.makedirs(base)
src = self.j(self.src_root, 'setup', 'installer', 'windows',
'portable.c')
obj = self.j(self.obj_dir, self.b(src)+'.obj')
cflags = '/c /EHsc /MT /W3 /Ox /nologo /D_UNICODE /DUNICODE'.split()
if self.newer(obj, [src]):
self.info('Compiling', obj)
cmd = [msvc.cc] + cflags + ['/Fo'+obj, '/Tc'+src]
self.run_builder(cmd)
exe = self.j(base, 'calibre-portable.exe')
if self.newer(exe, [obj]):
self.info('Linking', exe)
cmd = [msvc.linker] + ['/INCREMENTAL:NO', '/MACHINE:'+machine,
'/LIBPATH:'+self.obj_dir, '/SUBSYSTEM:WINDOWS',
'/RELEASE',
'/ENTRY:wWinMainCRTStartup',
'/OUT:'+exe, self.embed_resources(exe),
obj, 'User32.lib']
self.run_builder(cmd)
self.info('Creating portable installer')
shutil.copytree(self.base, self.j(base, 'Calibre'))
os.mkdir(self.j(base, 'Calibre Library'))
os.mkdir(self.j(base, 'Calibre Settings'))
name = '%s-portable-%s.zip'%(__appname__, __version__)
name = self.j('dist', name)
with zipfile.ZipFile(name, 'w', zipfile.ZIP_STORED) as zf:
self.add_dir_to_zip(zf, base, 'Calibre Portable')
subprocess.check_call([LZMA + r'\bin\elzma.exe', '-9', '--lzip', name])
def sign_installers(self):
self.info('Signing installers...')
files = glob.glob(self.j('dist', '*.msi')) + glob.glob(self.j('dist',
'*.exe'))
if not files:
raise ValueError('No installers found')
args = ['signtool.exe', 'sign', '/a', '/d',
'calibre - E-book management', '/du',
'http://calibre-ebook.com', '/t',
'http://timestamp.verisign.com/scripts/timstamp.dll']
try:
subprocess.check_call(args + files)
except subprocess.CalledProcessError:
print ('Signing failed, retrying with different timestamp server')
args[-1] = 'http://timestamp.comodoca.com/authenticode'
subprocess.check_call(args + files)
def add_dir_to_zip(self, zf, path, prefix=''):
'''
Add a directory recursively to the zip file with an optional prefix.
'''
if prefix:
zi = zipfile.ZipInfo(prefix+'/')
zi.external_attr = 16
zf.writestr(zi, '')
cwd = os.path.abspath(os.getcwd())
try:
os.chdir(path)
fp = (prefix + ('/' if prefix else '')).replace('//', '/')
for f in os.listdir('.'):
arcname = fp + f
if os.path.isdir(f):
self.add_dir_to_zip(zf, f, prefix=arcname)
else:
zf.write(f, arcname)
finally:
os.chdir(cwd)
def build_launchers(self, debug=False):
if not os.path.exists(self.obj_dir):
os.makedirs(self.obj_dir)
dflags = (['/Zi'] if debug else [])
dlflags = (['/DEBUG'] if debug else ['/INCREMENTAL:NO'])
base = self.j(self.src_root, 'setup', 'installer', 'windows')
sources = [self.j(base, x) for x in ['util.c', 'MemoryModule.c']]
headers = [self.j(base, x) for x in ['util.h', 'MemoryModule.h']]
objects = [self.j(self.obj_dir, self.b(x)+'.obj') for x in sources]
cflags = '/c /EHsc /MD /W3 /Ox /nologo /D_UNICODE'.split()
cflags += ['/DPYDLL="python%s.dll"'%self.py_ver, '/IC:/Python%s/include'%self.py_ver]
for src, obj in zip(sources, objects):
if not self.newer(obj, headers+[src]):
continue
cmd = [msvc.cc] + cflags + dflags + ['/Fo'+obj, '/Tc'+src]
self.run_builder(cmd, show_output=True)
dll = self.j(self.obj_dir, 'calibre-launcher.dll')
ver = '.'.join(__version__.split('.')[:2])
if self.newer(dll, objects):
cmd = [msvc.linker, '/DLL', '/VERSION:'+ver, '/OUT:'+dll,
'/nologo', '/MACHINE:'+machine] + dlflags + objects + \
[self.embed_resources(dll),
'/LIBPATH:C:/Python%s/libs'%self.py_ver,
'python%s.lib'%self.py_ver,
'/delayload:python%s.dll'%self.py_ver]
self.info('Linking calibre-launcher.dll')
self.run_builder(cmd, show_output=True)
src = self.j(base, 'main.c')
shutil.copy2(dll, self.base)
for typ in ('console', 'gui', ):
self.info('Processing %s launchers'%typ)
subsys = 'WINDOWS' if typ == 'gui' else 'CONSOLE'
for mod, bname, func in zip(modules[typ], basenames[typ],
functions[typ]):
xflags = list(cflags)
if typ == 'gui':
xflags += ['/DGUI_APP=']
xflags += ['/DMODULE="%s"'%mod, '/DBASENAME="%s"'%bname,
'/DFUNCTION="%s"'%func]
dest = self.j(self.obj_dir, bname+'.obj')
if self.newer(dest, [src]+headers):
self.info('Compiling', bname)
cmd = [msvc.cc] + xflags + dflags + ['/Tc'+src, '/Fo'+dest]
self.run_builder(cmd)
exe = self.j(self.base, bname+'.exe')
lib = dll.replace('.dll', '.lib')
if self.newer(exe, [dest, lib, self.rc_template, __file__]):
self.info('Linking', bname)
cmd = [msvc.linker] + ['/MACHINE:'+machine,
'/LIBPATH:'+self.obj_dir, '/SUBSYSTEM:'+subsys,
'/LIBPATH:C:/Python%s/libs'%self.py_ver, '/RELEASE',
'/OUT:'+exe] + dlflags + [self.embed_resources(exe),
dest, lib]
self.run_builder(cmd)
def archive_lib_dir(self):
self.info('Putting all python code into a zip file for performance')
self.zf_timestamp = time.localtime(time.time())[:6]
self.zf_names = set()
with zipfile.ZipFile(self.pylib, 'w', zipfile.ZIP_STORED) as zf:
# Add the .pyds from python and calibre to the zip file
for x in (self.plugins_dir, self.dll_dir):
for pyd in os.listdir(x):
if pyd.endswith('.pyd') and pyd not in {
# sqlite_custom has to be a file for
# sqlite_load_extension to work
'sqlite_custom.pyd',
# calibre_style has to be loaded by Qt therefore it
# must be a file
'calibre_style.pyd',
# Because of https://github.com/fancycode/MemoryModule/issues/4
# any extensions that use C++ exceptions must be loaded
# from files
'unrar.pyd', 'wpd.pyd', 'podofo.pyd',
'progress_indicator.pyd',
# As per this https://bugs.launchpad.net/bugs/1087816
# on some systems magick.pyd fails to load from memory
# on 64 bit
'magick.pyd',
}:
self.add_to_zipfile(zf, pyd, x)
os.remove(self.j(x, pyd))
# Add everything in Lib except site-packages to the zip file
for x in os.listdir(self.lib_dir):
if x == 'site-packages':
continue
self.add_to_zipfile(zf, x, self.lib_dir)
sp = self.j(self.lib_dir, 'site-packages')
# Special handling for PIL and pywin32
handled = set(['PIL.pth', 'pywin32.pth', 'PIL', 'win32'])
if not is64bit:
self.add_to_zipfile(zf, 'PIL', sp)
base = self.j(sp, 'win32', 'lib')
for x in os.listdir(base):
if os.path.splitext(x)[1] not in ('.exe',):
self.add_to_zipfile(zf, x, base)
base = self.d(base)
for x in os.listdir(base):
if not os.path.isdir(self.j(base, x)):
if os.path.splitext(x)[1] not in ('.exe',):
self.add_to_zipfile(zf, x, base)
handled.add('easy-install.pth')
            # We don't want the site.py from site-packages
handled.add('site.pyo')
for d in self.get_pth_dirs(self.j(sp, 'easy-install.pth')):
handled.add(self.b(d))
for x in os.listdir(d):
if x in {'EGG-INFO', 'site.py', 'site.pyc', 'site.pyo'}:
continue
self.add_to_zipfile(zf, x, d)
# The rest of site-packages
for x in os.listdir(sp):
if x in handled or x.endswith('.egg-info'):
continue
absp = self.j(sp, x)
if os.path.isdir(absp):
if not os.listdir(absp):
continue
self.add_to_zipfile(zf, x, sp)
else:
self.add_to_zipfile(zf, x, sp)
shutil.rmtree(self.lib_dir)
def get_pth_dirs(self, pth):
base = os.path.dirname(pth)
for line in open(pth).readlines():
line = line.strip()
if not line or line.startswith('#') or line.startswith('import'):
continue
candidate = os.path.abspath(self.j(base, line))
if os.path.exists(candidate):
if not os.path.isdir(candidate):
raise ValueError('%s is not a directory'%candidate)
yield candidate
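    # Illustrative note (not from the original source): archive_lib_dir() calls
    # get_pth_dirs() on site-packages/easy-install.pth, which typically looks like
    #
    #   import sys; sys.__plen = len(sys.path)
    #   ./some_package-1.0-py2.7.egg
    #   ./another_package-2.3-py2.7.egg
    #   import sys; new=sys.path[sys.__plen:]; ...
    #
    # The comment/import lines are skipped, and only entries that resolve to
    # existing directories (the unpacked .egg directories) are yielded.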
def add_to_zipfile(self, zf, name, base, exclude=frozenset()):
abspath = self.j(base, name)
name = name.replace(os.sep, '/')
if name in self.zf_names:
raise ValueError('Already added %r to zipfile [%r]'%(name, abspath))
zinfo = zipfile.ZipInfo(filename=name, date_time=self.zf_timestamp)
if os.path.isdir(abspath):
if not os.listdir(abspath):
return
zinfo.external_attr = 0o700 << 16
zf.writestr(zinfo, '')
for x in os.listdir(abspath):
if x not in exclude:
self.add_to_zipfile(zf, name + os.sep + x, base)
else:
ext = os.path.splitext(name)[1].lower()
if ext in ('.dll',):
raise ValueError('Cannot add %r to zipfile'%abspath)
zinfo.external_attr = 0o600 << 16
if ext in ('.py', '.pyc', '.pyo', '.pyd'):
with open(abspath, 'rb') as f:
zf.writestr(zinfo, f.read())
self.zf_names.add(name)
| 0.004201 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, too-many-arguments, too-many-locals, too-many-public-methods, too-many-branches
"""`BaseModule` defines an API for modules."""
import time
import logging
import warnings
from .. import metric
from .. import ndarray
from ..context import cpu
from ..model import BatchEndParam
from ..initializer import Uniform
from ..io import DataDesc
from ..base import _as_list
def _check_input_names(symbol, names, typename, throw):
"""Check that all input names are in symbol's arguments."""
args = symbol.list_arguments()
for name in names:
if name in args:
continue
candidates = [arg for arg in args if
not arg.endswith('_weight') and
not arg.endswith('_bias') and
not arg.endswith('_gamma') and
not arg.endswith('_beta')]
msg = "\033[91mYou created Module with Module(..., %s_names=%s) but " \
"input with name '%s' is not found in symbol.list_arguments(). " \
"Did you mean one of:\n\t%s\033[0m"%(
typename, str(names), name, '\n\t'.join(candidates))
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
def _check_names_match(data_names, data_shapes, name, throw):
"""Check that input names matches input data descriptors."""
actual = [x[0] for x in data_shapes]
if sorted(data_names) != sorted(actual):
msg = "Data provided by %s_shapes don't match names specified by %s_names (%s vs. %s)"%(
name, name, str(data_shapes), str(data_names))
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
def _parse_data_desc(data_names, label_names, data_shapes, label_shapes):
"""parse data_attrs into DataDesc format and check that names match"""
data_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in data_shapes]
_check_names_match(data_names, data_shapes, 'data', True)
if label_shapes is not None:
label_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in label_shapes]
_check_names_match(label_names, label_shapes, 'label', False)
else:
_check_names_match(label_names, [], 'label', False)
return data_shapes, label_shapes
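# Illustrative example (added sketch, not part of the original module):
# _parse_data_desc normalizes plain (name, shape) tuples into DataDesc objects
# and cross-checks them against the declared names, e.g.
#
#   >>> data_shapes = [('data', (32, 3, 224, 224))]
#   >>> label_shapes = [('softmax_label', (32,))]
#   >>> _parse_data_desc(['data'], ['softmax_label'], data_shapes, label_shapes)
#
# A name mismatch raises ValueError for the data descriptors and only warns for
# the label descriptors.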
class BaseModule(object):
"""The base class of a module.
    A module represents a computation component. One can think of a module as a computation machine.
A module can execute forward and backward passes and update parameters in a model.
We aim to make the APIs easy to use, especially in the case when we need to use the imperative
API to work with multiple modules (e.g. stochastic depth network).
A module has several states:
- Initial state: Memory is not allocated yet, so the module is not ready for computation yet.
- Binded: Shapes for inputs, outputs, and parameters are all known, memory has been allocated,
and the module is ready for computation.
- Parameters are initialized: For modules with parameters, doing computation before
initializing the parameters might result in undefined outputs.
- Optimizer is installed: An optimizer can be installed to a module. After this, the parameters
of the module can be updated according to the optimizer after gradients are computed
(forward-backward).
In order for a module to interact with others, it must be able to report the
following information in its initial state (before binding):
- `data_names`: list of type string indicating the names of the required input data.
- `output_names`: list of type string indicating the names of the required outputs.
After binding, a module should be able to report the following richer information:
- state information
- `binded`: `bool`, indicates whether the memory buffers needed for computation
have been allocated.
- `for_training`: whether the module is bound for training.
- `params_initialized`: `bool`, indicates whether the parameters of this module
have been initialized.
- `optimizer_initialized`: `bool`, indicates whether an optimizer is defined
and initialized.
- `inputs_need_grad`: `bool`, indicates whether gradients with respect to the
input data are needed. Might be useful when implementing composition of modules.
- input/output information
- `data_shapes`: a list of `(name, shape)`. In theory, since the memory is allocated,
we could directly provide the data arrays. But in the case of data parallelism,
the data arrays might not be of the same shape as viewed from the external world.
- `label_shapes`: a list of `(name, shape)`. This might be `[]` if the module does
          not need labels (e.g. it does not contain a loss function at the top), or a module
is not bound for training.
- `output_shapes`: a list of `(name, shape)` for outputs of the module.
- parameters (for modules with parameters)
- `get_params()`: return a tuple `(arg_params, aux_params)`. Each of those
          is a dictionary of name to ``NDArray`` mapping. Those `NDArray` always live on
          CPU. The actual parameters used for computing might live on other devices (GPUs);
          this function will retrieve (a copy of) the latest parameters. Therefore, modifying
          the returned arrays does not change the parameters actually used for computation.
- ``set_params(arg_params, aux_params)``: assign parameters to the devices
doing the computation.
- ``init_params(...)``: a more flexible interface to assign or initialize the parameters.
- setup
- `bind()`: prepare environment for computation.
- `init_optimizer()`: install optimizer for parameter updating.
- computation
- `forward(data_batch)`: forward operation.
- `backward(out_grads=None)`: backward operation.
- `update()`: update parameters according to installed optimizer.
- `get_outputs()`: get outputs of the previous forward operation.
- `get_input_grads()`: get the gradients with respect to the inputs computed
in the previous backward operation.
- `update_metric(metric, labels)`: update performance metric for the previous forward
computed results.
- other properties (mostly for backward compatibility)
- `symbol`: the underlying symbolic graph for this module (if any)
This property is not necessarily constant. For example, for `BucketingModule`,
this property is simply the *current* symbol being used. For other modules,
this value might not be well defined.
When those intermediate-level API are implemented properly, the following
high-level API will be automatically available for a module:
- `fit`: train the module parameters on a data set.
- `predict`: run prediction on a data set and collect outputs.
- `score`: run prediction on a data set and evaluate performance.
Examples
--------
>>> # An example of creating a mxnet module.
>>> import mxnet as mx
>>> data = mx.symbol.Variable('data')
>>> fc1 = mx.symbol.FullyConnected(data, name='fc1', num_hidden=128)
>>> act1 = mx.symbol.Activation(fc1, name='relu1', act_type="relu")
>>> fc2 = mx.symbol.FullyConnected(act1, name = 'fc2', num_hidden = 64)
>>> act2 = mx.symbol.Activation(fc2, name='relu2', act_type="relu")
>>> fc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=10)
>>> out = mx.symbol.SoftmaxOutput(fc3, name = 'softmax')
>>> mod = mx.mod.Module(out)
"""
def __init__(self, logger=logging):
self.logger = logger
self.binded = False
self.for_training = False
self.inputs_need_grad = False
self.params_initialized = False
self.optimizer_initialized = False
self._symbol = None
self._total_exec_bytes = 0
################################################################################
# High Level API
################################################################################
def forward_backward(self, data_batch):
"""A convenient function that calls both ``forward`` and ``backward``."""
self.forward(data_batch, is_train=True)
self.backward()
def score(self, eval_data, eval_metric, num_batch=None, batch_end_callback=None,
score_end_callback=None,
reset=True, epoch=0):
"""Runs prediction on ``eval_data`` and evaluates the performance according to
the given ``eval_metric``.
        Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
        an end-to-end use-case.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
eval_metric : EvalMetric or list of EvalMetrics
Evaluation metric to use.
num_batch : int
Number of batches to run. Defaults to ``None``, indicating run until the `DataIter`
finishes.
batch_end_callback : function
Could also be a list of functions.
reset : bool
Defaults to ``True``. Indicates whether we should reset `eval_data` before starting
            the evaluation.
epoch : int
Defaults to 0. For compatibility, this will be passed to callbacks (if any).
During training, this will correspond to the training epoch number.
Examples
--------
>>> # An example of using score for prediction.
>>> # Evaluate accuracy on val_dataiter
>>> metric = mx.metric.Accuracy()
>>> mod.score(val_dataiter, metric)
>>> mod.score(val_dataiter, ['mse', 'acc'])
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
eval_metric.reset()
actual_num_batch = 0
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.forward(eval_batch, is_train=False)
self.update_metric(eval_metric, eval_batch.label)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params)
actual_num_batch += 1
if score_end_callback:
params = BatchEndParam(epoch=epoch,
nbatch=actual_num_batch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(score_end_callback):
callback(params)
return eval_metric.get_name_value()
def iter_predict(self, eval_data, num_batch=None, reset=True):
"""Iterates over predictions.
Example Usage:
----------
>>> for pred, i_batch, batch in module.iter_predict(eval_data):
... # pred is a list of outputs from the module
        ... # i_batch is an integer
... # batch is the data batch from the data iterator
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
num_batch : int
Default is ``None``, indicating running all the batches in the data iterator.
reset : bool
            Default is ``True``, indicating whether we should reset the data iter before
            starting prediction.
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.forward(eval_batch, is_train=False)
pad = eval_batch.pad
outputs = [out[0:out.shape[0]-pad] for out in self.get_outputs()]
yield (outputs, nbatch, eval_batch)
def predict(self, eval_data, num_batch=None, merge_batches=True, reset=True,
always_output_list=False):
"""Runs prediction and collects the outputs.
When `merge_batches` is ``True`` (by default), the return value will be a list
``[out1, out2, out3]``, where each element is formed by concatenating the outputs for
all the mini-batches. When `always_output_list` is ``False`` (as by default),
then in the case of a single output, `out1` is returned instead of ``[out1]``.
When `merge_batches` is ``False``, the return value will be a nested list like
``[[out1_batch1, out2_batch1], [out1_batch2], ...]``. This mode is useful because
in some cases (e.g. bucketing), the module does not necessarily produce the same
number of outputs.
The objects in the results have type `NDArray`. If you need to work with a numpy array,
just call ``.asnumpy()`` on each `NDArray`.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
num_batch : int
Defaults to ``None``, indicates running all the batches in the data iterator.
merge_batches : bool
Defaults to ``True``, see above for return values.
reset : bool
Defaults to ``True``, indicates whether we should reset the data iter before
doing prediction.
always_output_list : bool
Defaults to ``False``, see above for return values.
Returns
-------
list of NDArray or list of list of NDArray
Prediction results.
Examples
--------
>>> # An example of using `predict` for prediction.
>>> # Predict on the first 10 batches of val_dataiter
>>> mod.predict(eval_data=val_dataiter, num_batch=10)
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
output_list = []
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.forward(eval_batch, is_train=False)
pad = eval_batch.pad
outputs = [out[0:out.shape[0]-pad].copy() for out in self.get_outputs()]
output_list.append(outputs)
if len(output_list) == 0:
return output_list
if merge_batches:
num_outputs = len(output_list[0])
for out in output_list:
assert len(out) == num_outputs, \
'Cannot merge batches, as num of outputs is not the same ' + \
'in mini-batches. Maybe bucketing is used?'
output_list2 = [ndarray.concatenate([out[i] for out in output_list])
for i in range(num_outputs)]
if num_outputs == 1 and not always_output_list:
return output_list2[0]
return output_list2
return output_list
def fit(self, train_data, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local',
optimizer='sgd', optimizer_params=(('learning_rate', 0.01),),
eval_end_callback=None,
eval_batch_end_callback=None, initializer=Uniform(0.01),
arg_params=None, aux_params=None, allow_missing=False,
force_rebind=False, force_init=False, begin_epoch=0, num_epoch=None,
validation_metric=None, monitor=None):
"""Trains the module parameters.
        Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
        an end-to-end use-case.
Parameters
----------
train_data : DataIter
Train DataIter.
eval_data : DataIter
If not ``None``, will be used as validation set and the performance
after each epoch will be evaluated.
eval_metric : str or EvalMetric
            Defaults to 'acc' (accuracy). The performance measure displayed during training.
Other possible predefined metrics are:
'ce' (CrossEntropy), 'f1', 'mae', 'mse', 'rmse', 'top_k_accuracy'.
epoch_end_callback : function or list of functions
Each callback will be called with the current `epoch`, `symbol`, `arg_params`
and `aux_params`.
batch_end_callback : function or list of function
Each callback will be called with a `BatchEndParam`.
kvstore : str or KVStore
Defaults to 'local'.
optimizer : str or Optimizer
Defaults to 'sgd'.
optimizer_params : dict
Defaults to ``(('learning_rate', 0.01),)``. The parameters for
the optimizer constructor.
The default value is not a dict, just to avoid pylint warning on dangerous
default values.
eval_end_callback : function or list of function
These will be called at the end of each full evaluation, with the metrics over
the entire evaluation set.
eval_batch_end_callback : function or list of function
These will be called at the end of each mini-batch during evaluation.
initializer : Initializer
The initializer is called to initialize the module parameters when they are
not already initialized.
arg_params : dict
Defaults to ``None``, if not ``None``, should be existing parameters from a trained
model or loaded from a checkpoint (previously saved model). In this case,
the value here will be used to initialize the module parameters, unless they
are already initialized by the user via a call to `init_params` or `fit`.
`arg_params` has a higher priority than `initializer`.
aux_params : dict
Defaults to ``None``. Similar to `arg_params`, except for auxiliary states.
allow_missing : bool
Defaults to ``False``. Indicates whether to allow missing parameters when `arg_params`
and `aux_params` are not ``None``. If this is ``True``, then the missing parameters
will be initialized via the `initializer`.
force_rebind : bool
Defaults to ``False``. Whether to force rebinding the executors if already bound.
force_init : bool
Defaults to ``False``. Indicates whether to force initialization even if the
parameters are already initialized.
begin_epoch : int
Defaults to 0. Indicates the starting epoch. Usually, if resumed from a
checkpoint saved at a previous training phase at epoch N, then this value should be
N+1.
num_epoch : int
Number of epochs for training.
Examples
--------
>>> # An example of using fit for training.
>>> # Assume training dataIter and validation dataIter are ready
>>> # Assume loading a previously checkpointed model
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 3)
>>> mod.fit(train_data=train_dataiter, eval_data=val_dataiter, optimizer='sgd',
... optimizer_params={'learning_rate':0.01, 'momentum': 0.9},
... arg_params=arg_params, aux_params=aux_params,
... eval_metric='acc', num_epoch=10, begin_epoch=3)
"""
assert num_epoch is not None, 'please specify number of epochs'
self.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label,
for_training=True, force_rebind=force_rebind)
if monitor is not None:
self.install_monitor(monitor)
self.init_params(initializer=initializer, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init)
self.init_optimizer(kvstore=kvstore, optimizer=optimizer,
optimizer_params=optimizer_params)
if validation_metric is None:
validation_metric = eval_metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
################################################################################
# training loop
################################################################################
for epoch in range(begin_epoch, num_epoch):
tic = time.time()
eval_metric.reset()
nbatch = 0
data_iter = iter(train_data)
end_of_batch = False
next_data_batch = next(data_iter)
while not end_of_batch:
data_batch = next_data_batch
if monitor is not None:
monitor.tic()
self.forward_backward(data_batch)
self.update()
try:
# pre fetch next batch
next_data_batch = next(data_iter)
self.prepare(next_data_batch)
except StopIteration:
end_of_batch = True
self.update_metric(eval_metric, data_batch.label)
if monitor is not None:
monitor.toc_print()
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params)
nbatch += 1
# one epoch of training is finished
for name, val in eval_metric.get_name_value():
self.logger.info('Epoch[%d] Train-%s=%f', epoch, name, val)
toc = time.time()
self.logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc-tic))
# sync aux params across devices
arg_params, aux_params = self.get_params()
self.set_params(arg_params, aux_params)
if epoch_end_callback is not None:
for callback in _as_list(epoch_end_callback):
callback(epoch, self.symbol, arg_params, aux_params)
#----------------------------------------
# evaluation on validation set
if eval_data:
res = self.score(eval_data, validation_metric,
score_end_callback=eval_end_callback,
batch_end_callback=eval_batch_end_callback, epoch=epoch)
#TODO: pull this into default
for name, val in res:
self.logger.info('Epoch[%d] Validation-%s=%f', epoch, name, val)
# end of 1 epoch, reset the data-iter for another epoch
train_data.reset()
################################################################################
# Symbol information
################################################################################
@property
def data_names(self):
"""A list of names for data required by this module."""
raise NotImplementedError()
@property
def output_names(self):
"""A list of names for the outputs of this module."""
raise NotImplementedError()
################################################################################
# Input/Output information
################################################################################
@property
def data_shapes(self):
"""A list of (name, shape) pairs specifying the data inputs to this module."""
raise NotImplementedError()
@property
def label_shapes(self):
"""A list of (name, shape) pairs specifying the label inputs to this module.
If this module does not accept labels -- either it is a module without loss
function, or it is not bound for training, then this should return an empty
list ``[]``.
"""
raise NotImplementedError()
@property
def output_shapes(self):
"""A list of (name, shape) pairs specifying the outputs of this module."""
raise NotImplementedError()
################################################################################
# Parameters of a module
################################################################################
def get_params(self):
"""Gets parameters, those are potentially copies of the the actual parameters used
to do computation on the device.
Returns
-------
``(arg_params, aux_params)``
A pair of dictionaries each mapping parameter names to NDArray values.
Examples
--------
>>> # An example of getting module parameters.
>>> print mod.get_params()
({'fc2_weight': <NDArray 64x128 @cpu(0)>, 'fc1_weight': <NDArray 128x100 @cpu(0)>,
'fc3_bias': <NDArray 10 @cpu(0)>, 'fc3_weight': <NDArray 10x64 @cpu(0)>,
'fc2_bias': <NDArray 64 @cpu(0)>, 'fc1_bias': <NDArray 128 @cpu(0)>}, {})
"""
raise NotImplementedError()
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False, allow_extra=False):
"""Initializes the parameters and auxiliary states.
Parameters
----------
initializer : Initializer
Called to initialize parameters if needed.
arg_params : dict
If not ``None``, should be a dictionary of existing `arg_params`. Initialization
will be copied from that.
aux_params : dict
If not ``None``, should be a dictionary of existing `aux_params`. Initialization
will be copied from that.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, `force_init` will force re-initialize even if already initialized.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
Examples
--------
>>> # An example of initializing module parameters.
>>> mod.init_params()
"""
raise NotImplementedError()
def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True,
allow_extra=False):
"""Assigns parameter and aux state values.
Parameters
----------
arg_params : dict
Dictionary of name to value (`NDArray`) mapping.
aux_params : dict
Dictionary of name to value (`NDArray`) mapping.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, will force re-initialize even if already initialized.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
Examples
--------
>>> # An example of setting module parameters.
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
>>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
"""
self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init,
allow_extra=allow_extra)
def save_params(self, fname):
"""Saves model parameters to file.
Parameters
----------
fname : str
Path to output param file.
Examples
--------
>>> # An example of saving module parameters.
>>> mod.save_params('myfile')
"""
arg_params, aux_params = self.get_params()
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
ndarray.save(fname, save_dict)
def load_params(self, fname):
"""Loads model parameters from file.
Parameters
----------
fname : str
Path to input param file.
Examples
--------
>>> # An example of loading module parameters.
>>> mod.load_params('myfile')
"""
save_dict = ndarray.load(fname)
arg_params = {}
aux_params = {}
for k, value in save_dict.items():
arg_type, name = k.split(':', 1)
if arg_type == 'arg':
arg_params[name] = value
elif arg_type == 'aux':
aux_params[name] = value
else:
raise ValueError("Invalid param file " + fname)
self.set_params(arg_params, aux_params)
def get_states(self, merge_multi_context=True):
"""Gets states from all devices
If `merge_multi_context` is ``True``, returns output of form ``[out1, out2]``.
Otherwise, it returns output of the form
``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``.
All output elements are `NDArray`.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the states
will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look like results from a
            single executor.
Returns
-------
A list of ``NDArray`` or a list of list of ``NDArray``.
"""
assert self.binded and self.params_initialized
assert not merge_multi_context
return []
def set_states(self, states=None, value=None):
"""Sets value for states. Only one of states & value can be specified.
Parameters
----------
states : list of list of NDArray
Source states arrays formatted like
``[[state1_dev1, state1_dev2], [state2_dev1, state2_dev2]]``.
value : number
A single scalar value for all state arrays.
"""
assert self.binded and self.params_initialized
assert not states and not value
def install_monitor(self, mon):
"""Installs monitor on all executors."""
raise NotImplementedError()
################################################################################
# Computations
################################################################################
def prepare(self, data_batch):
'''Prepares the module for processing a data batch.
Usually involves switching bucket and reshaping.
Parameters
----------
data_batch : DataBatch
'''
pass
def forward(self, data_batch, is_train=None):
"""Forward computation. It supports data batches with different shapes, such as
different batch sizes or different image sizes.
If reshaping of data batch relates to modification of symbol or module, such as
changing image layout ordering or switching from training to predicting, module
rebinding is required.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means `is_train` takes the value of ``self.for_training``.
Examples
--------
>>> import mxnet as mx
>>> from collections import namedtuple
>>> Batch = namedtuple('Batch', ['data'])
>>> data = mx.sym.Variable('data')
>>> out = data * 2
>>> mod = mx.mod.Module(symbol=out, label_names=None)
>>> mod.bind(data_shapes=[('data', (1, 10))])
>>> mod.init_params()
>>> data1 = [mx.nd.ones((1, 10))]
>>> mod.forward(Batch(data1))
>>> print mod.get_outputs()[0].asnumpy()
[[ 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]]
>>> # Forward with data batch of different shape
>>> data2 = [mx.nd.ones((3, 5))]
>>> mod.forward(Batch(data2))
>>> print mod.get_outputs()[0].asnumpy()
[[ 2. 2. 2. 2. 2.]
[ 2. 2. 2. 2. 2.]
[ 2. 2. 2. 2. 2.]]
"""
raise NotImplementedError()
def backward(self, out_grads=None):
"""Backward computation.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
Examples
--------
>>> # An example of backward computation.
>>> mod.backward()
>>> print mod.get_input_grads()[0].asnumpy()
[[[ 1.10182791e-05 5.12257748e-06 4.01927764e-06 8.32566820e-06
-1.59775993e-06 7.24269375e-06 7.28067835e-06 -1.65902311e-05
5.46342608e-06 8.44196393e-07]
...]]
"""
raise NotImplementedError()
def get_outputs(self, merge_multi_context=True):
"""Gets outputs of the previous forward computation.
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise,
        it returns output of the form ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``.
All the output elements have type `NDArray`. When `merge_multi_context` is ``False``,
those `NDArray` instances might live on different devices.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look like results from a
            single executor.
Returns
-------
list of `NDArray` or list of list of `NDArray`.
Output
Examples
--------
>>> # An example of getting forward output.
>>> print mod.get_outputs()[0].asnumpy()
[[ 0.09999977 0.10000153 0.10000716 0.10000195 0.09999853 0.09999743
0.10000272 0.10000113 0.09999088 0.09999888]]
"""
raise NotImplementedError()
def get_input_grads(self, merge_multi_context=True):
"""Gets the gradients to the inputs, computed in the previous backward computation.
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements have type `NDArray`. When `merge_multi_context` is ``False``, those `NDArray`
instances might live on different devices.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the gradients
will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look like results from a
            single executor.
Returns
-------
list of NDArray or list of list of NDArray
Input gradients.
Examples
--------
>>> # An example of getting input gradients.
>>> print mod.get_input_grads()[0].asnumpy()
[[[ 1.10182791e-05 5.12257748e-06 4.01927764e-06 8.32566820e-06
-1.59775993e-06 7.24269375e-06 7.28067835e-06 -1.65902311e-05
5.46342608e-06 8.44196393e-07]
...]]
"""
raise NotImplementedError()
def update(self):
"""Updates parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch.
Examples
--------
>>> # An example of updating module parameters.
>>> mod.init_optimizer(kvstore='local', optimizer='sgd',
... optimizer_params=(('learning_rate', 0.01), ))
>>> mod.backward()
>>> mod.update()
>>> print mod.get_params()[0]['fc3_weight'].asnumpy()
[[ 5.86930104e-03 5.28078526e-03 -8.88729654e-03 -1.08308345e-03
6.13054074e-03 4.27560415e-03 1.53817423e-03 4.62131854e-03
4.69872449e-03 -2.42400169e-03 9.94111411e-04 1.12386420e-03
...]]
"""
raise NotImplementedError()
def update_metric(self, eval_metric, labels):
"""Evaluates and accumulates evaluation metric on outputs of the last forward
computation.
Parameters
----------
eval_metric : EvalMetric
Evaluation metric to use.
labels : list of NDArray
Typically `data_batch.label`.
Examples
--------
>>> # An example of updating evaluation metric.
>>> mod.forward(data_batch)
>>> mod.update_metric(metric, data_batch.label)
"""
raise NotImplementedError()
################################################################################
# module setup
################################################################################
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple) or DataDesc objects
Typically is ``data_iter.provide_data``. Can also be a list of
(data name, data shape).
label_shapes : list of (str, tuple) or DataDesc objects
Typically is ``data_iter.provide_label``. Can also be a list of
(label name, label shape).
for_training : bool
            Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
            Default is ``False``. This function does nothing if the executors are already
            bound, but if this is ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
            (defaults to 'write').
Can be specified globally (str) or for each argument (list, dict).
Examples
--------
>>> # An example of binding symbols.
>>> mod.bind(data_shapes=[('data', (1, 10, 10))])
>>> # Assume train_iter is already created.
>>> mod.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
"""
raise NotImplementedError()
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),), force_init=False):
"""Installs and initializes optimizers, as well as initialize kvstore for
distributed training
Parameters
----------
kvstore : str or KVStore
Defaults to `'local'`.
optimizer : str or Optimizer
Defaults to `'sgd'`.
optimizer_params : dict
Defaults to ``(('learning_rate', 0.01),)``. The default value is not a dictionary,
            just to avoid the pylint warning about dangerous default values.
force_init : bool
            Defaults to ``False``; indicates whether to force re-initializing an optimizer
            even if it is already installed.
Examples
--------
>>> # An example of initializing optimizer.
>>> mod.init_optimizer(optimizer='sgd', optimizer_params=(('learning_rate', 0.005),))
"""
raise NotImplementedError()
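    # Putting the pieces above together: a rough sketch of a typical training
    # loop with a concrete subclass (illustrative only; `train_iter` and
    # `metric` are assumed to be a DataIter and an EvalMetric created
    # elsewhere, and `mod` a concrete module such as `Module`):
    #
    #   mod.bind(data_shapes=train_iter.provide_data,
    #            label_shapes=train_iter.provide_label)
    #   mod.init_params()
    #   mod.init_optimizer(optimizer='sgd',
    #                      optimizer_params=(('learning_rate', 0.01),))
    #   for batch in train_iter:
    #       mod.forward(batch, is_train=True)
    #       mod.backward()
    #       mod.update()
    #       mod.update_metric(metric, batch.label)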
################################################################################
# misc
################################################################################
@property
def symbol(self):
"""Gets the symbol associated with this module.
Except for `Module`, for other types of modules (e.g. `BucketingModule`), this
        property might not be constant throughout its lifetime. Some modules might
not even be associated with any symbols.
"""
return self._symbol
| 0.00403 |
from typing import cast, Optional, Union
from aiohttp import hdrs, web
from aiohttp.payload import IOBasePayload, Payload
from openapi_core.schema.operations.models import Operation
from openapi_core.schema.specs.models import Spec
from openapi_core.validation.request.datatypes import (
OpenAPIRequest,
RequestParameters,
)
from openapi_core.validation.response.datatypes import OpenAPIResponse
from yarl import URL
from ..annotations import Handler
from .constants import HANDLER_OPENAPI_MAPPING_KEY
from .exceptions import OperationError
from .utils import get_openapi_spec
def find_core_operation(
request: web.Request, handler: Handler
) -> Optional[Operation]:
mapping = getattr(handler, HANDLER_OPENAPI_MAPPING_KEY, None)
if not mapping:
return None
operation_id = mapping.get(request.method) or mapping.get(hdrs.METH_ANY)
if operation_id is None:
return None
try:
return get_core_operation(
get_openapi_spec(request.config_dict), operation_id
)
except OperationError:
return None
def get_core_operation(spec: Spec, operation_id: str) -> Operation:
for path in spec.paths.values():
for operation in path.operations.values():
if operation.operation_id == operation_id:
return operation
raise OperationError(
f"Unable to find operation '{operation_id}' in given OpenAPI spec"
)
def get_full_url_pattern(request: web.Request) -> str:
"""Get full URL pattern for given :class:`aiohttp.web.Request` instance."""
full_url: URL = request.url.with_path(get_path_pattern(request))
return full_url.human_repr()
def get_path_pattern(request: web.Request) -> str:
"""Get path pattern for given :class:`aiohttp.web.Request` instance.
    When the current handler is a dynamic route, use its formatter; otherwise
    use the path from the route info.
"""
info = request.match_info.route.get_info()
formatter = info.get("formatter")
return cast(str, formatter if formatter is not None else info.get("path"))
async def to_core_openapi_request(request: web.Request) -> OpenAPIRequest:
"""Convert aiohttp.web request to openapi-core request.
    Afterwards, the openapi-core request can be used to validate request data
    against the spec.
"""
body: Optional[Union[bytes, str]] = None
if request.body_exists and request.can_read_body:
raw_body = await request.read()
        # If possible, convert the bytes to a string
try:
body = raw_body.decode("utf-8")
# If not, use bytes as request body instead
except UnicodeDecodeError:
body = raw_body
return OpenAPIRequest(
full_url_pattern=get_full_url_pattern(request),
method=request.method.lower(),
body=body,
mimetype=request.content_type,
parameters=to_core_request_parameters(request),
)
def to_core_openapi_response(response: web.StreamResponse) -> OpenAPIResponse:
"""Convert aiohttp.web response to openapi-core response."""
return OpenAPIResponse(
data=to_core_openapi_response_data(response),
status_code=response.status,
mimetype=response.content_type,
)
def to_core_openapi_response_data(
response: web.StreamResponse,
) -> Optional[bytes]:
if isinstance(response, web.Response):
body = response.body
if not body:
return None
# TODO: Find better way to provide response from payload
if isinstance(body, IOBasePayload):
return cast(bytes, body._value.getvalue())
if isinstance(body, Payload):
return cast(bytes, body._value)
return body
return None
def to_core_request_parameters(request: web.Request) -> RequestParameters:
header_attr = [
item
for item in RequestParameters.__attrs_attrs__
if item.name == "header"
][0]
is_dict_factory = header_attr.default.factory == dict
return RequestParameters(
query=request.rel_url.query,
header=request.headers if is_dict_factory else request.headers.items(),
cookie=request.cookies,
path=request.match_info,
)
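# Illustrative sketch (assumed usage, not part of the helpers above): wiring
# the request/response converters into an aiohttp middleware. The actual
# validation step is omitted here; it would be performed with openapi-core
# validators against the loaded spec.
@web.middleware
async def _example_openapi_middleware(
    request: web.Request, handler: Handler
) -> web.StreamResponse:
    core_request = await to_core_openapi_request(request)
    # ... validate `core_request` against the spec here ...
    response = await handler(request)
    core_response = to_core_openapi_response(response)
    # ... validate `core_response` against the spec here ...
    return response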
| 0 |
import pandas as pd
import numpy as np
import os, sys, multiprocessing, re, glob
from sympy import Symbol, expand, N
try:
ncores = int(os.popen(r'echo $NCPUS').read())
except Exception as e :
print e
ncores=1
print ncores
available_cores=ncores
co2 =False
if co2:
smilesdf = pd.read_csv('../src/background/smiles_mined.csv')
smiles=pd.Series(smilesdf.smiles)
smiles.index=smilesdf.name
smiles['CO']='C'
smiles['CO2']='C'
smiles['DUMMY']=''
smiles['NA']=''
smiles =dict(zip(smiles.index,[str(i).upper().count('C') for i in smiles]))
print 'THIS ADDS INORGANICS, DO NOT USE COMPLETE MECH'
print 'todo - check duplicate matches, then also check combinations'
try: filename1=sys.argv[1]
except:filename1 = '../src/background/mcm331complete.kpp'
full = tuple(open(filename1))
try: filename=sys.argv[2]
except: filename = '../src/background/inorganic_mcm.kpp'
inorganics = tuple(open(filename))
fullstr='~'.join(full+inorganics).replace('\n','').replace('\t','').replace(' ','')
eqn = [re.sub(r"[\r\s\n]*",'',i).split(':') for i in re.findall(r'(\{[\. \s\w\d]*\}.*\:*);\r*~' ,fullstr)]
combined = [i.replace('\t','').replace(' ','').replace('\n','') for i in full+inorganics]
def iseqn (x):
if (re.search(r'\{[\. \s\d]*\}', x)):
return True
combined1 = [i.split('}')[1].split(':') for i in filter(iseqn , combined)]
nocoeff = re.compile(r'\b\d*\.*\d')
def pool_eqn(x):
#sort the reactions
r,p=x[0].split('=')
p=p.split('+')
p.sort()
r=r.split('+')
r.sort()
if co2:
cdiff=sum([smiles[nocoeff.sub('',i)] for i in p])-sum([smiles[nocoeff.sub('',i)] for i in r])
if cdiff<0: p.extend(['CO2']*abs(cdiff))
else: p.extend(['CO2']*cdiff)
p='+'.join(p)
r='+'.join(r)
x[0] = r+'='+p
    # (disabled) replace D exponents and EXP for sympy, e.g. re.sub(r'(\d)[dD]([+-\.\d])', r'\1e\2', x[1].split('//')[0].replace('EXP', 'exp'))
x[1] = x[1].split('//')[0].replace(';','')
return x
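# Illustrative example of pool_eqn (with co2 == False): reactants and products
# are sorted alphabetically so equivalent reactions compare equal, and trailing
# comments/semicolons are stripped from the rate expression, e.g.
#   pool_eqn(['O3+NO2=O2+NO3', 'K1*EXP(-1000/TEMP);//mcm'])
#   -> ['NO2+O3=NO3+O2', 'K1*EXP(-1000/TEMP)']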
eqn = multiprocessing.Pool(available_cores).map(pool_eqn,combined1)
nocoeff = re.compile(r'\b[\d\.]*(\w+)\b')
specs = []
if co2:specs=['CO2']
for e in eqn:
specs.extend(re.findall(r"[\w']+", e[0]))
specs = list(set([nocoeff.match(i).group(1) for i in specs]))
specs.sort()
string = '''// reformatted by reformat.py
// contact: [email protected]
// filedata: %s
// %s species %s reactions
''' %(filename1 + '+' + filename, len(specs),len(eqn))
print string
string += '''
#INLINE F90_GLOBAL
REAL(dp)::M, N2, O2, RO2, H2O
#ENDINLINE
#INLINE F90_RCONST
#ENDINLINE
#INCLUDE atoms
#DEFVAR
'''
for i in specs:
if i == 'DUMMY': continue
string += i+'=IGNORE;\n'
string +='''#INLINE F90_RCONST
USE constants
RO2 = 0'''
''' get RO2 from mechanism '''
dummy = False
ro2 = ''
for i in full:
if 'RO2 = &' in i: dummy = True
if 'CALL' in i: break
if dummy: ro2+= i
ro2 = re.findall('C\(ind_[A-z0-9]*\)',ro2)
r2=re.compile(r'_([A-z0-9]*)\)')
ro2 = [y for y in ro2 if r2.search(y).group(1) in specs]
for i in ro2:
string += '''&
+%s'''%i
string += '''
CALL mcm_constants(time, temp, M, N2, O2, RO2, H2O)
#ENDINLINE
#EQUATIONS
'''
for i,j in enumerate(eqn):
if j[0][-1]=='=':j[0]+='DUMMY'
string += '{%04d} %s : %s;\n'%(i,j[0],j[1].replace('\r',''))
string = re.sub(r';[ \t]*;', ';', string)  # collapse ';;' separated by spaces/tabs (\h is not a valid Python re escape)
ic_file = filename1.replace('../InitCons/','').replace('.csv','').replace('../src/background/','')
with open("formatted_"+ic_file, 'w') as f:
f.write(string)
print "\n formatted_"+ic_file+' written'
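# Example run (illustrative):
#   python reformat.py ../src/background/mcm331complete.kpp ../src/background/inorganic_mcm.kpp
# writes formatted_mcm331complete.kpp to the current directory.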
| 0.030396 |
import os
import sys
import logging
import optparse
from jenkinsapi import jenkins
log = logging.getLogger(__name__)
class jenkins_invoke(object):
@classmethod
def mkparser(cls):
parser = optparse.OptionParser()
DEFAULT_BASEURL=os.environ.get( "JENKINS_URL", "http://localhost/jenkins" )
parser.help_text = "Execute a number of jenkins jobs on the server of your choice. Optionally block until the jobs are complete."
parser.add_option("-J", "--jenkinsbase", dest="baseurl",
help="Base URL for the Jenkins server, default is %s" % DEFAULT_BASEURL,
type="str", default=DEFAULT_BASEURL)
parser.add_option('--username', '-u', dest='username',
help="Username for jenkins authentification", type='str', default=None)
parser.add_option('--password', '-p', dest='password',
help="password for jenkins user auth", type='str', default=None)
parser.add_option("-b", "--block", dest="block", action="store_true", default=False,
help="Block until each of the jobs is complete.")
parser.add_option("-t", "--token", dest="token",help="Optional security token.",
default=None)
return parser
@classmethod
def main(cls):
parser = cls.mkparser()
options, args = parser.parse_args()
try:
assert len(args) > 0, "Need to specify at least one job name"
except AssertionError, e:
log.critical(e[0])
parser.print_help()
sys.exit(1)
invoker = cls(options, args)
invoker()
def __init__(self, options, jobs):
self.options = options
self.jobs = jobs
self.api = self._get_api(baseurl=options.baseurl, username=options.username, password=options.password)
def _get_api(self, baseurl, username, password):
return jenkins.Jenkins(baseurl, username, password)
def __call__(self):
for job in self.jobs:
self.invokejob(job, block=self.options.block, token=self.options.token)
def invokejob(self, jobname, block, token):
assert type(block) == bool
assert type(jobname) == str
assert token is None or isinstance(token, str)
job = self.api.get_job(jobname)
job.invoke(securitytoken=token, block=block)
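    # Example invocation (illustrative; assumes this module is exposed as a
    # console script, with placeholder URL, credentials and job names):
    #   jenkins_invoke -J http://jenkins.example.com:8080 -u alice -p secret \
    #       --block --token buildtoken job-one job-two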
def main( ):
logging.basicConfig()
logging.getLogger("").setLevel(logging.INFO)
jenkins_invoke.main() | 0.007507 |
#!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: netapp_e_host
short_description: manage eseries hosts
description:
- Create, update, remove hosts on NetApp E-series storage arrays
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
description:
- the id of the storage array you wish to act against
required: True
name:
description:
        - If the host doesn't yet exist, the label to assign at creation time.
        - If the host already exists, this is used to identify the host and apply any desired changes.
required: True
host_type_index:
description:
        - The index that maps to the host type you wish to create. It is recommended to use the M(netapp_e_facts) module to gather this information. Alternatively, you can use the WSP portal to retrieve the information.
required: True
ports:
description:
        - A list of dictionaries of host ports you wish to associate with the newly created host.
required: False
group:
description:
- the group you want the host to be a member of
required: False
"""
EXAMPLES = """
- name: Set Host Info
netapp_e_host:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
name: "{{ host_name }}"
host_type_index: "{{ host_type_index }}"
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
sample: The host has been created.
"""
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError:
err = get_exception()
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
            data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
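# Illustrative call (hypothetical URL and credentials): fetch the host list of
# array id 1 through the WebServices proxy.
#   rc, hosts = request('https://wsp.example.com/devmgr/v2/storage-systems/1/hosts',
#                       url_username='admin', url_password='secret',
#                       headers=HEADERS, validate_certs=False)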
class Host(object):
def __init__(self):
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
ssid=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['absent', 'present']),
group=dict(type='str', required=False),
ports=dict(type='list', required=False),
force_port=dict(type='bool', default=False),
name=dict(type='str', required=True),
host_type_index=dict(type='int', required=True)
))
self.module = AnsibleModule(argument_spec=argument_spec)
args = self.module.params
self.group = args['group']
self.ports = args['ports']
self.force_port = args['force_port']
self.name = args['name']
self.host_type_index = args['host_type_index']
self.state = args['state']
self.ssid = args['ssid']
self.url = args['api_url']
self.user = args['api_username']
self.pwd = args['api_password']
self.certs = args['validate_certs']
self.ports = args['ports']
self.post_body = dict()
if not self.url.endswith('/'):
self.url += '/'
@property
def valid_host_type(self):
try:
(rc, host_types) = request(self.url + 'storage-systems/%s/host-types' % self.ssid, url_password=self.pwd,
url_username=self.user, validate_certs=self.certs, headers=HEADERS)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
try:
match = filter(lambda host_type: host_type['index'] == self.host_type_index, host_types)[0]
return True
except IndexError:
self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index)
@property
def hostports_available(self):
used_ids = list()
try:
(rc, self.available_ports) = request(self.url + 'storage-systems/%s/unassociated-host-ports' % self.ssid,
url_password=self.pwd, url_username=self.user,
validate_certs=self.certs,
headers=HEADERS)
except:
err = get_exception()
self.module.fail_json(
msg="Failed to get unassociated host ports. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
if len(self.available_ports) > 0 and len(self.ports) <= len(self.available_ports):
for port in self.ports:
for free_port in self.available_ports:
                    # Desired type matches, but also make sure we haven't already used the ID
if not free_port['id'] in used_ids:
# update the port arg to have an id attribute
used_ids.append(free_port['id'])
break
if len(used_ids) != len(self.ports) and not self.force_port:
self.module.fail_json(
msg="There are not enough free host ports with the specified port types to proceed")
else:
return True
else:
self.module.fail_json(msg="There are no host ports available OR there are not enough unassigned host ports")
@property
def group_id(self):
if self.group:
try:
(rc, all_groups) = request(self.url + 'storage-systems/%s/host-groups' % self.ssid,
url_password=self.pwd,
url_username=self.user, validate_certs=self.certs, headers=HEADERS)
except:
err = get_exception()
self.module.fail_json(
msg="Failed to get host groups. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
try:
group_obj = filter(lambda group: group['name'] == self.group, all_groups)[0]
return group_obj['id']
except IndexError:
self.module.fail_json(msg="No group with the name: %s exists" % self.group)
else:
# Return the value equivalent of no group
return "0000000000000000000000000000000000000000"
@property
def host_exists(self):
try:
(rc, all_hosts) = request(self.url + 'storage-systems/%s/hosts' % self.ssid, url_password=self.pwd,
url_username=self.user, validate_certs=self.certs, headers=HEADERS)
except:
err = get_exception()
self.module.fail_json(
msg="Failed to determine host existence. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
self.all_hosts = all_hosts
try: # Try to grab the host object
self.host_obj = filter(lambda host: host['label'] == self.name, all_hosts)[0]
return True
except IndexError:
# Host with the name passed in does not exist
return False
@property
def needs_update(self):
needs_update = False
self.force_port_update = False
if self.host_obj['clusterRef'] != self.group_id or \
self.host_obj['hostTypeIndex'] != self.host_type_index:
needs_update = True
if self.ports:
if not self.host_obj['ports']:
needs_update = True
for arg_port in self.ports:
# First a quick check to see if the port is mapped to a different host
if not self.port_on_diff_host(arg_port):
for obj_port in self.host_obj['ports']:
if arg_port['label'] == obj_port['label']:
# Confirmed that port arg passed in exists on the host
# port_id = self.get_port_id(obj_port['label'])
if arg_port['type'] != obj_port['portId']['ioInterfaceType']:
needs_update = True
if 'iscsiChapSecret' in arg_port:
# No way to know the current secret attr, so always return True just in case
needs_update = True
else:
# If the user wants the ports to be reassigned, do it
if self.force_port:
self.force_port_update = True
needs_update = True
else:
self.module.fail_json(
msg="The port you specified:\n%s\n is associated with a different host. Specify force_port as True or try a different port spec" % arg_port)
return needs_update
def port_on_diff_host(self, arg_port):
""" Checks to see if a passed in port arg is present on a different host """
for host in self.all_hosts:
# Only check 'other' hosts
            if host['label'] != self.name:
for port in host['ports']:
# Check if the port label is found in the port dict list of each host
if arg_port['label'] == port['label']:
self.other_host = host
return True
return False
def reassign_ports(self, apply=True):
if not self.post_body:
self.post_body = dict(
portsToUpdate=dict()
)
for port in self.ports:
if self.port_on_diff_host(port):
self.post_body['portsToUpdate'].update(dict(
portRef=self.other_host['hostPortRef'],
hostRef=self.host_obj['id'],
# Doesnt yet address port identifier or chap secret
))
if apply:
try:
(rc, self.host_obj) = request(
self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id']),
url_username=self.user, url_password=self.pwd, headers=HEADERS,
validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
except:
err = get_exception()
self.module.fail_json(
msg="Failed to reassign host port. Host Id [%s]. Array Id [%s]. Error [%s]." % (
self.host_obj['id'], self.ssid, str(err)))
def update_host(self):
if self.ports:
if self.hostports_available:
if self.force_port_update is True:
self.reassign_ports(apply=False)
# Make sure that only ports that arent being reassigned are passed into the ports attr
self.ports = [port for port in self.ports if not self.port_on_diff_host(port)]
self.post_body['ports'] = self.ports
if self.group:
self.post_body['groupId'] = self.group_id
self.post_body['hostType'] = dict(index=self.host_type_index)
try:
(rc, self.host_obj) = request(self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id']),
url_username=self.user, url_password=self.pwd, headers=HEADERS,
validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
except:
err = get_exception()
self.module.fail_json(msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
self.module.exit_json(changed=True, **self.host_obj)
def create_host(self):
post_body = dict(
name=self.name,
host_type=dict(index=self.host_type_index),
groupId=self.group_id,
ports=self.ports
)
if self.ports:
# Check that all supplied port args are valid
if self.hostports_available:
post_body.update(ports=self.ports)
elif not self.force_port:
self.module.fail_json(
msg="You supplied ports that are already in use. Supply force_port to True if you wish to reassign the ports")
if not self.host_exists:
try:
(rc, create_resp) = request(self.url + "storage-systems/%s/hosts" % self.ssid, method='POST',
url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
data=json.dumps(post_body), headers=HEADERS)
except:
err = get_exception()
self.module.fail_json(
msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
else:
self.module.exit_json(changed=False,
msg="Host already exists. Id [%s]. Host [%s]." % (self.ssid, self.name))
self.host_obj = create_resp
if self.ports and self.force_port:
self.reassign_ports()
self.module.exit_json(changed=True, **self.host_obj)
def remove_host(self):
try:
(rc, resp) = request(self.url + "storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj['id']),
method='DELETE',
url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
except:
err = get_exception()
self.module.fail_json(
msg="Failed to remote host. Host[%s]. Array Id [%s]. Error [%s]." % (self.host_obj['id'],
self.ssid,
str(err)))
def apply(self):
if self.state == 'present':
if self.host_exists:
if self.needs_update and self.valid_host_type:
self.update_host()
else:
self.module.exit_json(changed=False, msg="Host already present.", id=self.ssid, label=self.name)
elif self.valid_host_type:
self.create_host()
else:
if self.host_exists:
self.remove_host()
self.module.exit_json(changed=True, msg="Host removed.")
else:
self.module.exit_json(changed=False, msg="Host already absent.", id=self.ssid, label=self.name)
def main():
host = Host()
host.apply()
if __name__ == '__main__':
main()
| 0.004325 |
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
from ..downloader.f4m import remove_encrypted_media
from ..utils import (
NO_DEFAULT,
age_restricted,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
error_to_compat_str,
ExtractorError,
fix_xml_ampersands,
float_or_none,
int_or_none,
parse_iso8601,
RegexNotFoundError,
sanitize_filename,
sanitized_Request,
unescapeHTML,
unified_strdate,
unified_timestamp,
url_basename,
xpath_element,
xpath_text,
xpath_with_ns,
determine_protocol,
parse_duration,
mimetype2ext,
update_Request,
update_url_query,
parse_m3u8_attributes,
extract_attributes,
parse_codecs,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* manifest_url
The URL of the manifest file in case of
fragmented media (DASH, hls, hds)
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
                                 and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragments A list of fragments of the fragmented media,
with the following entries:
* "url" (mandatory) - fragment's URL
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{language: subformats}. "subformats" is a list sorted from
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
    average_rating: Average rating given by users; the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
                    should allow getting the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series or programme:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
self.initialize()
return self._real_extract(url)
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in content[:512]):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in content[:512]:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return compat_etree_fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={}):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
raise ExtractorError(
'%s. You might want to use --proxy to workaround.' % msg,
expected=True)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
        patterns, returning the first matching group.
        In case of failure, return a default value, or raise a WARNING or a
        RegexNotFoundError (depending on fatal), specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
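    # e.g. self._search_regex(r'id=(\d+)', 'id=42', 'video id') returns '42',
    # while a non-matching pattern with default='none' returns 'none' instead
    # of raising RegexNotFoundError.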
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for the manually specified credentials using username_option
and password_option as keys in params dictionary. If no such credentials
available look in the netrc file using the netrc_machine or _NETRC_MACHINE
value.
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
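    # The patterns above match OpenGraph meta tags in either attribute order,
    # e.g. for prop == 'title' both of these yield 'Some title':
    #   <meta property="og:title" content="Some title">
    #   <meta content="Some title" property="og:title">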
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta('isFamilyFriendly', html)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld = self._search_regex(
r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
html, 'JSON-LD', group='json_ld', **kwargs)
default = kwargs.get('default', NO_DEFAULT)
if not json_ld:
return default if default is not NO_DEFAULT else {}
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
info = {}
if not isinstance(json_ld, (list, tuple, dict)):
return info
if isinstance(json_ld, dict):
json_ld = [json_ld]
for e in json_ld:
if e.get('@context') == 'http://schema.org':
item_type = e.get('@type')
if expected_type is not None and expected_type != item_type:
return info
if item_type == 'TVEpisode':
info.update({
'episode': unescapeHTML(e.get('name')),
'episode_number': int_or_none(e.get('episodeNumber')),
'description': unescapeHTML(e.get('description')),
})
part_of_season = e.get('partOfSeason')
if isinstance(part_of_season, dict) and part_of_season.get('@type') == 'TVSeason':
info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
if isinstance(part_of_series, dict) and part_of_series.get('@type') == 'TVSeries':
info['series'] = unescapeHTML(part_of_series.get('name'))
elif item_type == 'Article':
info.update({
'timestamp': parse_iso8601(e.get('datePublished')),
'title': unescapeHTML(e.get('headline')),
'description': unescapeHTML(e.get('articleBody')),
})
elif item_type == 'VideoObject':
info.update({
'url': e.get('contentUrl'),
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'thumbnail': e.get('thumbnailUrl'),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('uploadDate')),
'filesize': float_or_none(e.get('contentSize')),
'tbr': int_or_none(e.get('bitrate')),
'width': int_or_none(e.get('width')),
'height': int_or_none(e.get('height')),
})
break
return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
            if not attrs:
continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
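    # e.g. _hidden_inputs('<form><input type="hidden" name="csrf" value="abc123"></form>')
    # returns {'csrf': 'abc123'}; inputs whose type is not hidden/submit are ignored.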
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
for f in formats:
# Automatically determine tbr when missing based on abr and vbr (improves
# formats sorting in some cases)
if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
f['tbr'] = f['abr'] + f['vbr']
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(
f.get(field)
if f.get(field) is not None
else ('' if field == 'format_id' else -1)
for field in field_preference)
preference = f.get('preference')
if preference is None:
preference = 0
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
protocol = f.get('protocol') or determine_protocol(f)
proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
if f.get('vcodec') == 'none': # audio only
preference -= 50
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if f.get('acodec') == 'none': # video only
preference -= 40
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
proto_preference,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video'):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_urllib_error.URLError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source,
fatal=fatal)
if manifest is False:
return []
return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
# currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
if akamai_pv is not None and ';' in akamai_pv.text:
playerVerificationChallenge = akamai_pv.text.split(';')[0]
if playerVerificationChallenge.strip() != '':
return []
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM-protected media from the final formats
# (see https://github.com/rg3/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
base_url = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
'base URL', default=None)
if base_url:
base_url = base_url.strip()
bootstrap_info = xpath_element(
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
'bootstrap info', default=None)
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
width = int_or_none(media_el.attrib.get('width'))
height = int_or_none(media_el.attrib.get('height'))
format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
# If <bootstrapInfo> is present, the specified f4m is a
# stream-level manifest, and only set-level manifests may refer to
# external resources. See section 11.4 and section 4 of F4M spec
if bootstrap_info is None:
media_url = None
# @href is introduced in 2.0, see section 11.6 of F4M spec
if manifest_version == '2.0':
media_url = media_el.attrib.get('href')
if media_url is None:
media_url = media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
# If media_url is itself a f4m manifest do the recursive extraction
# since bitrates in parent manifest (this one) and media_url manifest
# may differ leading to inability to resolve the format by requested
# bitrate in f4m downloader
ext = determine_ext(manifest_url)
if ext == 'f4m':
f4m_formats = self._extract_f4m_formats(
manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)
# Sometimes stream-level manifest contains single media entry that
# does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
# At the same time parent's media entry in set-level manifest may
# contain it. We will copy it from parent in such cases.
if len(f4m_formats) == 1:
f = f4m_formats[0]
f.update({
'tbr': f.get('tbr') or tbr,
'width': f.get('width') or width,
'height': f.get('height') or height,
'format_id': f.get('format_id') if not tbr else format_id,
})
formats.extend(f4m_formats)
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', preference=preference,
m3u8_id=m3u8_id, fatal=fatal))
continue
formats.append({
'format_id': format_id,
'url': manifest_url,
'manifest_url': manifest_url,
'ext': 'flv' if bootstrap_info is not None else None,
'tbr': tbr,
'width': width,
'height': height,
'preference': preference,
})
return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False):
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
formats = [self._m3u8_meta_format(m3u8_url, ext, preference, m3u8_id)]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
# We should try extracting formats only from master playlists [1], i.e.
# playlists that describe available qualities. On the other hand media
# playlists [2] should be returned as is since they contain just the media
# without quality renditions.
# Fortunately, a master playlist can easily be distinguished from a media
# playlist based on which tags are present. Per [1, 2], master playlist
# tags MUST NOT appear in a media playlist and vice versa. Per [3], the
# #EXT-X-TARGETDURATION tag is REQUIRED for every media playlist and MUST
# NOT appear in a master playlist, so its presence reliably identifies a
# media playlist.
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.4
# 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
# 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
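# For orientation (hedged, abbreviated examples not tied to any real stream):
#   master playlist line: #EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=1280x720
#   media playlist lines: #EXT-X-TARGETDURATION:10 followed by #EXTINF:9.009,
# so checking for #EXT-X-TARGETDURATION below is enough to tell the two apart.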
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
return [{
'url': m3u8_url,
'format_id': m3u8_id,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}]
last_info = {}
last_media = {}
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_info = parse_m3u8_attributes(line)
elif line.startswith('#EXT-X-MEDIA:'):
media = parse_m3u8_attributes(line)
media_type = media.get('TYPE')
if media_type in ('VIDEO', 'AUDIO'):
media_url = media.get('URI')
if media_url:
format_id = []
for v in (media.get('GROUP-ID'), media.get('NAME')):
if v:
format_id.append(v)
formats.append({
'format_id': '-'.join(format_id),
'url': format_url(media_url),
'language': media.get('LANGUAGE'),
'vcodec': 'none' if media_type == 'AUDIO' else None,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
})
else:
# When there is no URI in EXT-X-MEDIA let this tag's
# data be used by regular URI lines below
last_media = media
elif line.startswith('#') or not line.strip():
continue
else:
tbr = int_or_none(last_info.get('AVERAGE-BANDWIDTH') or last_info.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
# Although the specification does not mention the NAME attribute for
# EXT-X-STREAM-INF, it may still sometimes be present
stream_name = last_info.get('NAME') or last_media.get('NAME')
# The bandwidth of live streams may differ over time, making format_id
# unpredictable, so it's better to keep the provided format_id intact.
if not live:
format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
manifest_url = format_url(line.strip())
f = {
'format_id': '-'.join(format_id),
'url': manifest_url,
'manifest_url': manifest_url,
'tbr': tbr,
'ext': ext,
'fps': float_or_none(last_info.get('FRAME-RATE')),
'protocol': entry_protocol,
'preference': preference,
}
resolution = last_info.get('RESOLUTION')
if resolution:
width_str, height_str = resolution.split('x')
f['width'] = int(width_str)
f['height'] = int(height_str)
# Unified Streaming Platform
mobj = re.search(
r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
if mobj:
abr, vbr = mobj.groups()
abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
f.update({
'vbr': vbr,
'abr': abr,
})
f.update(parse_codecs(last_info.get('CODECS')))
formats.append(f)
last_info = {}
last_media = {}
return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
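# Quick example (hedged): with a namespace such as 'http://www.w3.org/ns/SMIL',
# a path like './head/meta' becomes
# './{http://www.w3.org/ns/SMIL}head/{http://www.w3.org/ns/SMIL}meta',
# while '.' and empty path components are passed through unchanged.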
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
elif not upload_date and name == 'date':
upload_date = unified_strdate(content)
thumbnails = [{
'id': image.get('type'),
'url': image.get('src'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
'upload_date': upload_date,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
m3u8_count = 0
srcs = []
media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
for medium in media:
src = medium.get('src')
if not src or src in srcs:
continue
srcs.append(src)
bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
width = int_or_none(medium.get('width'))
height = int_or_none(medium.get('height'))
proto = medium.get('proto')
ext = medium.get('ext')
src_ext = determine_ext(src)
streamer = medium.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
if len(m3u8_formats) == 1:
m3u8_count += 1
m3u8_formats[0].update({
'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
formats.extend(m3u8_formats)
continue
if src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
continue
if src_url.startswith('http') and self._is_valid_url(src, video_id):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
continue
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
xspf = self._download_xml(
playlist_url, playlist_id, 'Downloading xspf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(xspf, playlist_id)
def _parse_xspf(self, playlist, playlist_id):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = [{
'url': location.text,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
} for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
res = self._download_webpage_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal)
if res is False:
return []
mpd, urlh = res
mpd_base_url = re.match(r'https?://.+/', urlh.geturl()).group()
return self._parse_mpd_formats(
compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url,
formats_dict=formats_dict, mpd_url=mpd_url)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
# As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
# common attributes and elements. We only extract the parts that are
# relevant for us.
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = int(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
extract_common(segment_template)
media_template = segment_template.get('media')
if media_template:
ms_info['media_template'] = media_template
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization_url'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
def combine_url(base_url, target_url):
if re.match(r'^https?://', target_url):
return target_url
return '%s%s%s' % (base_url, '' if base_url.endswith('/') else '/', target_url)
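# Behaviour sketch (hedged, placeholder URLs):
#   combine_url('http://example.com/dash', 'seg-1.m4s')
#       -> 'http://example.com/dash/seg-1.m4s'
#   combine_url('http://example.com/dash/', 'https://cdn.example.com/seg-1.m4s')
#       -> 'https://cdn.example.com/seg-1.m4s'  (absolute targets win)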
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type == 'video' or content_type == 'audio':
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'url': base_url,
'manifest_url': mpd_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': int_or_none(representation_attrib.get('bandwidth'), 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'vcodec': 'none' if content_type == 'audio' else representation_attrib.get('codecs'),
'acodec': 'none' if content_type == 'video' else representation_attrib.get('codecs'),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
}
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
if 'segment_urls' not in representation_ms_info and 'media_template' in representation_ms_info:
media_template = representation_ms_info['media_template']
media_template = media_template.replace('$RepresentationID$', representation_id)
media_template = re.sub(r'\$(Number|Bandwidth|Time)\$', r'%(\1)d', media_template)
media_template = re.sub(r'\$(Number|Bandwidth|Time)%([^$]+)\$', r'%(\1)\2', media_template)
media_template = media_template.replace('$$', '$')
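# Worked example (hedged): a DASH template such as 'seg-$Number%05d$.m4s' has just
# been rewritten to 'seg-%(Number)05d.m4s' (and '$RepresentationID$' substituted
# literally), so plain %-formatting below can fill in the segment number, and
# Time/Bandwidth where present.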
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template and 's' not in representation_ms_info:
segment_duration = None
if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
representation_ms_info['fragments'] = [{
'url': media_template % {
'Number': segment_number,
'Bandwidth': representation_attrib.get('bandwidth'),
},
'duration': segment_duration,
} for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
# $Number*$ or $Time$ in media template with S list available
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
# Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = media_template % {
'Time': segment_time,
'Bandwidth': representation_attrib.get('bandwidth'),
'Number': segment_number,
}
representation_ms_info['fragments'].append({
'url': segment_url,
'duration': float_or_none(segment_d, representation_ms_info['timescale']),
})
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
# No media template
# Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
# or any YouTube dashsegments video
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
# each S element describes 1 + @r consecutive segments of equal duration,
# consuming one URL from segment_urls per segment
for r in range(s.get('r', 0) + 1):
fragments.append({
'url': representation_ms_info['segment_urls'][segment_index],
'duration': duration,
})
segment_index += 1
representation_ms_info['fragments'] = fragments
# NB: MPD manifest may contain direct URLs to unfragmented media.
# No fragments key is present in this case.
if 'fragments' in representation_ms_info:
f.update({
'fragments': [],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url'].replace('$RepresentationID$', representation_id)
if not f.get('url'):
f['url'] = initialization_url
f['fragments'].append({'url': initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
for fragment in f['fragments']:
fragment['url'] = combine_url(base_url, fragment['url'])
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == representation_id)
except StopIteration:
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
existing_format.update(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8'):
def absolute_url(video_url):
return compat_urlparse.urljoin(base_url, video_url)
def parse_content_type(content_type):
if not content_type:
return {}
ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
if ctr:
mimetype, codecs = ctr.groups()
f = parse_codecs(codecs)
f['ext'] = mimetype2ext(mimetype)
return f
return {}
def _media_formats(src, cur_media_type):
full_url = absolute_url(src)
if determine_ext(full_url) == 'm3u8':
is_plain_url = False
formats = self._extract_m3u8_formats(
full_url, video_id, ext='mp4',
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id)
else:
is_plain_url = True
formats = [{
'url': full_url,
'vcodec': 'none' if cur_media_type == 'audio' else None,
}]
return is_plain_url, formats
entries = []
for media_tag, media_type, media_content in re.findall(r'(?s)(<(?P<tag>video|audio)[^>]*>)(.*?)</(?P=tag)>', webpage):
media_info = {
'formats': [],
'subtitles': {},
}
media_attributes = extract_attributes(media_tag)
src = media_attributes.get('src')
if src:
_, formats = _media_formats(src, media_type)
media_info['formats'].extend(formats)
media_info['thumbnail'] = media_attributes.get('poster')
if media_content:
for source_tag in re.findall(r'<source[^>]+>', media_content):
source_attributes = extract_attributes(source_tag)
src = source_attributes.get('src')
if not src:
continue
is_plain_url, formats = _media_formats(src, media_type)
if is_plain_url:
f = parse_content_type(source_attributes.get('type'))
f.update(formats[0])
media_info['formats'].append(f)
else:
media_info['formats'].extend(formats)
for track_tag in re.findall(r'<track[^>]+>', media_content):
track_attributes = extract_attributes(track_tag)
kind = track_attributes.get('kind')
if not kind or kind == 'subtitles':
src = track_attributes.get('src')
if not src:
continue
lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
media_info['subtitles'].setdefault(lang, []).append({
'url': absolute_url(src),
})
if media_info['formats']:
entries.append(media_info)
return entries
def _extract_akamai_formats(self, manifest_url, video_id):
formats = []
f4m_url = re.sub(r'(https?://.+?)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
formats.extend(self._extract_f4m_formats(
update_url_query(f4m_url, {'hdcore': '3.7.0'}),
video_id, f4m_id='hds', fatal=False))
m3u8_url = re.sub(r'(https?://.+?)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
return formats
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
url_base = self._search_regex(r'(?:https?|rtmp|rtsp)(://[^?]+)', url, 'format url')
http_base_url = 'http' + url_base
formats = []
if 'm3u8' not in skip_protocols:
formats.extend(self._extract_m3u8_formats(
http_base_url + '/playlist.m3u8', video_id, 'mp4',
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
if 'f4m' not in skip_protocols:
formats.extend(self._extract_f4m_formats(
http_base_url + '/manifest.f4m',
video_id, f4m_id='hds', fatal=False))
if re.search(r'(?:/smil:|\.smil)', url_base):
if 'dash' not in skip_protocols:
formats.extend(self._extract_mpd_formats(
http_base_url + '/manifest.mpd',
video_id, mpd_id='dash', fatal=False))
if 'smil' not in skip_protocols:
rtmp_formats = self._extract_smil_formats(
http_base_url + '/jwplayer.smil',
video_id, fatal=False)
for rtmp_format in rtmp_formats:
rtsp_format = rtmp_format.copy()
rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'protocol': 'rtsp',
})
formats.extend([rtmp_format, rtsp_format])
else:
for protocol in ('rtmp', 'rtsp'):
if protocol not in skip_protocols:
formats.append({
'url': protocol + url_base,
'format_id': protocol,
'protocol': protocol,
})
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(
0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
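# Example (hedged, made-up URLs): merging {'en': [{'url': 'a.vtt'}]} with
# {'en': [{'url': 'a.vtt'}, {'url': 'b.vtt'}], 'fr': [{'url': 'c.vtt'}]}
# yields {'en': [{'url': 'a.vtt'}, {'url': 'b.vtt'}], 'fr': [{'url': 'c.vtt'}]},
# i.e. per-language lists are concatenated with duplicate URLs dropped.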
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False) and
(self._get_login_info()[0] is not None or
self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
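# Example (hedged): a subclass defining _SEARCH_KEY = 'ytsearch' would match
# 'ytsearch:foo' (1 result), 'ytsearch5:foo' (5 results) and
# 'ytsearchall:foo' (up to _MAX_RESULTS), as handled in _real_extract() below.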
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
| 0.002253 |
import logging
import unittest
from itertools import *
"""GenomicRangeQuery (https://codility.com/demo/take-sample-test/genomic_range_query/)
Analysis:
- for every p,q , partial sum S[p+1..q] = S_q - S_p+1
= count(A) + ... + count(T) of segment S[p..q]
- S could be accumulated by prefix sum
"""
__author__ = 'au9ustine'
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
def get_partial_sum(sum_q, sum_p):
return list(starmap(lambda x, y: x-y, izip(sum_q, sum_p)))
def solution(S, P, Q):
mappings = {'A': 1, 'C': 2, 'G': 3, 'T': 4}
n = len(S)
sums = list(repeat([0]*4, n + 1))
for i in xrange(1, n+1):
sums[i] = sums[i-1][:]
sums[i][mappings[S[i-1]]-1] += 1
result = [0] * len(P)
for i in xrange(len(P)):
partial_sum = get_partial_sum(sums[Q[i]+1], sums[P[i]])
if partial_sum[0] != 0:
result[i] = 1
elif partial_sum[1] != 0:
result[i] = 2
elif partial_sum[2] != 0:
result[i] = 3
else:
result[i] = 4
return result
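# Worked example (hedged; mirrors the unittest fixture below): for
# S = "CAGCCTA", P = [2, 5, 0], Q = [4, 5, 6]
#   query (2, 4) covers "GCC": C present          -> impact factor 2
#   query (5, 5) covers "T":   only T present     -> impact factor 4
#   query (0, 6) covers the whole string: A found -> impact factor 1
# so solution(S, P, Q) == [2, 4, 1].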
class SolutionTest(unittest.TestCase):
def setUp(self):
self.data = [
(("CAGCCTA", [2, 5, 0], [4, 5, 6]), [2, 4, 1]),
]
def test_solution(self):
for input_data, expected in self.data:
actual = solution(*input_data)
self.assertEquals(expected, actual)
if __name__ == "__main__":
unittest.main(failfast=True)
| 0.002039 |
import decimal
try:
import thread
except ImportError:
import dummy_thread as thread
from threading import local
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import util
from django.db.transaction import TransactionManagementError
from django.utils import datetime_safe
from django.utils.importlib import import_module
class BaseDatabaseWrapper(local):
"""
Represents a database connection.
"""
ops = None
vendor = 'unknown'
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS):
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
self.use_debug_cursor = None
# Transaction related attributes
self.transaction_state = []
self.savepoint_state = 0
self._dirty = None
def __eq__(self, other):
return self.alias == other.alias
def __ne__(self, other):
return not self == other
def _commit(self):
if self.connection is not None:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
return self.connection.rollback()
def _enter_transaction_management(self, managed):
"""
A hook for backend-specific changes required when entering manual
transaction handling.
"""
pass
def _leave_transaction_management(self, managed):
"""
A hook for backend-specific changes required when leaving manual
transaction handling. Will usually be implemented only when
_enter_transaction_management() is also required.
"""
pass
def _savepoint(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def enter_transaction_management(self, managed=True):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
if self.transaction_state:
self.transaction_state.append(self.transaction_state[-1])
else:
self.transaction_state.append(settings.TRANSACTIONS_MANAGED)
if self._dirty is None:
self._dirty = False
self._enter_transaction_management(managed)
def leave_transaction_management(self):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
self._leave_transaction_management(self.is_managed())
if self.transaction_state:
del self.transaction_state[-1]
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
if self._dirty:
self.rollback()
raise TransactionManagementError("Transaction managed block ended with "
"pending COMMIT/ROLLBACK")
self._dirty = False
def is_dirty(self):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
return self._dirty
def set_dirty(self):
"""
Sets a dirty flag for the current thread and code streak. This can be used
in a managed block of code to decide whether there are open changes
waiting for commit.
"""
if self._dirty is not None:
self._dirty = True
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def set_clean(self):
"""
Resets a dirty flag for the current thread and code streak. This can be used
in a managed block of code to decide whether a commit or rollback
should happen.
"""
if self._dirty is not None:
self._dirty = False
else:
raise TransactionManagementError("This code isn't under transaction management")
self.clean_savepoints()
def clean_savepoints(self):
self.savepoint_state = 0
def is_managed(self):
"""
Checks whether the transaction manager is in manual or in auto state.
"""
if self.transaction_state:
return self.transaction_state[-1]
return settings.TRANSACTIONS_MANAGED
def managed(self, flag=True):
"""
Puts the transaction manager into a manual state: managed transactions have
to be committed explicitly by the user. If you switch off transaction
management and there is a pending commit/rollback, the data will be
committed.
"""
top = self.transaction_state
if top:
top[-1] = flag
if not flag and self.is_dirty():
self._commit()
self.set_clean()
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def commit_unless_managed(self):
"""
Commits changes if the system is not in managed transaction mode.
"""
if not self.is_managed():
self._commit()
self.clean_savepoints()
else:
self.set_dirty()
def rollback_unless_managed(self):
"""
Rolls back changes if the system is not in managed transaction mode.
"""
if not self.is_managed():
self._rollback()
else:
self.set_dirty()
def commit(self):
"""
Does the commit itself and resets the dirty flag.
"""
self._commit()
self.set_clean()
def rollback(self):
"""
This function does the rollback itself and resets the dirty flag.
"""
self._rollback()
self.set_clean()
def savepoint(self):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
thread_ident = thread.get_ident()
self.savepoint_state += 1
tid = str(thread_ident).replace('-', '')
sid = "s%s_x%d" % (tid, self.savepoint_state)
self._savepoint(sid)
return sid
def savepoint_rollback(self, sid):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
if self.savepoint_state:
self._savepoint_rollback(sid)
def savepoint_commit(self, sid):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
if self.savepoint_state:
self._savepoint_commit(sid)
def close(self):
if self.connection is not None:
self.connection.close()
self.connection = None
def cursor(self):
if (self.use_debug_cursor or
(self.use_debug_cursor is None and settings.DEBUG)):
cursor = self.make_debug_cursor(self._cursor())
else:
cursor = util.CursorWrapper(self._cursor(), self)
return cursor
def make_debug_cursor(self, cursor):
return util.CursorDebugWrapper(cursor, self)
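# Hedged usage sketch (illustrative only; `connection` and SomeError are
# placeholders): higher-level code would typically drive the savepoint API
# defined above like this:
#
#     sid = connection.savepoint()
#     try:
#         ...  # work that might need to be undone
#     except SomeError:
#         connection.savepoint_rollback(sid)
#     else:
#         connection.savepoint_commit(sid)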
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
# True if django.db.backend.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists, but one of the unique_together columns is NULL?
ignores_nulls_in_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
uses_autocommit = False
uses_savepoints = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does a dirty transaction need to be rolled back
# before the cursor can be used again?
requires_rollback_on_dirty_transaction = False
# Does the backend allow very long model names without error?
supports_long_model_names = True
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
supports_bitwise_or = True
# Do time/datetime fields have microsecond precision?
supports_microsecond_precision = True
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Is there a 1000 item limit on query parameters?
supports_1000_query_parameters = True
# Can an object have a primary key of 0? MySQL says No.
allows_primary_key_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Features that need to be confirmed at runtime
# Cache whether the confirmation has been performed.
_confirmed = False
supports_transactions = None
supports_stddev = None
can_introspect_foreign_keys = None
def __init__(self, connection):
self.connection = connection
def confirm(self):
"Perform manual checks of any database features that might vary between installs"
self._confirmed = True
self.supports_transactions = self._supports_transactions()
self.supports_stddev = self._supports_stddev()
self.can_introspect_foreign_keys = self._can_introspect_foreign_keys()
def _supports_transactions(self):
"Confirm support for transactions"
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection._commit()
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection._rollback()
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
self.connection._commit()
return count == 0
def _supports_stddev(self):
"Confirm support for STDDEV and related stats functions"
class StdDevPop(object):
sql_function = 'STDDEV_POP'
try:
self.connection.ops.check_aggregate_support(StdDevPop())
return True
except NotImplementedError:
return False
def _can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
return True
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
def __init__(self):
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a DATE object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import smart_unicode, force_unicode
# Convert params to contain Unicode values.
to_unicode = lambda s: force_unicode(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple([to_unicode(val) for val in params])
else:
u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])
return smart_unicode(sql) % u_params
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
raise NotImplementedError
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
raise NotImplementedError
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
raise NotImplementedError
def sql_flush(self, style, tables, sequences):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
raise NotImplementedError()
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import smart_unicode
        return smart_unicode(x).replace("\\", "\\\\").replace("%", "\\%").replace("_", "\\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return datetime_safe.new_date(value).strftime('%Y-%m-%d')
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_time(self, value):
"""
        Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds(self, value):
"""
        Returns a two-element list with the lower and upper bounds to be used
        with a BETWEEN operator to query a field value using a year lookup.
        `value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
def year_lookup_bounds_for_date_field(self, value):
"""
        Returns a two-element list with the lower and upper bounds to be used
        with a BETWEEN operator to query a DateField value using a year lookup.
        `value` is an int, containing the looked-up year.
        By default, it just calls `self.year_lookup_bounds`. Some backends need
        this hook because, on their databases, date fields can't be compared to
        values which include a time part.
"""
return self.year_lookup_bounds(value)
def convert_values(self, value, field):
"""Coerce the value returned by the database backend into a consistent type that
is compatible with the field type.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return value
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type in ('DateField', 'DateTimeField', 'TimeField'):
return value
# No field, or the field isn't known to be a decimal or integer
# Default to a float
return float(value)
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
This is used on specific backends to rule out known aggregates
that are known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
raise NotImplemented.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
        subexpression types (e.g., date expressions).
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
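# Minimal sketch (not part of Django): how a concrete backend might override
# a few of the hooks defined above. The class name and the SQLite-flavoured
# choices below are assumptions, not Django's actual sqlite3 backend.
class _ExampleBackendOperations(BaseDatabaseOperations):
    compiler_module = "django.db.models.sql.compiler"
    def quote_name(self, name):
        # Avoid double-quoting names that have already been quoted.
        if name.startswith('"') and name.endswith('"'):
            return name
        return '"%s"' % name
    def no_limit_value(self):
        # SQLite-style backends use -1 when an OFFSET still requires a LIMIT.
        return -1
    def pk_default_value(self):
        # Ask the backend to fill in the primary key itself.
        return 'NULL'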
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
        The default table name converter is for case-sensitive comparison.
"""
return name
def table_names(self):
"Returns a list of names of all tables that exist in the database."
cursor = self.connection.cursor()
return self.get_table_list(cursor)
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.db import models, router
tables = set()
for app in models.get_apps():
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
if only_existing:
existing_tables = self.table_names()
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.db import models, router
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
if router.allow_syncdb(self.connection.alias, model):
all_models.append(model)
tables = map(self.table_name_converter, tables)
return set([
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.db import models, router
apps = models.get_apps()
sequence_list = []
for app in apps:
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
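# Sketch (not part of Django): one typical way the introspection helpers above
# are combined; `connection` is assumed to be a configured Django connection
# object exposing an `introspection` attribute.
def _example_unsynced_tables(connection):
    introspection = connection.introspection
    existing = set(introspection.table_names())
    return [t for t in introspection.django_table_names()
            if introspection.table_name_converter(t) not in existing]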
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError()
class BaseDatabaseValidation(object):
"""
    This class encapsulates all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass
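# Minimal sketch (not part of Django) of a concrete client class; the
# executable name and the settings keys used are assumptions for a psql-style
# backend, assuming the connection exposes a settings_dict mapping.
class _ExampleDatabaseClient(BaseDatabaseClient):
    executable_name = 'psql'
    def runshell(self):
        import subprocess
        settings = self.connection.settings_dict
        args = [self.executable_name]
        if settings.get('USER'):
            args += ['-U', settings['USER']]
        if settings.get('HOST'):
            args += ['-h', settings['HOST']]
        args.append(settings['NAME'])
        subprocess.call(args)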
| 0.001635 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Stephane Chamberland <[email protected]>
# Copyright: LGPL 2.1
# . s.ssmuse.dot /ssm/net/hpcs/201402/02/base \
# /ssm/net/hpcs/201402/02/intel13sp1u2 \
# /ssm/net/rpn/libs/15.2 \
# /ssm/net/cmdn/tests/vgrid/6.0.0-a3/intel13sp1u2
"""
Module vgd is a ctypes import of vgrid's library (libvgrid.so)
The libvgrid.so library is provided with the VGrid Descriptor package
developed at CMC/RPN by R.McTaggartCowan and A.Plante
The vgd python module includes:
- python wrappers for libvgrid's main C functions
- helper functions
- prototypes for many of libvgrid's C functions
- pre-defined constants
- comprehensive inline documentation
See also:
rpnpy.vgd.proto
rpnpy.vgd.const
rpnpy.vgd.base
"""
from rpnpy.version import *
__SUBMODULES__ = ['proto', 'const', 'base']
__all__ = ['loadVGDlib', 'libvgd', 'VGD_VERSION', 'VGD_LIBPATH',
'VGDError'] + __SUBMODULES__
## VGD_VERSION_DEFAULT = '_rpnpy'
VGD_VERSION_DEFAULT = '*'
class VGDError(Exception):
"""
General VGD module error/exception
"""
pass
def checkVGDlibPath(libfile):
"""
    Return the first filename matching the libfile wildcard.
    Return None if there is no match.
"""
import os
import glob
LIBPATH_ALL = glob.glob(libfile)
if len(LIBPATH_ALL) > 0:
if os.path.isfile(LIBPATH_ALL[0]):
return LIBPATH_ALL[0]
return None
def loadVGDlib(vgd_version=None):
"""
Import libvgrid.so using ctypes
Args:
vgd_version (str): libvgrid version number to load
Default: RPNPY_VGD_VERSION Env.Var.
VGD_VERSION_DEFAULT if not RPNPY_VGD_VERSION
Returns:
(VGD_VERSION, VGD_LIBPATH, libvgd)
where:
VGD_VERSION (str) : loaded libvgd version
VGD_LIBPATH (str) : path to loaded libvgd shared lib
libvgd (CDLL) : ctypes library object for libvgd.so
    The library file 'libvgridshared<VERSION>.so' is searched for in the
    paths listed in the environment variables:
        PYTHONPATH, EC_LD_LIBRARY_PATH, LD_LIBRARY_PATH
"""
import os
import ctypes as ct
## import numpy as np
## import numpy.ctypeslib as npct
if vgd_version is None:
VGD_VERSION = os.getenv('RPNPY_VGD_VERSION',
VGD_VERSION_DEFAULT).strip()
else:
VGD_VERSION = vgd_version
vgd_libfile = 'libvgridshared' + VGD_VERSION.strip() + '.so'
pylibpath = os.getenv('PYTHONPATH','').split(':')
ldlibpath = os.getenv('LD_LIBRARY_PATH','').split(':')
eclibpath = os.getenv('EC_LD_LIBRARY_PATH','').split()
VGD_LIBPATH = checkVGDlibPath(vgd_libfile)
if not VGD_LIBPATH:
for path in pylibpath + ldlibpath + eclibpath:
VGD_LIBPATH = checkVGDlibPath(os.path.join(path.strip(), vgd_libfile))
if VGD_LIBPATH:
break
if not VGD_LIBPATH:
raise IOError(-1, 'Failed to find libvgrid.so: ', vgd_libfile)
VGD_LIBPATH = os.path.abspath(VGD_LIBPATH)
libvgd = None
try:
libvgd = ct.cdll.LoadLibrary(VGD_LIBPATH)
#libvgd = np.ctypeslib.load_library(vgd_libfile, VGD_LIBPATH)
except IOError:
raise IOError('ERROR: cannot load libvgrid shared version: ' +
VGD_VERSION)
return (VGD_VERSION, VGD_LIBPATH, libvgd)
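# Usage sketch (not part of the rpnpy API): request a specific libvgrid
# version; the '6.0.0' default below is a hypothetical version string.
def _exampleLoadVGDlib(version='6.0.0'):
    try:
        return loadVGDlib(vgd_version=version)
    except IOError as e:
        print('Could not load libvgrid ' + version + ': ' + str(e))
        return None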
(VGD_VERSION, VGD_LIBPATH, libvgd) = loadVGDlib()
if __name__ == "__main__":
import doctest
doctest.testmod()
# -*- Mode: C; tab-width: 4; indent-tabs-mode: nil -*-
# vim: set expandtab ts=4 sw=4:
# kate: space-indent on; indent-mode cstyle; indent-width 4; mixedindent off;
| 0.004857 |
"""Support for switches which integrates with other components."""
import voluptuous as vol
from homeassistant.components.switch import (
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
SwitchEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_ICON_TEMPLATE,
CONF_SWITCHES,
CONF_UNIQUE_ID,
CONF_VALUE_TEMPLATE,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.script import Script
from .const import CONF_AVAILABILITY_TEMPLATE, DOMAIN, PLATFORMS
from .template_entity import TemplateEntity
_VALID_STATES = [STATE_ON, STATE_OFF, "true", "false"]
ON_ACTION = "turn_on"
OFF_ACTION = "turn_off"
SWITCH_SCHEMA = vol.All(
cv.deprecated(ATTR_ENTITY_ID),
vol.Schema(
{
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
vol.Required(ON_ACTION): cv.SCRIPT_SCHEMA,
vol.Required(OFF_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
),
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_SWITCHES): cv.schema_with_slug_keys(SWITCH_SCHEMA)}
)
async def _async_create_entities(hass, config):
"""Create the Template switches."""
switches = []
for device, device_config in config[CONF_SWITCHES].items():
friendly_name = device_config.get(ATTR_FRIENDLY_NAME, device)
state_template = device_config.get(CONF_VALUE_TEMPLATE)
icon_template = device_config.get(CONF_ICON_TEMPLATE)
entity_picture_template = device_config.get(CONF_ENTITY_PICTURE_TEMPLATE)
availability_template = device_config.get(CONF_AVAILABILITY_TEMPLATE)
on_action = device_config[ON_ACTION]
off_action = device_config[OFF_ACTION]
unique_id = device_config.get(CONF_UNIQUE_ID)
switches.append(
SwitchTemplate(
hass,
device,
friendly_name,
state_template,
icon_template,
entity_picture_template,
availability_template,
on_action,
off_action,
unique_id,
)
)
return switches
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the template switches."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
async_add_entities(await _async_create_entities(hass, config))
class SwitchTemplate(TemplateEntity, SwitchEntity, RestoreEntity):
"""Representation of a Template switch."""
def __init__(
self,
hass,
device_id,
friendly_name,
state_template,
icon_template,
entity_picture_template,
availability_template,
on_action,
off_action,
unique_id,
):
"""Initialize the Template switch."""
super().__init__(
availability_template=availability_template,
icon_template=icon_template,
entity_picture_template=entity_picture_template,
)
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass
)
self._name = friendly_name
self._template = state_template
domain = __name__.split(".")[-2]
self._on_script = Script(hass, on_action, friendly_name, domain)
self._off_script = Script(hass, off_action, friendly_name, domain)
self._state = False
self._unique_id = unique_id
@callback
def _update_state(self, result):
super()._update_state(result)
if isinstance(result, TemplateError):
self._state = None
return
if isinstance(result, bool):
self._state = result
return
if isinstance(result, str):
self._state = result.lower() in ("true", STATE_ON)
return
self._state = False
async def async_added_to_hass(self):
"""Register callbacks."""
if self._template is None:
# restore state after startup
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
self._state = state.state == STATE_ON
# no need to listen for events
else:
self.add_template_attribute(
"_state", self._template, None, self._update_state
)
await super().async_added_to_hass()
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def unique_id(self):
"""Return the unique id of this switch."""
return self._unique_id
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def should_poll(self):
"""Return the polling state."""
return False
async def async_turn_on(self, **kwargs):
"""Fire the on action."""
await self._on_script.async_run(context=self._context)
if self._template is None:
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Fire the off action."""
await self._off_script.async_run(context=self._context)
if self._template is None:
self._state = False
self.async_write_ha_state()
@property
def assumed_state(self):
"""State is assumed, if no template given."""
return self._template is None
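# Illustrative sketch (not part of Home Assistant): the kind of platform
# configuration PLATFORM_SCHEMA above accepts, written as the dict the YAML
# loader would produce. The entity ids and templates below are made up.
_EXAMPLE_CONFIG = {
    "platform": "template",
    "switches": {
        "garage_door": {
            "friendly_name": "Garage Door",
            "value_template": "{{ is_state('cover.garage', 'open') }}",
            "turn_on": {"service": "cover.open_cover", "entity_id": "cover.garage"},
            "turn_off": {"service": "cover.close_cover", "entity_id": "cover.garage"},
        }
    },
}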
| 0.00032 |
import os.path
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal, assert_equal,
assert_almost_equal, assert_array_equal,
suppress_warnings)
from pytest import raises as assert_raises
import scipy.ndimage as ndimage
from . import types
class Test_measurements_stats:
"""ndimage.measurements._stats() is a utility used by other functions."""
def test_a(self):
x = [0, 1, 2, 6]
labels = [0, 0, 1, 1]
index = [0, 1]
for shp in [(4,), (2, 2)]:
x = np.array(x).reshape(shp)
labels = np.array(labels).reshape(shp)
counts, sums = ndimage.measurements._stats(
x, labels=labels, index=index)
assert_array_equal(counts, [2, 2])
assert_array_equal(sums, [1.0, 8.0])
def test_b(self):
# Same data as test_a, but different labels. The label 9 exceeds the
# length of 'labels', so this test will follow a different code path.
x = [0, 1, 2, 6]
labels = [0, 0, 9, 9]
index = [0, 9]
for shp in [(4,), (2, 2)]:
x = np.array(x).reshape(shp)
labels = np.array(labels).reshape(shp)
counts, sums = ndimage.measurements._stats(
x, labels=labels, index=index)
assert_array_equal(counts, [2, 2])
assert_array_equal(sums, [1.0, 8.0])
def test_a_centered(self):
x = [0, 1, 2, 6]
labels = [0, 0, 1, 1]
index = [0, 1]
for shp in [(4,), (2, 2)]:
x = np.array(x).reshape(shp)
labels = np.array(labels).reshape(shp)
counts, sums, centers = ndimage.measurements._stats(
x, labels=labels, index=index, centered=True)
assert_array_equal(counts, [2, 2])
assert_array_equal(sums, [1.0, 8.0])
assert_array_equal(centers, [0.5, 8.0])
def test_b_centered(self):
x = [0, 1, 2, 6]
labels = [0, 0, 9, 9]
index = [0, 9]
for shp in [(4,), (2, 2)]:
x = np.array(x).reshape(shp)
labels = np.array(labels).reshape(shp)
counts, sums, centers = ndimage.measurements._stats(
x, labels=labels, index=index, centered=True)
assert_array_equal(counts, [2, 2])
assert_array_equal(sums, [1.0, 8.0])
assert_array_equal(centers, [0.5, 8.0])
def test_nonint_labels(self):
x = [0, 1, 2, 6]
labels = [0.0, 0.0, 9.0, 9.0]
index = [0.0, 9.0]
for shp in [(4,), (2, 2)]:
x = np.array(x).reshape(shp)
labels = np.array(labels).reshape(shp)
counts, sums, centers = ndimage.measurements._stats(
x, labels=labels, index=index, centered=True)
assert_array_equal(counts, [2, 2])
assert_array_equal(sums, [1.0, 8.0])
assert_array_equal(centers, [0.5, 8.0])
class Test_measurements_select:
"""ndimage.measurements._select() is a utility used by other functions."""
def test_basic(self):
x = [0, 1, 6, 2]
cases = [
([0, 0, 1, 1], [0, 1]), # "Small" integer labels
([0, 0, 9, 9], [0, 9]), # A label larger than len(labels)
([0.0, 0.0, 7.0, 7.0], [0.0, 7.0]), # Non-integer labels
]
for labels, index in cases:
result = ndimage.measurements._select(
x, labels=labels, index=index)
assert_(len(result) == 0)
result = ndimage.measurements._select(
x, labels=labels, index=index, find_max=True)
assert_(len(result) == 1)
assert_array_equal(result[0], [1, 6])
result = ndimage.measurements._select(
x, labels=labels, index=index, find_min=True)
assert_(len(result) == 1)
assert_array_equal(result[0], [0, 2])
result = ndimage.measurements._select(
x, labels=labels, index=index, find_min=True,
find_min_positions=True)
assert_(len(result) == 2)
assert_array_equal(result[0], [0, 2])
assert_array_equal(result[1], [0, 3])
assert_equal(result[1].dtype.kind, 'i')
result = ndimage.measurements._select(
x, labels=labels, index=index, find_max=True,
find_max_positions=True)
assert_(len(result) == 2)
assert_array_equal(result[0], [1, 6])
assert_array_equal(result[1], [1, 2])
assert_equal(result[1].dtype.kind, 'i')
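# Quick illustration (not one of the scipy test cases): how `labels` and
# `index` select a region for the measurement functions exercised below.
def _example_labeled_sum():
    x = np.array([0, 1, 2, 6])
    labels = np.array([0, 0, 1, 1])
    # Sum of x where labels == 1, i.e. 2 + 6 == 8.
    return ndimage.sum(x, labels=labels, index=1)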
def test_label01():
data = np.ones([])
out, n = ndimage.label(data)
assert_array_almost_equal(out, 1)
assert_equal(n, 1)
def test_label02():
data = np.zeros([])
out, n = ndimage.label(data)
assert_array_almost_equal(out, 0)
assert_equal(n, 0)
def test_label03():
data = np.ones([1])
out, n = ndimage.label(data)
assert_array_almost_equal(out, [1])
assert_equal(n, 1)
def test_label04():
data = np.zeros([1])
out, n = ndimage.label(data)
assert_array_almost_equal(out, [0])
assert_equal(n, 0)
def test_label05():
data = np.ones([5])
out, n = ndimage.label(data)
assert_array_almost_equal(out, [1, 1, 1, 1, 1])
assert_equal(n, 1)
def test_label06():
data = np.array([1, 0, 1, 1, 0, 1])
out, n = ndimage.label(data)
assert_array_almost_equal(out, [1, 0, 2, 2, 0, 3])
assert_equal(n, 3)
def test_label07():
data = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
out, n = ndimage.label(data)
assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
assert_equal(n, 0)
def test_label08():
data = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0]])
out, n = ndimage.label(data)
assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 2, 2, 0],
[3, 3, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 0]])
assert_equal(n, 4)
def test_label09():
data = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0]])
struct = ndimage.generate_binary_structure(2, 2)
out, n = ndimage.label(data, struct)
assert_array_almost_equal(out, [[1, 0, 0, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 2, 2, 0],
[2, 2, 0, 0, 0, 0],
[2, 2, 0, 0, 0, 0],
[0, 0, 0, 3, 3, 0]])
assert_equal(n, 3)
def test_label10():
data = np.array([[0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0]])
struct = ndimage.generate_binary_structure(2, 2)
out, n = ndimage.label(data, struct)
assert_array_almost_equal(out, [[0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0]])
assert_equal(n, 1)
def test_label11():
for type in types:
data = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0]], type)
out, n = ndimage.label(data)
expected = [[1, 0, 0, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 2, 2, 0],
[3, 3, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 0]]
assert_array_almost_equal(out, expected)
assert_equal(n, 4)
def test_label11_inplace():
for type in types:
data = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0]], type)
n = ndimage.label(data, output=data)
expected = [[1, 0, 0, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 2, 2, 0],
[3, 3, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 0]]
assert_array_almost_equal(data, expected)
assert_equal(n, 4)
def test_label12():
for type in types:
data = np.array([[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 0]], type)
out, n = ndimage.label(data)
expected = [[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 0]]
assert_array_almost_equal(out, expected)
assert_equal(n, 1)
def test_label13():
for type in types:
data = np.array([[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
type)
out, n = ndimage.label(data)
expected = [[1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
assert_array_almost_equal(out, expected)
assert_equal(n, 1)
def test_label_output_typed():
data = np.ones([5])
for t in types:
output = np.zeros([5], dtype=t)
n = ndimage.label(data, output=output)
assert_array_almost_equal(output, 1)
assert_equal(n, 1)
def test_label_output_dtype():
data = np.ones([5])
for t in types:
output, n = ndimage.label(data, output=t)
assert_array_almost_equal(output, 1)
assert output.dtype == t
def test_label_output_wrong_size():
data = np.ones([5])
for t in types:
output = np.zeros([10], t)
assert_raises((RuntimeError, ValueError),
ndimage.label, data, output=output)
def test_label_structuring_elements():
data = np.loadtxt(os.path.join(os.path.dirname(
__file__), "data", "label_inputs.txt"))
strels = np.loadtxt(os.path.join(
os.path.dirname(__file__), "data", "label_strels.txt"))
results = np.loadtxt(os.path.join(
os.path.dirname(__file__), "data", "label_results.txt"))
data = data.reshape((-1, 7, 7))
strels = strels.reshape((-1, 3, 3))
results = results.reshape((-1, 7, 7))
r = 0
for i in range(data.shape[0]):
d = data[i, :, :]
for j in range(strels.shape[0]):
s = strels[j, :, :]
assert_equal(ndimage.label(d, s)[0], results[r, :, :])
r += 1
def test_ticket_742():
def SE(img, thresh=.7, size=4):
mask = img > thresh
rank = len(mask.shape)
la, co = ndimage.label(mask,
ndimage.generate_binary_structure(rank, rank))
_ = ndimage.find_objects(la)
if np.dtype(np.intp) != np.dtype('i'):
shape = (3, 1240, 1240)
a = np.random.rand(np.prod(shape)).reshape(shape)
# shouldn't crash
SE(a)
def test_gh_issue_3025():
"""Github issue #3025 - improper merging of labels"""
d = np.zeros((60, 320))
d[:, :257] = 1
d[:, 260:] = 1
d[36, 257] = 1
d[35, 258] = 1
d[35, 259] = 1
assert ndimage.label(d, np.ones((3, 3)))[1] == 1
def test_label_default_dtype():
test_array = np.random.rand(10, 10)
label, no_features = ndimage.label(test_array > 0.5)
assert_(label.dtype in (np.int32, np.int64))
# Shouldn't raise an exception
ndimage.find_objects(label)
def test_find_objects01():
data = np.ones([], dtype=int)
out = ndimage.find_objects(data)
assert_(out == [()])
def test_find_objects02():
data = np.zeros([], dtype=int)
out = ndimage.find_objects(data)
assert_(out == [])
def test_find_objects03():
data = np.ones([1], dtype=int)
out = ndimage.find_objects(data)
assert_equal(out, [(slice(0, 1, None),)])
def test_find_objects04():
data = np.zeros([1], dtype=int)
out = ndimage.find_objects(data)
assert_equal(out, [])
def test_find_objects05():
data = np.ones([5], dtype=int)
out = ndimage.find_objects(data)
assert_equal(out, [(slice(0, 5, None),)])
def test_find_objects06():
data = np.array([1, 0, 2, 2, 0, 3])
out = ndimage.find_objects(data)
assert_equal(out, [(slice(0, 1, None),),
(slice(2, 4, None),),
(slice(5, 6, None),)])
def test_find_objects07():
data = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
out = ndimage.find_objects(data)
assert_equal(out, [])
def test_find_objects08():
data = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 2, 2, 0],
[3, 3, 0, 0, 0, 0],
[3, 3, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 0]])
out = ndimage.find_objects(data)
assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
(slice(1, 3, None), slice(2, 5, None)),
(slice(3, 5, None), slice(0, 2, None)),
(slice(5, 6, None), slice(3, 5, None))])
def test_find_objects09():
data = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 2, 2, 0, 0],
[0, 0, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 0]])
out = ndimage.find_objects(data)
assert_equal(out, [(slice(0, 1, None), slice(0, 1, None)),
(slice(1, 3, None), slice(2, 5, None)),
None,
(slice(5, 6, None), slice(3, 5, None))])
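# Sketch (not one of the scipy test cases): find_objects returns slice tuples
# that can be used directly to crop each labeled region out of the array.
def _example_crop_objects():
    data = np.array([1, 0, 2, 2, 0, 3])
    slices = ndimage.find_objects(data)
    # -> [array([1]), array([2, 2]), array([3])]
    return [data[s] for s in slices]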
def test_sum01():
for type in types:
input = np.array([], type)
output = ndimage.sum(input)
assert_equal(output, 0.0)
def test_sum02():
for type in types:
input = np.zeros([0, 4], type)
output = ndimage.sum(input)
assert_equal(output, 0.0)
def test_sum03():
for type in types:
input = np.ones([], type)
output = ndimage.sum(input)
assert_almost_equal(output, 1.0)
def test_sum04():
for type in types:
input = np.array([1, 2], type)
output = ndimage.sum(input)
assert_almost_equal(output, 3.0)
def test_sum05():
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.sum(input)
assert_almost_equal(output, 10.0)
def test_sum06():
labels = np.array([], bool)
for type in types:
input = np.array([], type)
output = ndimage.sum(input, labels=labels)
assert_equal(output, 0.0)
def test_sum07():
labels = np.ones([0, 4], bool)
for type in types:
input = np.zeros([0, 4], type)
output = ndimage.sum(input, labels=labels)
assert_equal(output, 0.0)
def test_sum08():
labels = np.array([1, 0], bool)
for type in types:
input = np.array([1, 2], type)
output = ndimage.sum(input, labels=labels)
assert_equal(output, 1.0)
def test_sum09():
labels = np.array([1, 0], bool)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.sum(input, labels=labels)
assert_almost_equal(output, 4.0)
def test_sum10():
labels = np.array([1, 0], bool)
input = np.array([[1, 2], [3, 4]], bool)
output = ndimage.sum(input, labels=labels)
assert_almost_equal(output, 2.0)
def test_sum11():
labels = np.array([1, 2], np.int8)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.sum(input, labels=labels,
index=2)
assert_almost_equal(output, 6.0)
def test_sum12():
labels = np.array([[1, 2], [2, 4]], np.int8)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.sum(input, labels=labels, index=[4, 8, 2])
assert_array_almost_equal(output, [4.0, 0.0, 5.0])
def test_sum_labels():
labels = np.array([[1, 2], [2, 4]], np.int8)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output_sum = ndimage.sum(input, labels=labels, index=[4, 8, 2])
output_labels = ndimage.sum_labels(
input, labels=labels, index=[4, 8, 2])
assert (output_sum == output_labels).all()
assert_array_almost_equal(output_labels, [4.0, 0.0, 5.0])
def test_mean01():
labels = np.array([1, 0], bool)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.mean(input, labels=labels)
assert_almost_equal(output, 2.0)
def test_mean02():
labels = np.array([1, 0], bool)
input = np.array([[1, 2], [3, 4]], bool)
output = ndimage.mean(input, labels=labels)
assert_almost_equal(output, 1.0)
def test_mean03():
labels = np.array([1, 2])
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.mean(input, labels=labels,
index=2)
assert_almost_equal(output, 3.0)
def test_mean04():
labels = np.array([[1, 2], [2, 4]], np.int8)
with np.errstate(all='ignore'):
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.mean(input, labels=labels,
index=[4, 8, 2])
assert_array_almost_equal(output[[0, 2]], [4.0, 2.5])
assert_(np.isnan(output[1]))
def test_minimum01():
labels = np.array([1, 0], bool)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.minimum(input, labels=labels)
assert_almost_equal(output, 1.0)
def test_minimum02():
labels = np.array([1, 0], bool)
input = np.array([[2, 2], [2, 4]], bool)
output = ndimage.minimum(input, labels=labels)
assert_almost_equal(output, 1.0)
def test_minimum03():
labels = np.array([1, 2])
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.minimum(input, labels=labels,
index=2)
assert_almost_equal(output, 2.0)
def test_minimum04():
labels = np.array([[1, 2], [2, 3]])
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.minimum(input, labels=labels,
index=[2, 3, 8])
assert_array_almost_equal(output, [2.0, 4.0, 0.0])
def test_maximum01():
labels = np.array([1, 0], bool)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.maximum(input, labels=labels)
assert_almost_equal(output, 3.0)
def test_maximum02():
labels = np.array([1, 0], bool)
input = np.array([[2, 2], [2, 4]], bool)
output = ndimage.maximum(input, labels=labels)
assert_almost_equal(output, 1.0)
def test_maximum03():
labels = np.array([1, 2])
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.maximum(input, labels=labels,
index=2)
assert_almost_equal(output, 4.0)
def test_maximum04():
labels = np.array([[1, 2], [2, 3]])
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.maximum(input, labels=labels,
index=[2, 3, 8])
assert_array_almost_equal(output, [3.0, 4.0, 0.0])
def test_maximum05():
# Regression test for ticket #501 (Trac)
x = np.array([-3, -2, -1])
assert_equal(ndimage.maximum(x), -1)
def test_median01():
a = np.array([[1, 2, 0, 1],
[5, 3, 0, 4],
[0, 0, 0, 7],
[9, 3, 0, 0]])
labels = np.array([[1, 1, 0, 2],
[1, 1, 0, 2],
[0, 0, 0, 2],
[3, 3, 0, 0]])
output = ndimage.median(a, labels=labels, index=[1, 2, 3])
assert_array_almost_equal(output, [2.5, 4.0, 6.0])
def test_median02():
a = np.array([[1, 2, 0, 1],
[5, 3, 0, 4],
[0, 0, 0, 7],
[9, 3, 0, 0]])
output = ndimage.median(a)
assert_almost_equal(output, 1.0)
def test_median03():
a = np.array([[1, 2, 0, 1],
[5, 3, 0, 4],
[0, 0, 0, 7],
[9, 3, 0, 0]])
labels = np.array([[1, 1, 0, 2],
[1, 1, 0, 2],
[0, 0, 0, 2],
[3, 3, 0, 0]])
output = ndimage.median(a, labels=labels)
assert_almost_equal(output, 3.0)
def test_median_gh12836_bool():
# test boolean addition fix on example from gh-12836
a = np.asarray([1, 1], dtype=bool)
output = ndimage.median(a, labels=np.ones((2,)), index=[1])
assert_array_almost_equal(output, [1.0])
def test_median_no_int_overflow():
# test integer overflow fix on example from gh-12836
a = np.asarray([65, 70], dtype=np.int8)
output = ndimage.median(a, labels=np.ones((2,)), index=[1])
assert_array_almost_equal(output, [67.5])
def test_variance01():
with np.errstate(all='ignore'):
for type in types:
input = np.array([], type)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Mean of empty slice")
output = ndimage.variance(input)
assert_(np.isnan(output))
def test_variance02():
for type in types:
input = np.array([1], type)
output = ndimage.variance(input)
assert_almost_equal(output, 0.0)
def test_variance03():
for type in types:
input = np.array([1, 3], type)
output = ndimage.variance(input)
assert_almost_equal(output, 1.0)
def test_variance04():
input = np.array([1, 0], bool)
output = ndimage.variance(input)
assert_almost_equal(output, 0.25)
def test_variance05():
labels = [2, 2, 3]
for type in types:
input = np.array([1, 3, 8], type)
output = ndimage.variance(input, labels, 2)
assert_almost_equal(output, 1.0)
def test_variance06():
labels = [2, 2, 3, 3, 4]
with np.errstate(all='ignore'):
for type in types:
input = np.array([1, 3, 8, 10, 8], type)
output = ndimage.variance(input, labels, [2, 3, 4])
assert_array_almost_equal(output, [1.0, 1.0, 0.0])
def test_standard_deviation01():
with np.errstate(all='ignore'):
for type in types:
input = np.array([], type)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Mean of empty slice")
output = ndimage.standard_deviation(input)
assert_(np.isnan(output))
def test_standard_deviation02():
for type in types:
input = np.array([1], type)
output = ndimage.standard_deviation(input)
assert_almost_equal(output, 0.0)
def test_standard_deviation03():
for type in types:
input = np.array([1, 3], type)
output = ndimage.standard_deviation(input)
assert_almost_equal(output, np.sqrt(1.0))
def test_standard_deviation04():
input = np.array([1, 0], bool)
output = ndimage.standard_deviation(input)
assert_almost_equal(output, 0.5)
def test_standard_deviation05():
labels = [2, 2, 3]
for type in types:
input = np.array([1, 3, 8], type)
output = ndimage.standard_deviation(input, labels, 2)
assert_almost_equal(output, 1.0)
def test_standard_deviation06():
labels = [2, 2, 3, 3, 4]
with np.errstate(all='ignore'):
for type in types:
input = np.array([1, 3, 8, 10, 8], type)
output = ndimage.standard_deviation(input, labels, [2, 3, 4])
assert_array_almost_equal(output, [1.0, 1.0, 0.0])
def test_standard_deviation07():
labels = [1]
with np.errstate(all='ignore'):
for type in types:
input = np.array([-0.00619519], type)
output = ndimage.standard_deviation(input, labels, [1])
assert_array_almost_equal(output, [0])
def test_minimum_position01():
labels = np.array([1, 0], bool)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.minimum_position(input, labels=labels)
assert_equal(output, (0, 0))
def test_minimum_position02():
for type in types:
input = np.array([[5, 4, 2, 5],
[3, 7, 0, 2],
[1, 5, 1, 1]], type)
output = ndimage.minimum_position(input)
assert_equal(output, (1, 2))
def test_minimum_position03():
input = np.array([[5, 4, 2, 5],
[3, 7, 0, 2],
[1, 5, 1, 1]], bool)
output = ndimage.minimum_position(input)
assert_equal(output, (1, 2))
def test_minimum_position04():
input = np.array([[5, 4, 2, 5],
[3, 7, 1, 2],
[1, 5, 1, 1]], bool)
output = ndimage.minimum_position(input)
assert_equal(output, (0, 0))
def test_minimum_position05():
labels = [1, 2, 0, 4]
for type in types:
input = np.array([[5, 4, 2, 5],
[3, 7, 0, 2],
[1, 5, 2, 3]], type)
output = ndimage.minimum_position(input, labels)
assert_equal(output, (2, 0))
def test_minimum_position06():
labels = [1, 2, 3, 4]
for type in types:
input = np.array([[5, 4, 2, 5],
[3, 7, 0, 2],
[1, 5, 1, 1]], type)
output = ndimage.minimum_position(input, labels, 2)
assert_equal(output, (0, 1))
def test_minimum_position07():
labels = [1, 2, 3, 4]
for type in types:
input = np.array([[5, 4, 2, 5],
[3, 7, 0, 2],
[1, 5, 1, 1]], type)
output = ndimage.minimum_position(input, labels,
[2, 3])
assert_equal(output[0], (0, 1))
assert_equal(output[1], (1, 2))
def test_maximum_position01():
labels = np.array([1, 0], bool)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output = ndimage.maximum_position(input,
labels=labels)
assert_equal(output, (1, 0))
def test_maximum_position02():
for type in types:
input = np.array([[5, 4, 2, 5],
[3, 7, 8, 2],
[1, 5, 1, 1]], type)
output = ndimage.maximum_position(input)
assert_equal(output, (1, 2))
def test_maximum_position03():
input = np.array([[5, 4, 2, 5],
[3, 7, 8, 2],
[1, 5, 1, 1]], bool)
output = ndimage.maximum_position(input)
assert_equal(output, (0, 0))
def test_maximum_position04():
labels = [1, 2, 0, 4]
for type in types:
input = np.array([[5, 4, 2, 5],
[3, 7, 8, 2],
[1, 5, 1, 1]], type)
output = ndimage.maximum_position(input, labels)
assert_equal(output, (1, 1))
def test_maximum_position05():
labels = [1, 2, 0, 4]
for type in types:
input = np.array([[5, 4, 2, 5],
[3, 7, 8, 2],
[1, 5, 1, 1]], type)
output = ndimage.maximum_position(input, labels, 1)
assert_equal(output, (0, 0))
def test_maximum_position06():
labels = [1, 2, 0, 4]
for type in types:
input = np.array([[5, 4, 2, 5],
[3, 7, 8, 2],
[1, 5, 1, 1]], type)
output = ndimage.maximum_position(input, labels,
[1, 2])
assert_equal(output[0], (0, 0))
assert_equal(output[1], (1, 1))
def test_maximum_position07():
# Test float labels
labels = np.array([1.0, 2.5, 0.0, 4.5])
for type in types:
input = np.array([[5, 4, 2, 5],
[3, 7, 8, 2],
[1, 5, 1, 1]], type)
output = ndimage.maximum_position(input, labels,
[1.0, 4.5])
assert_equal(output[0], (0, 0))
assert_equal(output[1], (0, 3))
def test_extrema01():
labels = np.array([1, 0], bool)
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output1 = ndimage.extrema(input, labels=labels)
output2 = ndimage.minimum(input, labels=labels)
output3 = ndimage.maximum(input, labels=labels)
output4 = ndimage.minimum_position(input,
labels=labels)
output5 = ndimage.maximum_position(input,
labels=labels)
assert_equal(output1, (output2, output3, output4, output5))
def test_extrema02():
labels = np.array([1, 2])
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output1 = ndimage.extrema(input, labels=labels,
index=2)
output2 = ndimage.minimum(input, labels=labels,
index=2)
output3 = ndimage.maximum(input, labels=labels,
index=2)
output4 = ndimage.minimum_position(input,
labels=labels, index=2)
output5 = ndimage.maximum_position(input,
labels=labels, index=2)
assert_equal(output1, (output2, output3, output4, output5))
def test_extrema03():
labels = np.array([[1, 2], [2, 3]])
for type in types:
input = np.array([[1, 2], [3, 4]], type)
output1 = ndimage.extrema(input, labels=labels,
index=[2, 3, 8])
output2 = ndimage.minimum(input, labels=labels,
index=[2, 3, 8])
output3 = ndimage.maximum(input, labels=labels,
index=[2, 3, 8])
output4 = ndimage.minimum_position(input,
labels=labels, index=[2, 3, 8])
output5 = ndimage.maximum_position(input,
labels=labels, index=[2, 3, 8])
assert_array_almost_equal(output1[0], output2)
assert_array_almost_equal(output1[1], output3)
assert_array_almost_equal(output1[2], output4)
assert_array_almost_equal(output1[3], output5)
def test_extrema04():
labels = [1, 2, 0, 4]
for type in types:
input = np.array([[5, 4, 2, 5],
[3, 7, 8, 2],
[1, 5, 1, 1]], type)
output1 = ndimage.extrema(input, labels, [1, 2])
output2 = ndimage.minimum(input, labels, [1, 2])
output3 = ndimage.maximum(input, labels, [1, 2])
output4 = ndimage.minimum_position(input, labels,
[1, 2])
output5 = ndimage.maximum_position(input, labels,
[1, 2])
assert_array_almost_equal(output1[0], output2)
assert_array_almost_equal(output1[1], output3)
assert_array_almost_equal(output1[2], output4)
assert_array_almost_equal(output1[3], output5)
def test_center_of_mass01():
expected = [0.0, 0.0]
for type in types:
input = np.array([[1, 0], [0, 0]], type)
output = ndimage.center_of_mass(input)
assert_array_almost_equal(output, expected)
def test_center_of_mass02():
expected = [1, 0]
for type in types:
input = np.array([[0, 0], [1, 0]], type)
output = ndimage.center_of_mass(input)
assert_array_almost_equal(output, expected)
def test_center_of_mass03():
expected = [0, 1]
for type in types:
input = np.array([[0, 1], [0, 0]], type)
output = ndimage.center_of_mass(input)
assert_array_almost_equal(output, expected)
def test_center_of_mass04():
expected = [1, 1]
for type in types:
input = np.array([[0, 0], [0, 1]], type)
output = ndimage.center_of_mass(input)
assert_array_almost_equal(output, expected)
def test_center_of_mass05():
expected = [0.5, 0.5]
for type in types:
input = np.array([[1, 1], [1, 1]], type)
output = ndimage.center_of_mass(input)
assert_array_almost_equal(output, expected)
def test_center_of_mass06():
expected = [0.5, 0.5]
input = np.array([[1, 2], [3, 1]], bool)
output = ndimage.center_of_mass(input)
assert_array_almost_equal(output, expected)
def test_center_of_mass07():
labels = [1, 0]
expected = [0.5, 0.0]
input = np.array([[1, 2], [3, 1]], bool)
output = ndimage.center_of_mass(input, labels)
assert_array_almost_equal(output, expected)
def test_center_of_mass08():
labels = [1, 2]
expected = [0.5, 1.0]
input = np.array([[5, 2], [3, 1]], bool)
output = ndimage.center_of_mass(input, labels, 2)
assert_array_almost_equal(output, expected)
def test_center_of_mass09():
labels = [1, 2]
expected = [(0.5, 0.0), (0.5, 1.0)]
input = np.array([[1, 2], [1, 1]], bool)
output = ndimage.center_of_mass(input, labels, [1, 2])
assert_array_almost_equal(output, expected)
def test_histogram01():
expected = np.ones(10)
input = np.arange(10)
output = ndimage.histogram(input, 0, 10, 10)
assert_array_almost_equal(output, expected)
def test_histogram02():
labels = [1, 1, 1, 1, 2, 2, 2, 2]
expected = [0, 2, 0, 1, 1]
input = np.array([1, 1, 3, 4, 3, 3, 3, 3])
output = ndimage.histogram(input, 0, 4, 5, labels, 1)
assert_array_almost_equal(output, expected)
def test_histogram03():
labels = [1, 0, 1, 1, 2, 2, 2, 2]
expected1 = [0, 1, 0, 1, 1]
expected2 = [0, 0, 0, 3, 0]
input = np.array([1, 1, 3, 4, 3, 5, 3, 3])
output = ndimage.histogram(input, 0, 4, 5, labels, (1, 2))
assert_array_almost_equal(output[0], expected1)
assert_array_almost_equal(output[1], expected2)
def test_stat_funcs_2d():
a = np.array([[5, 6, 0, 0, 0], [8, 9, 0, 0, 0], [0, 0, 0, 3, 5]])
lbl = np.array([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [0, 0, 0, 2, 2]])
mean = ndimage.mean(a, labels=lbl, index=[1, 2])
assert_array_equal(mean, [7.0, 4.0])
var = ndimage.variance(a, labels=lbl, index=[1, 2])
assert_array_equal(var, [2.5, 1.0])
std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2])
assert_array_almost_equal(std, np.sqrt([2.5, 1.0]))
med = ndimage.median(a, labels=lbl, index=[1, 2])
assert_array_equal(med, [7.0, 4.0])
min = ndimage.minimum(a, labels=lbl, index=[1, 2])
assert_array_equal(min, [5, 3])
max = ndimage.maximum(a, labels=lbl, index=[1, 2])
assert_array_equal(max, [9, 5])
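# Sketch (not one of the scipy test cases): the typical label -> measure
# pipeline that the tests above exercise piece by piece.
def _example_label_then_measure():
    img = np.array([[0, 2, 2, 0],
                    [0, 0, 0, 0],
                    [5, 5, 0, 0]], float)
    lbl, n = ndimage.label(img > 0)                   # two connected components
    idx = np.arange(1, n + 1)
    sums = ndimage.sum(img, labels=lbl, index=idx)    # [4.0, 10.0]
    coms = ndimage.center_of_mass(img, lbl, idx)      # per-label centroids
    return n, sums, coms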
class TestWatershedIft:
def test_watershed_ift01(self):
data = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], np.int8)
out = ndimage.watershed_ift(data, markers, structure=[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift02(self):
data = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], np.int8)
out = ndimage.watershed_ift(data, markers)
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, 1, 1, 1, -1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, 1, 1, 1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift03(self):
data = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 0, 3, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1]], np.int8)
out = ndimage.watershed_ift(data, markers)
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, 2, -1, 3, -1, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, -1, 2, -1, 3, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift04(self):
data = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 0, 3, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1]],
np.int8)
out = ndimage.watershed_ift(data, markers,
structure=[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift05(self):
data = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 3, 0, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1]],
np.int8)
out = ndimage.watershed_ift(data, markers,
structure=[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift06(self):
data = np.array([[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], np.int8)
out = ndimage.watershed_ift(data, markers,
structure=[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
expected = [[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift07(self):
shape = (7, 6)
data = np.zeros(shape, dtype=np.uint8)
data = data.transpose()
data[...] = np.array([[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], np.int8)
out = np.zeros(shape, dtype=np.int16)
out = out.transpose()
ndimage.watershed_ift(data, markers,
structure=[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
output=out)
expected = [[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift08(self):
# Test cost larger than uint8. See gh-10069.
shape = (2, 2)
data = np.array([[256, 0],
[0, 0]], np.uint16)
markers = np.array([[1, 0],
[0, 0]], np.int8)
out = ndimage.watershed_ift(data, markers)
expected = [[1, 1],
[1, 1]]
assert_array_almost_equal(out, expected)
| 0 |