from django.conf import settings
from django.core.mail import send_mail
from django.shortcuts import render
from .forms import ContactForm, SignUpForm
from .models import SignUp
# Create your views here.
def home(request):
title = 'Sign Up Now'
form = SignUpForm(request.POST or None)
context = {
"title": title,
"form": form
}
if form.is_valid():
#form.save()
#print request.POST['email'] #not recommended
instance = form.save(commit=False)
full_name = form.cleaned_data.get("full_name")
if not full_name:
full_name = "New full name"
instance.full_name = full_name
# if not instance.full_name:
# instance.full_name = "Justin"
instance.save()
context = {
"title": "Thank you"
}
if request.user.is_authenticated() and request.user.is_staff:
#print(SignUp.objects.all())
# i = 1
# for instance in SignUp.objects.all():
# print(i)
# print(instance.full_name)
# i += 1
queryset = SignUp.objects.all().order_by('-timestamp') #.filter(full_name__iexact="Justin")
#print(SignUp.objects.all().order_by('-timestamp').filter(full_name__iexact="Justin").count())
context = {
"queryset": queryset
}
return render(request, "home.html", context)
def contact(request):
title = 'Contact Us'
title_align_center = True
form = ContactForm(request.POST or None)
if form.is_valid():
# for key, value in form.cleaned_data.iteritems():
# print key, value
# #print form.cleaned_data.get(key)
form_email = form.cleaned_data.get("email")
form_message = form.cleaned_data.get("message")
form_full_name = form.cleaned_data.get("full_name")
# print email, message, full_name
subject = 'Site contact form'
from_email = settings.EMAIL_HOST_USER
to_email = [from_email, '[email protected]']
contact_message = "%s: %s via %s"%(
form_full_name,
form_message,
form_email)
some_html_message = """
<h1>hello</h1>
"""
send_mail(subject,
contact_message,
from_email,
to_email,
html_message=some_html_message,
fail_silently=True)
context = {
"form": form,
"title": title,
"title_align_center": title_align_center,
}
return render(request, "forms.html", context)
import oebakery
from oebakery import die, err, warn, info, debug
import os
import operator
import bb
# Handle all the architecture-related variables.
# To be able to reuse definitions for build, machine and sdk
# architectures, the usual bitbake variables are not used; instead, a more
# hierarchical setup using a number of Python dictionaries is used.
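# A minimal illustration (an assumption added here, not part of the original code)
# of how the dictionaries below are combined: for an arch string such as
# 'arm-926ejs-linux-gnueabi', arch_gccspec() below starts from
# cpuspecs['arm']['DEFAULT'] and overlays cpuspecs['arm']['926ejs'], yielding
# e.g. {'wordsize': '32', 'endian': 'l', 'march': 'armv5te',
# 'mcpu': 'arm926ej-s', 'mtune': 'arm926ej-s', ...}.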
gccspecs = {}
cpuspecs = {
'm68k' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
'elf' : 'ELF 32-bit MSB .*, foobar',
},
'mcf51' : {
'mcpu' : '51',
},
'mcf51ac' : {
'mcpu' : '51ac',
},
'mcf51cn' : {
'mcpu' : '51cn',
},
'mcf51em' : {
'mcpu' : '51em',
},
'mcf51qe' : {
'mcpu' : '51qe',
},
'mcf5206' : {
'mcpu' : '5206',
},
'mcf5206e' : {
'mcpu' : '5206e',
},
'mcf5208' : {
'mcpu' : '5208',
},
'mcf52277' : {
'mcpu' : '52277',
},
},
'powerpc' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
'elf' : 'ELF 32-bit MSB .*, PowerPC or cisco 4500',
},
'603e' : {
'mcpu' : '603e',
'float' : 'hard',
},
'e300c1' : {
'mcpu' : 'e300c1',
'float' : 'hard',
},
'e300c2' : {
'mcpu' : 'e300c2',
},
'e300c3' : {
'mcpu' : 'e300c3',
'float' : 'hard',
},
'e300c4' : {
'mcpu' : 'e300c4',
'float' : 'hard',
},
},
'powerpc64' : {
'DEFAULT' : {
'wordsize' : '64',
'endian' : 'b',
},
},
'arm' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, ARM',
'abi flags' : [
['arm abi', 'eabi', {
'eabi' : {
'os' : 'eabi',
},
# Currently, OE-lite does only support EABI for
# ARM. When/if OABI is added, os should be kept as
# linux-gnu for OABI
}
],
]
},
'920t' : {
'mcpu' : 'arm920t',
'mtune' : 'arm920t',
},
'926ejs' : {
'march' : 'armv5te',
'mcpu' : 'arm926ej-s',
'mtune' : 'arm926ej-s',
},
'1176jzfs' : {
'march' : 'armv6',
'mcpu' : 'arm1176jzf-s',
'mtune' : 'arm1176jzf-s',
'abi flags' : [
['float abi', 'hard', {
'hard' : {
'float' : 'hard',
'fpu' : 'vfp',
},
'softfp' : {
'float' : 'softfp',
'fpu' : 'vfp',
},
'soft' : {
'float' : 'soft',
},
}
]
]
},
'cortexa7' : {
'mcpu' : 'cortex-a7',
'mtune' : 'cortex-a7',
'abi flags' : [
['float abi', 'softfp', {
'hard' : {
'float' : 'hard',
'fpu' : 'neon-vfpv4',
'vendor' : 'hf',
},
'softfp' : {
'float' : 'softfp',
'fpu' : 'neon-vfpv4',
'vendor' : '',
},
'soft' : {
'float' : 'soft',
'vendor' : 'soft',
},
}
],
['instruction set', 'thumb', {
'arm' : { },
'thumb' : {
'thumb' : '1',
'vendor' : 't',
},
}
],
]
},
'cortexa8' : {
'mcpu' : 'cortex-a8',
'mtune' : 'cortex-a8',
'abi flags' : [
['float abi', 'hard', {
'hard' : {
'float' : 'hard',
'fpu' : 'neon',
'vendor' : 'neon',
},
'softfp' : {
'float' : 'softfp',
'fpu' : 'neon',
'vendor' : 'neonsfp',
},
'soft' : {
'float' : 'soft',
'vendor' : 'sfp',
},
}
],
['instruction set', 'thumb', {
'arm' : {
'mode' : 'arm',
},
'thumb' : {
'mode' : 'thumb',
'vendor' : 't',
},
}
],
]
},
'cortexa9' : {
'mcpu' : 'cortex-a9',
'mtune' : 'cortex-a9',
'abi flags' : [
['float abi', 'hard', {
'hard' : {
'float' : 'hard',
'fpu' : 'neon',
'vendor' : 'neon',
},
'softfp' : {
'float' : 'softfp',
'fpu' : 'neon',
'vendor' : 'neonsfp',
},
'soft' : {
'float' : 'soft',
'vendor' : 'sfp',
},
}
],
['instruction set', 'thumb', {
'arm' : {
'mode' : 'arm',
},
'thumb' : {
'mode' : 'thumb',
'vendor' : 't',
},
}
],
]
},
},
'armeb' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
},
},
'avr32' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
},
},
'mips' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
},
},
'mipsel' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
},
},
'sparc' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
},
},
'bfin' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
},
},
'sh3' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
},
},
'sh4' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
},
},
'i386' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'i386',
'fpu' : '387',
'float' : 'hard',
},
},
'i486' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'i486',
'fpu' : '387',
'float' : 'hard',
},
'winchipc6' : {
'march' : 'winchip-c6',
},
'winchip2' : {
'march' : 'winchip2',
},
},
'i586' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'i586',
'fpu' : '387',
'float' : 'hard',
},
'mmx' : {
'march' : 'pentium-mmx',
},
'k6' : {
'march' : 'k6',
},
'k62' : {
'march' : 'k6-2',
},
'geode' : {
'march' : 'geode',
},
'c3' : {
'march' : 'c3',
},
'c32' : {
'march' : 'c3-2',
},
},
'i686' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'i686',
'fpu' : '387',
'float' : 'hard',
},
'mmx' : {
'march' : 'pentium2',
},
'sse' : {
'march' : 'pentium3',
'fpu' : 'sse',
},
'sse2' : {
'march' : 'pentium-m',
'fpu' : 'sse',
},
'athlon' : {
'march' : 'athlon',
},
'athlon4' : {
'march' : 'athlon-4',
'fpu' : 'sse',
},
},
'i786' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'pentium4',
'fpu' : 'sse',
'float' : 'hard',
},
'sse3' : {
'march' : 'prescott',
},
},
'x86_64' : {
'DEFAULT' : {
'wordsize' : '64',
'endian' : 'l',
'elf' : 'ELF 64-bit LSB .*, x86-64',
'march' : 'opteron',
'fpu' : 'sse',
'float' : 'hard',
},
'sse3' : {
'march' : 'k8-sse3',
},
'nocona' : {
'march' : 'nocona',
},
'core2' : {
'march' : 'core2',
},
'atom' : {
'march' : 'atom',
},
'amdfam10' : {
'march' : 'amdfam10',
},
},
'ia64' : {
'DEFAULT' : {
'wordsize' : '64',
'endian' : 'l',
},
},
}
cpumap = {
'powerpc' : {
'mpc5121e' : 'e300c4',
'mpc5125' : 'e300c4',
'mpc8313' : 'e300c3',
'mpc8313e' : 'e300c3',
'mpc8360' : 'e300c1',
'mpc8270' : 'g2le',
},
'arm' : {
'at91rm9200' : '920t',
'at91sam9260' : '926ejs',
'omap3520' : ('cortexa8', ('omap3', 'omap')),
'omap3530' : ('cortexa8', ('omap3', 'omap')),
'omap4430' : ('cortexa9neon', ('omap4', 'omap')),
'omap4440' : ('cortexa9neon', ('omap4', 'omap')),
'imx21' : ('926ejs', 'imx'),
'imx23' : ('926ejs', 'mxs'),
'imx25' : ('926ejs', 'imx'),
'imx27' : ('926ejs', 'imx'),
'imx28' : ('926ejs', 'mxs'),
'imx280' : ('926ejs', ('imx28', 'mxs')),
'imx281' : ('926ejs', ('imx28', 'mxs')),
'imx283' : ('926ejs', ('imx28', 'mxs')),
'imx285' : ('926ejs', ('imx28', 'mxs')),
'imx286' : ('926ejs', ('imx28', 'mxs')),
'imx287' : ('926ejs', ('imx28', 'mxs')),
'imx31' : ('1136jfs', 'imx'),
'imx35' : ('1136jfs', 'imx'),
'imx51' : ('cortexa8', 'imx'),
'imx512' : ('cortexa8', ('imx51', 'imx')),
'imx513' : ('cortexa8', ('imx51', 'imx')),
'imx514' : ('cortexa8', ('imx51', 'imx')),
'imx515' : ('cortexa8', ('imx51', 'imx')),
'imx516' : ('cortexa8', ('imx51', 'imx')),
'imx53' : ('cortexa8', 'imx'),
'imx534' : ('cortexa8', ('imx53', 'imx')),
'imx535' : ('cortexa8', ('imx53', 'imx')),
'imx536' : ('cortexa8', ('imx53', 'imx')),
'imx537' : ('cortexa8', ('imx53', 'imx')),
'imx538' : ('cortexa8', ('imx53', 'imx')),
'imx6' : ('cortexa9', 'imx'),
'ls1021a' : ('cortexa7', ('ls102x', 'ls1', 'layerscape')),
'imx6sl' : ('cortexa9', ('imx6', 'imx')),
'imx6dl' : ('cortexa9', ('imx6', 'imx')),
'imx6q' : ('cortexa9', ('imx6', 'imx')),
},
'x86' : {
'celeronm575' : (('i686', 'sse2'),),
},
}
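# Illustrative reading of the cpumap entries above (inferred from arch_fixup()
# below, not authoritative): a plain string maps a CPU name to a cpuspecs
# vendor, e.g. 'at91rm9200' -> '920t'; a tuple additionally lists CPU family
# names, e.g. 'imx280' -> vendor '926ejs' with families ('imx28', 'mxs'); and a
# nested tuple such as (('i686', 'sse2'),) overrides both the cpu and the vendor.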
osspecs = {
'mingw32' : {
'exeext' : '.exe',
'elf' : 'PE32 .* for MS Windows .* Intel 80386 32-bit',
},
}
def init(d):
sanity(d)
gcc_version = d.get('GCC_VERSION')
arch_set_build_arch(d, gcc_version)
arch_set_cross_arch(d, 'MACHINE', gcc_version)
arch_set_cross_arch(d, 'SDK', gcc_version)
return
def sanity(d):
import bb
fail = False
sdk_cpu = d.get("SDK_CPU")
if not sdk_cpu:
bb.error("SDK_CPU not set")
fail = True
sdk_os = d.get("SDK_OS")
if not sdk_os:
bb.error("SDK_OS not set")
fail = True
machine = d.get("MACHINE")
machine_cpu = d.get("MACHINE_CPU")
machine_os = d.get("MACHINE_OS")
if machine:
pass
elif machine_cpu and machine_os:
pass
elif machine_cpu:
bb.error("MACHINE_CPU set, but not MACHINE_OS")
fail = True
elif machine_os:
bb.error("MACHINE_OS set, but not MACHINE_CPU")
fail = True
else:
bb.error("MACHINE or MACHINE_CPU and MACHINE_OS must be set")
fail = True
if fail:
bb.fatal("Invalid MACHINE and/or SDK specification\n"
"Check your conf/local.conf file and/or machine and distro config files.")
return
def update(d):
gcc_version = d.get('GCC_VERSION')
arch_update(d, 'BUILD', gcc_version)
arch_update(d, 'HOST', gcc_version)
arch_update(d, 'TARGET', gcc_version)
return
def arch_set_build_arch(d, gcc_version):
try:
guess = globals()['config_guess_cache']
except KeyError:
#bb.debug("config.guess")
script = arch_find_script(d, 'config.guess')
try:
guess = arch_split(os.popen(script).readline().strip())
except OSError, e:
#bb.fatal('config.guess failed: '+e)
return None
config_guess_cache = guess
globals()['config_guess_cache'] = config_guess_cache
# Replace the silly 'pc' vendor with 'unknown' to yield a result
# comparable with arch_cross().
if guess[1] == 'pc':
guess[1] = 'unknown'
guess[1] = "build_" + guess[1]
d.set('BUILD_ARCH', '-'.join(guess))
return
def arch_set_cross_arch(d, prefix, gcc_version):
cross_arch = '%s-%s'%(d.get(prefix+'_CPU', True),
d.get(prefix+'_OS', True))
cross_arch = arch_config_sub(d, cross_arch)
abis = (d.get(prefix+'_ABI', True) or "").split()
if prefix == "MACHINE":
vendor_prefix = None
else:
vendor_prefix = prefix.lower() + "_"
cross_arch = arch_fixup(cross_arch, gcc_version, abis, vendor_prefix)
d[prefix+'_ARCH'] = cross_arch[0]
if cross_arch[1]:
d[prefix+'_CPU_FAMILIES'] = " ".join(cross_arch[1])
return
def arch_update(d, prefix, gcc_version):
arch = d.get(prefix+'_ARCH', True)
gccspec = arch_gccspec(arch, gcc_version)
(cpu, vendor, os) = arch_split(arch)
d[prefix+'_CPU'] = cpu
d[prefix+'_VENDOR'] = vendor
d[prefix+'_OS'] = os
ost = os.split('-',1)
if len(ost) > 1:
d[prefix+'_BASEOS'] = ost[0]
else:
d[prefix+'_BASEOS'] = ""
for spec in gccspec:
if spec in ("abi flags",):
continue
d[prefix+'_'+spec.upper()] = gccspec[spec]
return
def arch_fixup(arch, gcc, abis, vendor_prefix=None):
import re
gccv = [int(x) for x in re.search(r'(\d+)[.](\d+)[.]?', gcc).groups()]
(cpu, vendor, os) = arch_split(arch)
if vendor == 'pc':
vendor = 'unknown'
families = []
if cpu in cpumap and vendor in cpumap[cpu]:
mapto = cpumap[cpu][vendor]
families = [vendor]
if isinstance(mapto, basestring):
vendor = mapto
else:
assert isinstance(mapto, tuple) and len(mapto) in (1, 2)
if isinstance(mapto[0], basestring):
vendor = mapto[0]
else:
assert isinstance(mapto[0], tuple) and len(mapto[0]) == 2
cpu = mapto[0][0]
vendor = mapto[0][1]
if len(mapto) > 1:
if isinstance(mapto[1], basestring):
families.append(mapto[1])
else:
assert isinstance(mapto[1], tuple)
families.extend(mapto[1])
families.append(vendor)
if cpu == "powerpc":
if vendor in ('e300c1', 'e300c4'):
vendor = '603e'
if vendor in ('e300c2', 'e300c3'):
if gccv[0] < 4 or (gccv[0] == 4 and gccv[1] < 4):
vendor = '603e'
if cpu in cpuspecs and vendor in cpuspecs[cpu]:
pass
elif vendor == 'unknown':
pass
else:
bb.fatal("unknown cpu vendor: %s"%vendor)
vendor = 'unknown'
# Merge DEFAULT and vendor abi_flags, keeping DEFAULT flags first
abi_flags = []
if "DEFAULT" in cpuspecs[cpu] and 'abi flags' in cpuspecs[cpu]["DEFAULT"]:
abi_flags += cpuspecs[cpu]["DEFAULT"]["abi flags"]
if vendor in cpuspecs[cpu] and 'abi flags' in cpuspecs[cpu][vendor]:
for abi_flag in cpuspecs[cpu][vendor]['abi flags']:
try:
flag_index = map(operator.itemgetter(0), abi_flags).index(
abi_flag)
abi_flags[flag_index][1] = abi_flag[1]
for flag_value in abi_flag[2].items():
abi_flags[flag_index][2][flag_value[0]] = flag_value[1]
except ValueError:
abi_flags.append(abi_flag)
if abi_flags:
cpuspec = cpuspecs[cpu][vendor]
extra_vendor = []
extra_os = []
for abi_flag in abi_flags:
diff = set(abis).intersection(set(abi_flag[2]))
if len(diff) > 1:
bb.fatal("ABI with %s is invalid, only one of %s should be given"
% (', '.join(diff), ', '.join(abi_flag[2].keys())))
if len(diff) == 1:
abi_select = diff.pop()
abis.remove(abi_select)
else:
abi_select = abi_flag[1]
if 'vendor' in abi_flag[2][abi_select]:
extra_vendor.append(abi_flag[2][abi_select].pop('vendor'))
if 'os' in abi_flag[2][abi_select]:
extra_os.append(abi_flag[2][abi_select].pop('os'))
cpuspec.update(abi_flag[2][abi_select])
vendor = vendor + ''.join(extra_vendor)
os = os + ''.join(extra_os)
cpuspecs[cpu].update({vendor : cpuspec})
if len(abis) > 0:
bb.fatal("ABI %s not valid for arch %s-%s-%s" %(', '.join(abis), cpu,vendor,os))
if vendor_prefix:
vendor = vendor_prefix + vendor
return ('-'.join((cpu, vendor, os)), families)
def arch_gccspec(arch, gcc):
import re
if gcc in gccspecs:
if arch in gccspecs[gcc]:
return gccspecs[gcc][arch]
else:
gccspecs[gcc] = {}
gccv = [int(x) for x in re.search(r'(\d+)[.](\d+)[.]?', gcc).groups()]
(cpu, vendor, os) = arch_split(arch)
gccspec = {}
if cpu in cpuspecs:
gccspec.update(cpuspecs[cpu]['DEFAULT'])
if cpu in cpuspecs and vendor in cpuspecs[cpu]:
gccspec.update(cpuspecs[cpu][vendor])
if os in osspecs:
gccspec.update(osspecs[os])
try:
if gccspec['mcpu'] in ('e300c1', 'e300c4'):
gccspec['mcpu'] = '603e'
if gccspec['mtune'] in ('e300c1', 'e300c4'):
gccspec['mtune'] = '603e'
if gccspec['mcpu'] in ('e300c2', 'e300c3'):
if gccv[0] < 4 or (gccv[0] == 4 and gccv[1] < 4):
gccspec['mcpu'] = '603e'
if gccspec['mtune'] in ('e300c2', 'e300c3'):
if gccv[0] < 4 or (gccv[0] == 4 and gccv[1] < 4):
gccspec['mtune'] = '603e'
except KeyError, e:
#bb.debug("KeyError in arch_gccspec: ")
pass
gccspecs[gcc][arch] = gccspec
return gccspec
def arch_config_sub(d, arch):
try:
config_sub_cache = globals()['config_sub_cache']
except KeyError:
config_sub_cache = {}
globals()['config_sub_cache'] = config_sub_cache
try:
canonical_arch = config_sub_cache[arch]
except KeyError:
script = arch_find_script(d, 'config.sub')
try:
bb.debug("%s %s"%(script, arch))
canonical_arch = os.popen("%s %s"%(script, arch)).readline().strip()
config_sub_cache[arch] = canonical_arch
except OSError, e:
bb.error("config.sub(%s) failed: %s"%(arch, e))
return arch
return canonical_arch
def arch_split(arch):
archtuple = arch.split('-', 2)
if len(archtuple) == 3:
return archtuple
else:
bb.error('invalid arch string: '+arch)
return None
def arch_find_script(d, filename):
try:
scripts = globals()['arch_scripts']
except KeyError:
scripts = {}
globals()['arch_scripts'] = scripts
if not filename in scripts:
for oepath in d.get('OEPATH', 1).split(':'):
filepath = os.path.join(oepath, 'scripts', filename)
if os.path.isfile(filepath):
#bb.debug("found %s: %s"%(filename, filepath))
scripts[filename] = filepath
break
if not filename in scripts:
bb.error('could not find script: %s'%filename)
return scripts[filename]
# -*- coding: utf-8 -*-
#
# testiaf.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
IAF Neuron example
------------------
A DC current is injected into the neuron using a current generator
device. The membrane potential as well as the spiking activity are
recorded by corresponding devices.
It can be observed how the current charges the membrane, a spike
is emitted, the neuron becomes absolute refractory, and finally
starts to recover.
'''
'''
First, we import all necessary modules for simulation and plotting
'''
import nest
import pylab
'''
Second the Function build_network is defined to build the network and
return the handles of the spike detector and the voltmeter
'''
def build_network(dt) :
nest.ResetKernel()
nest.SetKernelStatus({"local_num_threads" : 1, "resolution" : dt})
neuron = nest.Create('iaf_neuron')
nest.SetStatus(neuron, "I_e", 376.0)
vm = nest.Create('voltmeter')
nest.SetStatus(vm, "withtime", True)
sd = nest.Create('spike_detector')
nest.Connect(vm, neuron)
nest.Connect(neuron, sd)
return vm, sd
'''
The function build_network takes the resolution as its argument.
First the kernel is reset, the number of local threads is set to one
and the resolution is set to the specified value dt. The iaf_neuron is
created and its handle is stored in the variable neuron. The status of
the neuron is changed so that it receives an external current. Next the
voltmeter is created and its handle stored in vm; the option
'withtime' is set, so the recording times are given in the times vector
of the events dictionary. Then the spike_detector is created and its
handle is stored in sd.
The voltmeter and the spike detector are then connected to the neuron.
The Connect function takes the handles as input. The voltmeter is
connected to the neuron and the neuron to the spike detector, because
the neuron sends spikes to the detector and the voltmeter 'observes'
the neuron.
'''
'''
The neuron is simulated for three different resolutions and then
the voltage trace is plotted
'''
for dt in [0.1, 0.5, 1.0] :
print("Running simulation with dt=%.2f" % dt)
vm, sd = build_network(dt)
'''
First, using build_network, the network is built and the handles of
the voltmeter and the spike detector are stored in vm and sd
'''
nest.Simulate(1000.0)
'''
The network is simulated using `Simulate`, which takes the desired
simulation time in milliseconds and advances the network state by
this amount of time. During simulation, the `spike_detector`
counts the spikes of the target neuron and the total number is
read out at the end of the simulation period.
'''
potentials = nest.GetStatus(vm, "events")[0]["V_m"]
times = nest.GetStatus(vm, "events")[0]["times"]
'''
The values of the membrane potential recorded by the voltmeter are
read out and stored in the potentials array, with the corresponding
times stored in the times array
'''
pylab.plot(times, potentials, label="dt=%.2f" % dt)
print(" Number of spikes: {0}".format(nest.GetStatus(sd, "n_events")[0]))
'''
Using the pylab library the voltage trace is plotted over time
'''
pylab.legend(loc=3)
pylab.xlabel("time (ms)")
pylab.ylabel("V_m (mV)")
'''
Finally the axes are labelled and a legend is generated
'''
#!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Build tool setup for optimized environments.
This module is a SCons tool which sets up environments for optimized builds.
It is used as follows:
optimized_env = env.Clone(tools = ['target_optimized'])
"""
def generate(env):
# NOTE: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool."""
# Add in general options.
env['TARGET_DEBUG'] = False
env.Append(
CPPDEFINES=['NDEBUG'] + env.get('CPPDEFINES_OPTIMIZED', []),
CCFLAGS=env.get('CCFLAGS_OPTIMIZED', []),
LINKFLAGS=env.get('LINKFLAGS_OPTIMIZED', []),
)
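# A hypothetical SConstruct sketch (an assumption, not part of this tool) showing
# how the *_OPTIMIZED construction variables consumed above might be provided;
# the flag values are placeholders and toolchain-specific:
#
#   env = Environment(
#       CPPDEFINES_OPTIMIZED=['FAST_PATH'],   # extra defines for optimized builds
#       CCFLAGS_OPTIMIZED=['-O2'],            # extra compiler flags
#       LINKFLAGS_OPTIMIZED=[],               # extra linker flags
#   )
#   optimized_env = env.Clone(tools=['target_optimized'])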
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class P2PMempoolTests(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def run_test(self):
# Add a p2p connection
self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
self.nodes[0].p2p.wait_for_verack()
#request mempool
self.nodes[0].p2p.send_message(msg_mempool())
self.nodes[0].p2p.wait_for_disconnect()
#mininode must be disconnected at this point
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class Peer(object):
def __init__(self, address, jobs, rel_perf, pubkey):
self.address = address # string: IP address
self.jobs = jobs # integer: number of CPUs
self.relative_performance = rel_perf
self.pubkey = pubkey # string: pubkey's fingerprint
self.shells = set() # set of strings
self.needed_work = 0
self.assigned_work = 0
self.tests = [] # list of TestCase objects
self.trusting_me = False # This peer trusts my public key.
self.trusted = False # I trust this peer's public key.
def __str__(self):
return ("Peer at %s, jobs: %d, performance: %.2f, trust I/O: %s/%s" %
(self.address, self.jobs, self.relative_performance,
self.trusting_me, self.trusted))
def AddTests(self, shell):
"""Adds tests from |shell| to this peer.
Stops when self.needed_work reaches zero, or when all of shell's tests
are assigned."""
assert self.needed_work > 0
if shell.shell not in self.shells:
self.shells.add(shell.shell)
while len(shell.tests) > 0 and self.needed_work > 0:
t = shell.tests.pop()
self.needed_work -= t.duration
self.assigned_work += t.duration
shell.total_duration -= t.duration
self.tests.append(t)
def ForceAddOneTest(self, test, shell):
"""Forcibly adds another test to this peer, disregarding needed_work."""
if shell.shell not in self.shells:
self.shells.add(shell.shell)
self.needed_work -= test.duration
self.assigned_work += test.duration
shell.total_duration -= test.duration
self.tests.append(test)
def Pack(self):
"""Creates a JSON serializable representation of this Peer."""
return [self.address, self.jobs, self.relative_performance]
@staticmethod
def Unpack(packed):
"""Creates a Peer object built from a packed representation."""
pubkey_dummy = "" # Callers of this don't care (only the server does).
return Peer(packed[0], packed[1], packed[2], pubkey_dummy)
import os
import random
import sys
class CPU(object):
"""
The Chip-8 has 4KB of RAM from 0x000 to 0xFFF. The original interpreter is stored in memory
from 0x000 to 0x1FF so most programs will start at 0x200. The Chip-8 has 16 8-bit registers
and a 16-bit register that stores memory addresses. There are also 2 8-bit registers that
are the delay and sound timers. The stack can hold 16 16-bit values. The Chip-8 has a 16-key
hexadecimal keypad with keys 0 through 9 and A through F.
"""
def __init__(self, display):
"""
Initializes all the needed components of the Chip-8 CPU to their proper values
"""
self.memory = [0] * 4096
self.registers = [0] * 16
self.address = [0] * 16
self.stack = [0] * 16
self.keys = [0] * 16
self.display_pixels = [[0 for _ in range(64)] for _ in range(32)]
self.pc = 0x200
self.sp = 0
self.register_I = 0
self.delay_timer = 0
self.sound_timer = 0
self.display = display
self.draw = False
self.test = True
self.font_set = [
0xF0, 0x90, 0x90, 0x90, 0xF0, # 0
0x20, 0x60, 0x20, 0x20, 0x70, # 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, # 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, # 3
0x90, 0x90, 0xF0, 0x10, 0x10, # 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, # 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, # 6
0xF0, 0x10, 0x20, 0x40, 0x40, # 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, # 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, # 9
0xF0, 0x90, 0xF0, 0x90, 0x90, # A
0xE0, 0x90, 0xE0, 0x90, 0xE0, # B
0xF0, 0x80, 0x80, 0x80, 0xF0, # C
0xE0, 0x90, 0x90, 0x90, 0xE0, # D
0xF0, 0x80, 0xF0, 0x80, 0xF0, # E
0xF0, 0x80, 0xF0, 0x80, 0x80 # F
]
for x in range(0, len(self.font_set)):
self.memory[x] = self.font_set[x]
def testing(self):
for num in range (0, len(self.registers)):
print("V" + str(num) + ": " + str(self.registers[num]))
print("I: " + str(self.register_I))
print("pc: " + str(self.pc))
print("sp: " + str(self.sp))
print("dt: " + str(self.delay_timer))
print("st: " + str(self.sound_timer))
def load_rom(self, rom_name):
"""
Checks if the user entered rom name exists in the proper directory. If the rom exists
and is a valid Chip-8 rom, it is stored into the proper addresses in the CPU memory.
"""
print("Loading %s..." % (rom_name))
os.chdir('..')
os.chdir('roms')
try:
rom = open(rom_name, "rb")
except IOError:
print("Rom does not exist, please enter a valid rom file.")
sys.exit()
else:
rom_bytes = rom.read()
# First 512 bytes are used by the Chip-8 font set.
if len(rom_bytes) > (4096 - 512):
print("Rom file is too large, please choose a valid rom file.")
#print(len(rom_string))
# Loads rom into memory starting from the address after the first 512 addresses
for byte in range(0, len(rom_bytes)):
self.memory[byte + self.pc] = rom_bytes[byte]
print("Done loading %s!" %(rom_name))
rom.close()
def timer_decrement(self):
if self.delay_timer != 0:
self.delay_timer -= 1
if self.sound_timer != 0:
self.sound_timer -= 1
def get_opcode(self):
"""
Combines bytes in adjacent memory addresses to create a 2 byte long opcode. Left shifts the
first byte by 8 bits and performs a bitwise OR operation to change the created mask into
the values of the second byte.
"""
first_byte = self.memory[self.pc]
second_byte = self.memory[self.pc + 1]
opcode = (first_byte << 8 | second_byte)
return opcode
def perform_opcode(self, opcode):
"""
Decodes the given opcode by identifying the first hexadecimal value. If required, the last
hexadecimal value is also identified and the decoded opcode is performed. The pc is then
advanced based on the opcode.
"""
# Identify first hex to determine which opcode nibble to perform
first_hex = opcode & 0xF000
if first_hex == 0x0000:
last_hex = opcode & 0x000F
# Opcode 00E0: clear screen
if last_hex == 0x0000:
self.display.clear_display()
self.draw = True
self.pc += 2
# Opcode 00EE: returns from subroutine
elif last_hex == 0x000E:
self.sp -= 1
self.pc = self.stack[self.sp]
#self.sp -= 1
self.pc += 2
# Opcode 1NNN: Jump to address NNN
elif first_hex == 0x1000:
# Get last 3 hex values
address = opcode & 0x0FFF
self.pc = address
# Opcode 2NNN: Call subroutine at NNN
# Adds current pc to stack and increments sp
elif first_hex == 0x2000:
address = opcode & 0x0FFF
self.stack[self.sp] = self.pc
self.sp += 1
self.pc = address
# Opcode 3XKK: Skips next instruction if value stored in register X = KK
elif first_hex == 0x3000:
if (self.registers[(opcode & 0x0F00) >> 8] == (opcode & 0x00FF)):
self.pc += 4
else:
self.pc += 2
# Opcode 4XKK: Skips next instruction if value stored in register X != KK
elif first_hex == 0x4000:
if (self.registers[(opcode & 0x0F00) >> 8] != (opcode & 0x00FF)):
self.pc += 4
else:
self.pc += 2
# Opcode 5XY0: Skips next instruction if value stored in register X = value in register Y
elif first_hex == 0x5000:
if (self.registers[(opcode & 0x0F00) >> 8] == self.registers[(opcode & 0x00F0) >> 4]):
self.pc += 4
else:
self.pc += 2
# Opcode 6XKK: Load KK into register X
elif first_hex == 0x6000:
value = opcode & 0x00FF
self.registers[(opcode & 0x0F00) >> 8] = value
self.pc += 2
# Opcode 7XKK: Adds KK to the value in register X and stores it in register X
elif first_hex == 0x7000:
self.registers[(opcode & 0x0F00) >> 8] += (opcode & 0x00FF)
self.pc += 2
elif first_hex == 0x8000:
last_hex = opcode & 0x000F
# Opcode 8XY0: Set value of register X to the value of register Y
if last_hex == 0x000:
self.registers[(opcode & 0x0F00) >> 8] = self.registers[(opcode & 0x00F0) >> 4]
self.pc += 2
# Opcode 8XY1: Set value of register X to (value of register X OR value of register Y)
elif last_hex == 0x001:
self.registers[(opcode & 0x0F00) >> 8] |= self.registers[(opcode & 0x00F0) >> 4]
self.pc += 2
# Opcode 8XY2: Set value of register X to (value of register X AND value of register Y)
elif last_hex == 0x002:
self.registers[(opcode & 0x0F00) >> 8] &= self.registers[(opcode & 0x00F0) >> 4]
self.pc += 2
# Opcode 8XY3: Set value of register X to (value of register X XOR value of register Y)
elif last_hex == 0x003:
self.registers[(opcode & 0x0F00) >> 8] ^= self.registers[(opcode & 0x00F0) >> 4]
self.pc += 2
# Opcode 8XY4: Set value of register X to (value of register X ADD value of register Y) and set carry
elif last_hex == 0x004:
value_sum = self.registers[(opcode & 0x0F00) >> 8] + self.registers[(opcode & 0x00F0) >> 4]
# Only keeps the lowest 8 bits if the sum is greater than 0xFF and sets the carry register to 1
if value_sum > 0xFF:
self.registers[0xF] = 1
self.registers[(opcode & 0x0F00) >> 8] = (value_sum & 0x00FF)
else:
self.registers[0xF] = 0
self.registers[(opcode & 0x0F00) >> 8] = value_sum
self.pc += 2
# Opcode 8XY5: Set value of register X to (value of register X SUB value of register Y)
elif last_hex == 0x005:
# Sets carry register to 0 if there is a borrow else set to 1
if (self.registers[(opcode & 0x0F00) >> 8] > self.registers[(opcode & 0x00F0) >> 4]):
self.registers[0xF] = 1
else:
self.registers[0xF] = 0
self.registers[(opcode & 0x0F00) >> 8] -= self.registers[(opcode & 0x00F0) >> 4]
self.pc += 2
# Opcode 8XY6: Right shift the value of register X by 1
elif last_hex == 0x006:
# Keeps the least significant bit of the value of register X in register F
self.registers[0xF] = (self.registers[(opcode & 0x0F00) >> 8] & 0x0001)
self.registers[(opcode & 0x0F00) >> 8] = (self.registers[(opcode & 0x0F00) >> 8] >> 1)
self.pc += 2
# Opcode 8XY7: Set value of register X to (value of register Y SUB value of register X)
elif last_hex == 0x007:
# Sets carry register to 0 if there is a borrow else set to 1
if (self.registers[(opcode & 0x0F00) >> 8] < self.registers[(opcode & 0x00F0) >> 4]):
self.registers[0xF] = 1
else:
self.registers[0xF] = 0
self.registers[(opcode & 0x0F00) >> 8] = self.registers[(opcode & 0x00F0) >> 4] - self.registers[(opcode & 0x0F00) >> 8]
self.pc += 2
# Opcode 8XYE: Left shift the value of register X by 1
elif last_hex == 0x00E:
# Keeps the most significant bit of the value of register X in register F
self.registers[0xF] = (self.registers[(opcode & 0x0F00) >> 8] >> 7)
self.registers[(opcode & 0x0F00) >> 8] = (self.registers[(opcode & 0x0F00) >> 8] << 1)
self.pc += 2
# Opcode 9XY0: Skip next instruction if value of register X != value of register Y
elif first_hex == 0x9000:
if self.registers[(opcode & 0x0F00) >> 8] != self.registers[(opcode & 0x00F0) >> 4]:
self.pc += 4
else:
self.pc += 2
# Opcode ANNN: Set value of register I to NNN
elif first_hex == 0xA000:
self.register_I = (opcode & 0x0FFF)
self.pc += 2
# Opcode BNNN: Jump to location NNN + value of register 0
elif first_hex == 0xB000:
self.pc = (opcode & 0x0FFF) + self.registers[0]
# Opcode CXKK: Sets the value of register X to (random byte AND KK)
elif first_hex == 0xC000:
random_byte = random.randint(0, 255)
self.registers[(opcode & 0x0F00) >> 8] = (random_byte & (opcode & 0x00FF))
self.pc += 2
# Opcode DXYN: Display an N-byte sprite starting at memory location I at (value of register X, value of register Y)
# If the pixel of the sprite would go past the edge of the screen, wrap it around instead. Sprites are N pixels tall
# and 8 pixels wide on the standard CHIP-8. Drawing works by performing an XOR on a pixel on the screen with a given
# bit. Set value of register F to 1 if collision else set it to 0
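# Illustrative sprite row (example values, not from the original): a byte of 0xF0
# is the bit pattern 11110000, so it XORs the four leftmost pixels of that row and
# leaves the other four untouched; any 1-bit landing on an already-set pixel
# clears it and sets register F to signal a collision.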
elif first_hex == 0xD000:
height = opcode & 0x000F
x_coord = self.registers[(opcode & 0x0F00) >> 8]
y_coord = self.registers[(opcode & 0x00F0) >> 4]
location = self.register_I
self.registers[0xF] = 0
sprite_list = []
print(str(height))
for offset in range(0, height):
sprite_bits = []
sprite_byte = self.memory[location + offset]
sprite_byte = (bin(sprite_byte)[2:]).zfill(8)
for bit in sprite_byte:
sprite_bits.append(bit)
sprite_list.append(sprite_bits)
"""
for sprite in sprite_list:
print(str(sprite))
"""
for sprite in range(len(sprite_list)):
increment = 0
for pixel in sprite_list[sprite]:
screen_pixel = self.display.check_pixel((x_coord + increment) % self.display.width, (y_coord + sprite) % self.display.height)
pixel_color = int(pixel)
if pixel_color == 1 and screen_pixel == 1:
self.registers[0xF] = 1
pixel_color = 0
elif pixel_color == 0 and screen_pixel == 1:
pixel_color = 1
self.display.set_pixel((x_coord + increment) % self.display.width, (y_coord + sprite) % self.display.height, pixel_color)
increment += 1
self.draw = True
self.pc += 2
elif first_hex == 0xE000:
last_hex = opcode & 0x000F
# TODO implement pygame keys
# Opcode EX9E: Skips the next instruction if key with the value of register X is pressed
if last_hex == 0x000E:
if self.keys[self.registers[(opcode & 0x0F00) >> 8]] != 0:
self.pc += 4
else:
self.pc += 2
# Opcode EXA1: Skips the next instruction if key with the value of register X is not pressed
if last_hex == 0x0001:
if self.keys[self.registers[(opcode & 0x0F00) >> 8]] == 0:
self.pc += 4
else:
self.pc +=2
elif first_hex == 0xF000:
last_hex = opcode & 0x000F
# Opcode FX07: Set the value of register X to the value of the delay timer
if last_hex == 0x0007:
self.registers[(opcode & 0x0F00) >> 8] = self.delay_timer
self.pc += 2
# TODO implement pygame keys
# Opcode FX0A: Wait for a key press and stores the value of the pressed key into register X
if last_hex == 0x000A:
key_was_pressed = False
while key_was_pressed is not True:
for key in range(0, len(self.keys)):
if self.keys[key] != 0:
self.registers[(opcode & 0x0F00) >> 8] = key
key_was_pressed = True
self.pc += 2
# Opcode FX15: Set the value of the delay timer to the value of register X
if (opcode & 0x00FF) == 0x0015:
self.delay_timer = self.registers[(opcode & 0x0F00) >> 8]
self.pc += 2
# Opcode FX18: Set the value of the sound timer to the value of register X
if last_hex == 0x0008:
self.sound_timer = self.registers[(opcode & 0x0F00) >> 8]
self.pc += 2
# Opcode FX1E: Set the value of register I to (value of register I + value of register X)
if last_hex == 0x000E:
self.register_I += self.registers[(opcode & 0x0F00) >> 8]
self.pc += 2
# Opcode FX29: Set value of register I to the location of sprite for the digit of the value of register X
# Sprites are 5 bytes long so the value of register X must be multiplied by 5
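# Worked example (illustrative): if register X holds 0xA, register I is set to
# 0xA * 5 = 50, the offset of the 'A' glyph within the font set loaded at 0x000.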
if last_hex == 0x0009:
self.register_I = self.registers[(opcode & 0x0F00) >> 8] * 0x5
self.pc += 2
# Opcode FX33: Store the binary-coded decimal representation of the value of register X in memory locations I, I+1, and I+2
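# Worked example (illustrative): with register X holding 253, the loop below
# stores memory[I] = 2, memory[I + 1] = 5 and memory[I + 2] = 3.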
if last_hex == 0x0003:
value = self.registers[(opcode & 0x0F00) >> 8]
difference = 2
while difference >= 0:
self.memory[self.register_I + difference] = value % 10
value = value // 10
difference -= 1
self.pc += 2
# Opcode Fx55: Store the values of register 0 through X in memory starting in location of the value of register I
if (opcode & 0x00FF) == 0x0055:
location = 0
end = (opcode & 0x0F00) >> 8
while location <= end:
self.memory[self.register_I + location] = self.registers[location]
location += 1
self.pc += 2
# Opcode FX65: Load the registers 0 through X with values starting from the address of the value of register I
if (opcode & 0x00FF) == 0x0065:
location = 0
end = (opcode & 0x0F00) >> 8
while location <= end:
self.registers[location] = self.memory[self.register_I + location]
location += 1
self.pc += 2
else:
print("Invalid opcode, chippy will now quit")
sys.exit()
def perform_cycle(self):
current_opcode = self.get_opcode()
print(hex(current_opcode))
if self.test == True:
self.testing()
self.perform_opcode(current_opcode)
self.timer_decrement()
if self.draw == True:
self.display.update_display()
self.draw = False
"""
For a given aws account, go through all un-attached volumes and tag them.
"""
import boto
import boto.utils
import argparse
import logging
import subprocess
import time
import os
from os.path import join, exists, isdir, islink, realpath, basename, dirname
import yaml
# needs to be pip installed
import netaddr
LOG_FORMAT = "%(asctime)s %(levelname)s - %(filename)s:%(lineno)s - %(message)s"
TIMEOUT = 300
log_level = logging.INFO
def tags_for_hostname(hostname, mapping):
logging.debug("Hostname is {}".format(hostname))
if not hostname.startswith('ip-'):
return {}
octets = hostname.lstrip('ip-').split('-')
tags = {}
# Update with env and deployment info
tags.update(mapping['CIDR_SECOND_OCTET'][octets[1]])
ip_addr = netaddr.IPAddress(".".join(octets))
for key, value in mapping['CIDR_REST'].items():
cidr = ".".join([
mapping['CIDR_FIRST_OCTET'],
octets[1],
key])
cidrset = netaddr.IPSet([cidr])
if ip_addr in cidrset:
tags.update(value)
return tags
def potential_devices(root_device):
device_dir = dirname(root_device)
relevant_devices = lambda x: x.startswith(basename(root_device))
all_devices = os.listdir(device_dir)
all_devices = filter(relevant_devices, all_devices)
logging.info("Potential devices on {}: {}".format(root_device, all_devices))
if len(all_devices) > 1:
all_devices.remove(basename(root_device))
return map(lambda x: join(device_dir, x), all_devices)
def get_tags_for_disk(mountpoint):
tag_data = {}
# Look at some files on it to determine:
# - hostname
# - environment
# - deployment
# - cluster
# - instance-id
# - date created
hostname_file = join(mountpoint, "etc", "hostname")
edx_dir = join(mountpoint, 'edx', 'app')
if exists(hostname_file):
# This means this was a root volume.
with open(hostname_file, 'r') as f:
hostname = f.readline().strip()
tag_data['hostname'] = hostname
if exists(edx_dir) and isdir(edx_dir):
# This is an ansible related ami, we'll try to map
# the hostname to a knows deployment and cluster.
cluster_tags = tags_for_hostname(hostname, mappings)
tag_data.update(cluster_tags)
else:
# Not an ansible created root volume.
tag_data['cluster'] = 'unknown'
else:
# Not a root volume
tag_data['cluster'] = "unknown"
instance_file = join(mountpoint, "var", "lib", "cloud", "instance")
if exists(instance_file) and islink(instance_file):
resolved_path = realpath(instance_file)
old_instance_id = basename(resolved_path)
tag_data['instance-id'] = old_instance_id
return tag_data
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Tag unattached ebs volumes.")
parser.add_argument("--profile", '-p',
help="AWS Profile to use with boto.")
parser.add_argument("--noop", "-n", action="store_true",
help="Don't actually tag anything.")
parser.add_argument("--verbose", "-v", action="store_true",
help="More verbose output.")
parser.add_argument("--device", "-d", default="/dev/xvdf",
help="The /dev/??? where the volume should be mounted.")
parser.add_argument("--mountpoint", "-m", default="/mnt",
help="Location to mount the new device.")
parser.add_argument("--config", "-c", required=True,
help="Configuration to map hostnames to tags.")
# The config should specify what tags to associate with the second
# and third octets of the hostname, which should be the ip address.
# example:
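# A hypothetical sketch of that config (the values are made up; the structure is
# inferred from tags_for_hostname above):
#
#   CIDR_FIRST_OCTET: "10"
#   CIDR_SECOND_OCTET:
#     "107":
#       environment: "stage"
#       deployment: "edx"
#   CIDR_REST:
#     "0.0/20":
#       cluster: "edxapp"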
args = parser.parse_args()
mappings = yaml.safe_load(open(args.config,'r'))
# Setup Logging
if args.verbose:
log_level = logging.DEBUG
logging.basicConfig(format=LOG_FORMAT, level=log_level)
# setup boto
ec2 = boto.connect_ec2(profile_name=args.profile)
# get mounting args
id_info = boto.utils.get_instance_identity()['document']
instance_id = id_info['instanceId']
az = id_info['availabilityZone']
root_device = args.device
mountpoint = args.mountpoint
# Find all unattached volumes
filters = { "status": "available", "availability-zone": az }
potential_volumes = ec2.get_all_volumes(filters=filters)
logging.debug("Found {} unattached volumes in {}".format(len(potential_volumes), az))
for vol in potential_volumes:
if "cluster" in vol.tags:
continue
# Attach volume to the instance running this process
logging.debug("Trying to attach {} to {} at {}".format(
vol.id, instance_id, root_device))
try:
ec2.attach_volume(vol.id, instance_id, root_device)
# Wait for the volume to finish attaching.
waiting_msg = "Waiting for {} to be available at {}"
timeout = TIMEOUT
while not exists(root_device):
time.sleep(2)
logging.debug(waiting_msg.format(vol.id, root_device))
timeout -= 2
if timeout <= 0:
logging.critical("Timed out while attaching {}.".format(vol.id))
exit(1)
# Because a volume might have multiple mount points
devices_on_volume = potential_devices(root_device)
if len(devices_on_volume) != 1:
vol.add_tag("devices_on_volume", str(devices_on_volume))
# Don't tag in this case because the different devices
# may have conflicting tags.
logging.info("Skipping {} because it has multiple mountpoints.".format(vol.id))
logging.info("{} has mountpoints {}".format(vol.id, str(devices_on_volume)))
else:
device = devices_on_volume[0]
try:
# Mount the volume
subprocess.check_call(["sudo", "mount", device, mountpoint])
# Learn all tags we can know from content on disk.
tag_data = get_tags_for_disk(mountpoint)
tag_data['created'] = vol.create_time
# If they are found tag the instance with them
if args.noop:
logging.info("Would have tagged {} with: \n{}".format(vol.id, str(tag_data)))
else:
logging.info("Tagging {} with: \n{}".format(vol.id, str(tag_data)))
vol.add_tags(tag_data)
finally:
# Un-mount the volume
subprocess.check_call(['sudo', 'umount', mountpoint])
finally:
# Need this to be a function so we always re-check the API for status.
is_attached = lambda vol_id: ec2.get_all_volumes(vol_id)[0].status != "available"
timeout = TIMEOUT
while exists(root_device) or is_attached(vol.id):
if is_attached(vol.id):
try:
# detach the volume
ec2.detach_volume(vol.id)
except boto.exception.EC2ResponseError as e:
logging.warning("Failed to detach volume. Will try again in a bit.")
time.sleep(2)
timeout -= 2
if timeout <= 0:
logging.critical("Timed out while detaching {}.".format(vol.id))
exit(1)
logging.debug("Waiting for {} to be detached.".format(vol.id))
from collections import defaultdict
from itertools import product, chain
from math import sqrt, floor, ceil
from PyQt4.QtCore import Qt, QSize
from PyQt4.QtGui import (QGraphicsScene, QGraphicsView, QColor, QPen, QBrush,
QDialog, QApplication, QSizePolicy)
import Orange
from Orange.data import Table, filter
from Orange.data.sql.table import SqlTable, LARGE_TABLE, DEFAULT_SAMPLE_TIME
from Orange.statistics.contingency import get_contingency
from Orange.widgets import gui
from Orange.widgets.settings import DomainContextHandler, ContextSetting
from Orange.widgets.utils import getHtmlCompatibleString
from Orange.widgets.utils.itemmodels import VariableListModel
from Orange.widgets.visualize.owmosaic import (OWCanvasText, OWCanvasRectangle,
OWCanvasLine)
from Orange.widgets.widget import OWWidget, Default, AttributeList
class _ViewWithPress(QGraphicsView):
def __init__(self, *args, **kwargs):
self.handler = kwargs.pop("handler")
super().__init__(*args, **kwargs)
def mousePressEvent(self, ev):
super().mousePressEvent(ev)
if not ev.isAccepted():
self.handler()
class OWSieveDiagram(OWWidget):
name = "Sieve Diagram"
description = "A two-way contingency table providing information in " \
"relation to expected frequency of combination of feature " \
"values under independence."
icon = "icons/SieveDiagram.svg"
priority = 4200
inputs = [("Data", Table, "set_data", Default),
("Features", AttributeList, "set_input_features")]
outputs = [("Selection", Table)]
graph_name = "canvas"
want_control_area = False
settingsHandler = DomainContextHandler()
attrX = ContextSetting("")
attrY = ContextSetting("")
selection = ContextSetting(set())
def __init__(self):
super().__init__()
self.data = None
self.input_features = None
self.attrs = []
self.attr_box = gui.hBox(self.mainArea)
model = VariableListModel()
model.wrap(self.attrs)
self.attrXCombo = gui.comboBox(
self.attr_box, self, value="attrX", contentsLength=12,
callback=self.change_attr, sendSelectedValue=True, valueType=str)
self.attrXCombo.setModel(model)
gui.widgetLabel(self.attr_box, "\u2715").\
setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.attrYCombo = gui.comboBox(
self.attr_box, self, value="attrY", contentsLength=12,
callback=self.change_attr, sendSelectedValue=True, valueType=str)
self.attrYCombo.setModel(model)
self.canvas = QGraphicsScene()
self.canvasView = _ViewWithPress(self.canvas, self.mainArea,
handler=self.reset_selection)
self.mainArea.layout().addWidget(self.canvasView)
self.canvasView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.canvasView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
box = gui.hBox(self.mainArea)
gui.button(box, None, "&Save Graph",
callback=self.save_graph, autoDefault=False)
gui.button(box, None, "&Report",
callback=self.show_report, autoDefault=False)
def sizeHint(self):
return QSize(450, 550)
def set_data(self, data):
if type(data) == SqlTable and data.approx_len() > LARGE_TABLE:
data = data.sample_time(DEFAULT_SAMPLE_TIME)
self.closeContext()
self.data = data
self.areas = []
if self.data is None:
self.attrs[:] = []
else:
self.attrs[:] = [
var for var in chain(self.data.domain.attributes,
self.data.domain.metas)
if var.is_discrete
]
if self.attrs:
self.attrX = self.attrs[0].name
self.attrY = self.attrs[len(self.attrs) > 1].name
else:
self.attrX = self.attrY = None
self.openContext(self.data)
self.information(0, "")
if data and any(attr.is_continuous for attr in data.domain):
self.information(0, "Data contains continuous variables. "
"Discretize the data to use them.")
self.resolve_shown_attributes()
self.update_selection()
def change_attr(self):
self.selection = set()
self.updateGraph()
self.update_selection()
def set_input_features(self, attrList):
self.input_features = attrList
self.resolve_shown_attributes()
self.update_selection()
def resolve_shown_attributes(self):
self.warning(1)
self.attr_box.setEnabled(True)
if self.input_features: # non-None and non-empty!
features = [f for f in self.input_features if f in self.attrs]
if not features:
self.warning(1, "Features from the input signal "
"are not present in the data")
else:
old_attrs = self.attrX, self.attrY
self.attrX, self.attrY = [f.name for f in (features * 2)[:2]]
self.attr_box.setEnabled(False)
if (self.attrX, self.attrY) != old_attrs:
self.selection = set()
# else: do nothing; keep current features, even if input with the
# features just changed to None
self.updateGraph()
def resizeEvent(self, e):
OWWidget.resizeEvent(self,e)
self.updateGraph()
def showEvent(self, ev):
OWWidget.showEvent(self, ev)
self.updateGraph()
def reset_selection(self):
self.selection = set()
self.update_selection()
def select_area(self, area, ev):
if ev.button() != Qt.LeftButton:
return
index = self.areas.index(area)
if ev.modifiers() & Qt.ControlModifier:
self.selection ^= {index}
else:
self.selection = {index}
self.update_selection()
def update_selection(self):
if self.areas is None or not self.selection:
self.send("Selection", None)
return
filters = []
for i, area in enumerate(self.areas):
if i in self.selection:
width = 4
val_x, val_y = area.value_pair
filters.append(
filter.Values([
filter.FilterDiscrete(self.attrX, [val_x]),
filter.FilterDiscrete(self.attrY, [val_y])
]))
else:
width = 1
pen = area.pen()
pen.setWidth(width)
area.setPen(pen)
if len(filters) == 1:
filters = filters[0]
else:
filters = filter.Values(filters, conjunction=False)
self.send("Selection", filters(self.data))
# -----------------------------------------------------------------------
# Everything from here on is ancient and has been changed only according
# to what has been changed above. Some clean-up may be in order some day
#
def updateGraph(self, *args):
for item in self.canvas.items():
self.canvas.removeItem(item)
if self.data is None or len(self.data) == 0 or \
self.attrX is None or self.attrY is None:
return
data = self.data[:, [self.attrX, self.attrY]]
valsX = []
valsY = []
contX = get_contingency(data, self.attrX, self.attrX)
contY = get_contingency(data, self.attrY, self.attrY)
# compute contingency of x and y attributes
for entry in contX:
sum_ = 0
try:
for val in entry: sum_ += val
except: pass
valsX.append(sum_)
for entry in contY:
sum_ = 0
try:
for val in entry: sum_ += val
except: pass
valsY.append(sum_)
contXY = self.getConditionalDistributions(data, [data.domain[self.attrX], data.domain[self.attrY]])
# compute probabilities
probs = {}
for i in range(len(valsX)):
valx = valsX[i]
for j in range(len(valsY)):
valy = valsY[j]
actualProb = 0
try:
actualProb = contXY['%s-%s' %(data.domain[self.attrX].values[i], data.domain[self.attrY].values[j])]
# for val in contXY['%s-%s' %(i, j)]: actualProb += val
except:
actualProb = 0
probs['%s-%s' %(data.domain[self.attrX].values[i], data.domain[self.attrY].values[j])] = ((data.domain[self.attrX].values[i], valx), (data.domain[self.attrY].values[j], valy), actualProb, len(data))
#get text width of Y labels
max_ylabel_w = 0
for j in range(len(valsY)):
xl = OWCanvasText(self.canvas, "", 0, 0, htmlText = getHtmlCompatibleString(data.domain[self.attrY].values[j]), show=False)
max_ylabel_w = max(int(xl.boundingRect().width()), max_ylabel_w)
max_ylabel_w = min(max_ylabel_w, 200) #upper limit for label widths
# get text width of Y attribute name
text = OWCanvasText(self.canvas, data.domain[self.attrY].name, x = 0, y = 0, bold = 1, show = 0, vertical=True)
xOff = int(text.boundingRect().height() + max_ylabel_w)
yOff = 55
sqareSize = min(self.canvasView.width() - xOff - 35, self.canvasView.height() - yOff - 50)
sqareSize = max(sqareSize, 10)
self.canvasView.setSceneRect(0, 0, self.canvasView.width(), self.canvasView.height())
# print graph name
name = "<b>P(%s, %s) ≠ P(%s)×P(%s)</b>" %(self.attrX, self.attrY, self.attrX, self.attrY)
OWCanvasText(self.canvas, "" , xOff+ sqareSize/2, 20, Qt.AlignCenter, htmlText = name)
OWCanvasText(self.canvas, "N = " + str(len(data)), xOff+ sqareSize/2, 38, Qt.AlignCenter, bold = 0)
######################
# compute chi-square
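# For each cell: expected = (row total * column total) / N, and the cell's
# contribution is (observed - expected)^2 / expected; chisquare sums these over all cells.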
chisquare = 0.0
for i in range(len(valsX)):
for j in range(len(valsY)):
((xAttr, xVal), (yAttr, yVal), actual, sum_) = probs['%s-%s' %(data.domain[self.attrX].values[i], data.domain[self.attrY].values[j])]
expected = float(xVal*yVal)/float(sum_)
if expected == 0: continue
pearson2 = (actual - expected)*(actual - expected) / expected
chisquare += pearson2
######################
# draw rectangles
currX = xOff
max_xlabel_h = 0
normX, normY = sum(valsX), sum(valsY)
self.areas = []
for i in range(len(valsX)):
if valsX[i] == 0: continue
currY = yOff
width = int(float(sqareSize * valsX[i])/float(normX))
for j in range(len(valsY)-1, -1, -1): # this way we sort y values correctly
((xAttr, xVal), (yAttr, yVal), actual, sum_) = probs['%s-%s' %(data.domain[self.attrX].values[i], data.domain[self.attrY].values[j])]
if valsY[j] == 0: continue
height = int(float(sqareSize * valsY[j])/float(normY))
# create rectangle
selected = len(self.areas) in self.selection
rect = OWCanvasRectangle(
self.canvas, currX+2, currY+2, width-4, height-4, z = -10,
onclick=self.select_area)
rect.value_pair = i, j
self.areas.append(rect)
self.addRectIndependencePearson(rect, currX+2, currY+2, width-4, height-4, (xAttr, xVal), (yAttr, yVal), actual, sum_,
width=1 + 3 * selected, # Ugly! This is needed since
# resize redraws the graph! When this is handled by resizing
# just the viewer, update_selection will take care of this
)
expected = float(xVal*yVal)/float(sum_)
pearson = (actual - expected) / sqrt(expected)
tooltipText = """<b>X Attribute: %s</b><br>Value: <b>%s</b><br>Number of instances (p(x)): <b>%d (%.2f%%)</b><hr>
<b>Y Attribute: %s</b><br>Value: <b>%s</b><br>Number of instances (p(y)): <b>%d (%.2f%%)</b><hr>
<b>Number Of Instances (Probabilities):</b><br>Expected (p(x)p(y)): <b>%.1f (%.2f%%)</b><br>Actual (p(x,y)): <b>%d (%.2f%%)</b>
<hr><b>Statistics:</b><br>Chi-square: <b>%.2f</b><br>Standardized Pearson residual: <b>%.2f</b>""" %(self.attrX, getHtmlCompatibleString(xAttr), xVal, 100.0*float(xVal)/float(sum_), self.attrY, getHtmlCompatibleString(yAttr), yVal, 100.0*float(yVal)/float(sum_), expected, 100.0*float(xVal*yVal)/float(sum_*sum_), actual, 100.0*float(actual)/float(sum_), chisquare, pearson )
rect.setToolTip(tooltipText)
currY += height
if currX == xOff:
OWCanvasText(self.canvas, "", xOff, currY - height/2, Qt.AlignRight | Qt.AlignVCenter, htmlText = getHtmlCompatibleString(data.domain[self.attrY].values[j]))
xl = OWCanvasText(self.canvas, "", currX + width/2, yOff + sqareSize, Qt.AlignHCenter | Qt.AlignTop, htmlText = getHtmlCompatibleString(data.domain[self.attrX].values[i]))
max_xlabel_h = max(int(xl.boundingRect().height()), max_xlabel_h)
currX += width
# show attribute names
OWCanvasText(self.canvas, self.attrY, 0, yOff + sqareSize/2, Qt.AlignLeft | Qt.AlignVCenter, bold = 1, vertical=True)
OWCanvasText(self.canvas, self.attrX, xOff + sqareSize/2, yOff + sqareSize + max_xlabel_h, Qt.AlignHCenter | Qt.AlignTop, bold = 1)
# create a dictionary with all possible pairs of "combination-of-attr-values" : count
def getConditionalDistributions(self, data, attrs):
cond_dist = defaultdict(int)
all_attrs = [data.domain[a] for a in attrs]
if data.domain.class_var is not None:
all_attrs.append(data.domain.class_var)
for i in range(1, len(all_attrs) + 1):
attr = all_attrs[:i]
if type(data) == SqlTable:
# make all possible pairs of attributes + class_var
attr = [a.to_sql() for a in attr]
fields = attr + ["COUNT(*)"]
query = data._sql_query(fields, group_by=attr)
with data._execute_sql_query(query) as cur:
res = cur.fetchall()
for r in res:
str_values =[a.repr_val(a.to_val(x)) for a, x in zip(all_attrs, r[:-1])]
str_values = [x if x != '?' else 'None' for x in str_values]
cond_dist['-'.join(str_values)] = r[-1]
else:
for indices in product(*(range(len(a.values)) for a in attr)):
vals = []
conditions = []
for k, ind in enumerate(indices):
vals.append(attr[k].values[ind])
fd = Orange.data.filter.FilterDiscrete(column=attr[k], values=[attr[k].values[ind]])
conditions.append(fd)
filt = Orange.data.filter.Values(conditions)
filtdata = filt(data)
cond_dist['-'.join(vals)] = len(filtdata)
return cond_dist
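# Editor's sketch (attribute and value names hypothetical, not from the widget): for
# attrs = ['sex', 'age'] with values ('M', 'F') and ('young', 'old'), cond_dist holds a
# count for every prefix combination of values joined by '-', e.g.
#   {'M': 120, 'F': 80, 'M-young': 70, 'M-old': 50, 'F-young': 30, 'F-old': 50}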
######################################################################
## show deviations from attribute independence with standardized pearson residuals
def addRectIndependencePearson(self, rect, x, y, w, h, xAttr_xVal, yAttr_yVal, actual, sum, width):
xAttr, xVal = xAttr_xVal
yAttr, yVal = yAttr_yVal
expected = float(xVal*yVal)/float(sum)
pearson = (actual - expected) / sqrt(expected)
if pearson > 0: # if there are more examples that we would expect under the null hypothesis
intPearson = floor(pearson)
pen = QPen(QColor(0,0,255), width); rect.setPen(pen)
b = 255
r = g = 255 - intPearson*20
r = g = max(r, 55) #
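# clamp so the blue fill never gets darker than (55, 55, 255)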
elif pearson < 0:
intPearson = ceil(pearson)
pen = QPen(QColor(255,0,0), width)
rect.setPen(pen)
r = 255
b = g = 255 + intPearson*20
b = g = max(b, 55)
else:
pen = QPen(QColor(255,255,255), width)
r = g = b = 255 # white
color = QColor(r,g,b)
brush = QBrush(color); rect.setBrush(brush)
if pearson > 0:
pearson = min(pearson, 10)
kvoc = 1 - 0.08 * pearson # if pearson in [0..10] --> kvoc in [1..0.2]
else:
pearson = max(pearson, -10)
kvoc = 1 - 0.4*pearson
self.addLines(x,y,w,h, kvoc, pen)
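# Editor's sketch (values hypothetical, not part of the widget): the colouring above is
# driven by the standardized Pearson residual, residual = (actual - expected) / sqrt(expected),
# where expected = xVal * yVal / sum is the cell count implied by independence.
# Example: with marginals xVal = 30, yVal = 40, sum = 100 and an observed count actual = 18,
# expected = 12.0 and residual = (18 - 12) / sqrt(12) ~= 1.73, so the rectangle is shaded
# blue (more cases than independence would predict).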
##################################################
# add lines
def addLines(self, x,y,w,h, diff, pen):
if w == 0 or h == 0: return
# create lines
dist = 20 # original distance between two lines in pixels
dist = dist * diff
temp = dist
while (temp < w):
OWCanvasLine(self.canvas, temp+x, y, temp+x, y+h, 1, pen.color())
temp += dist
temp = dist
while (temp < h):
OWCanvasLine(self.canvas, x, y+temp, x+w, y+temp, 1, pen.color())
temp += dist
def closeEvent(self, ce):
QDialog.closeEvent(self, ce)
def get_widget_name_extension(self):
if self.data is not None:
return "{} vs {}".format(self.attrX, self.attrY)
def send_report(self):
self.report_plot()
# test widget appearance
if __name__=="__main__":
import sys
a=QApplication(sys.argv)
ow=OWSieveDiagram()
ow.show()
data = Table(r"zoo.tab")
ow.set_data(data)
a.exec_()
ow.saveSettings()
| 0.006135 |
"""
Views related to course tabs
"""
from access import has_course_access
from util.json_request import expect_json, JsonResponse
from django.http import HttpResponseNotFound
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_http_methods
from edxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.django import loc_mapper
from xmodule.modulestore.locator import BlockUsageLocator
from xmodule.tabs import CourseTabList, StaticTab, CourseTab, InvalidTabsException
from ..utils import get_modulestore, get_lms_link_for_item
__all__ = ['tabs_handler']
@expect_json
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
def tabs_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
The restful handler for static tabs.
GET
html: return page for editing static tabs
json: not supported
PUT or POST
json: update the tab order. It is expected that the request body contains a JSON-encoded dict with entry "tabs".
The value for "tabs" is an array of tab locators, indicating the desired order of the tabs.
Creating a tab, deleting a tab, or changing its contents is not supported through this method.
Instead use the general xblock URL (see item.xblock_handler).
"""
locator = BlockUsageLocator(package_id=package_id, branch=branch, version_guid=version_guid, block_id=block)
if not has_course_access(request.user, locator):
raise PermissionDenied()
old_location = loc_mapper().translate_locator_to_location(locator)
store = get_modulestore(old_location)
course_item = store.get_item(old_location)
if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
if request.method == 'GET':
raise NotImplementedError('coming soon')
else:
if 'tabs' in request.json:
return reorder_tabs_handler(course_item, request)
elif 'tab_id_locator' in request.json:
return edit_tab_handler(course_item, request)
else:
raise NotImplementedError('Creating or changing tab content is not supported.')
elif request.method == 'GET': # assume html
# get all tabs from the tabs list: static tabs (a.k.a. user-created tabs) and built-in tabs
# present in the same order they are displayed in LMS
tabs_to_render = []
for tab in CourseTabList.iterate_displayable_cms(
course_item,
settings,
):
if isinstance(tab, StaticTab):
# static tab needs its locator information to render itself as an xmodule
static_tab_loc = old_location.replace(category='static_tab', name=tab.url_slug)
tab.locator = loc_mapper().translate_location(
course_item.location.course_id, static_tab_loc, False, True
)
tabs_to_render.append(tab)
return render_to_response('edit-tabs.html', {
'context_course': course_item,
'tabs_to_render': tabs_to_render,
'course_locator': locator,
'lms_link': get_lms_link_for_item(course_item.location),
})
else:
return HttpResponseNotFound()
def reorder_tabs_handler(course_item, request):
"""
Helper function for handling reorder of tabs request
"""
# Tabs are identified by tab_id or locators.
# The locators are used to identify static tabs since they are xmodules.
# Although all tabs have tab_ids, newly created static tabs do not know
# their tab_ids since the xmodule editor uses only locators to identify new objects.
requested_tab_id_locators = request.json['tabs']
# original tab list in original order
old_tab_list = course_item.tabs
# create a new list in the new order
new_tab_list = []
for tab_id_locator in requested_tab_id_locators:
tab = get_tab_by_tab_id_locator(old_tab_list, tab_id_locator)
if tab is None:
return JsonResponse(
{"error": "Tab with id_locator '{0}' does not exist.".format(tab_id_locator)}, status=400
)
new_tab_list.append(tab)
# the old_tab_list may contain additional tabs that were not rendered in the UI because of
# global or course settings. so add those to the end of the list.
non_displayed_tabs = set(old_tab_list) - set(new_tab_list)
new_tab_list.extend(non_displayed_tabs)
# validate the tabs to make sure everything is Ok (e.g., did the client try to reorder unmovable tabs?)
try:
CourseTabList.validate_tabs(new_tab_list)
except InvalidTabsException as exception:
return JsonResponse(
{"error": "New list of tabs is not valid: {0}.".format(str(exception))}, status=400
)
# persist the new order of the tabs
course_item.tabs = new_tab_list
modulestore('direct').update_item(course_item, request.user.id)
return JsonResponse()
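# Editor's sketch of the JSON body this handler expects (locator strings hypothetical):
#   {"tabs": [
#       {"tab_id": "courseware"},
#       {"tab_locator": "i4x://Org/Course/static_tab/my_tab"},
#       {"tab_id": "progress"}
#   ]}
# Each entry identifies a tab either by its tab_id or, for a static tab, by its locator.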
def edit_tab_handler(course_item, request):
"""
Helper function for handling requests to edit settings of a single tab
"""
# Tabs are identified by tab_id or locator
tab_id_locator = request.json['tab_id_locator']
# Find the given tab in the course
tab = get_tab_by_tab_id_locator(course_item.tabs, tab_id_locator)
if tab is None:
return JsonResponse(
{"error": "Tab with id_locator '{0}' does not exist.".format(tab_id_locator)}, status=400
)
if 'is_hidden' in request.json:
# set the is_hidden attribute on the requested tab
tab.is_hidden = request.json['is_hidden']
modulestore('direct').update_item(course_item, request.user.id)
else:
raise NotImplementedError('Unsupported request to edit tab: {0}'.format(request.json))
return JsonResponse()
def get_tab_by_tab_id_locator(tab_list, tab_id_locator):
"""
Look for a tab with the specified tab_id or locator. Returns the first matching tab.
"""
if 'tab_id' in tab_id_locator:
tab = CourseTabList.get_tab_by_id(tab_list, tab_id_locator['tab_id'])
elif 'tab_locator' in tab_id_locator:
tab = get_tab_by_locator(tab_list, tab_id_locator['tab_locator'])
return tab
def get_tab_by_locator(tab_list, tab_locator):
"""
Look for a tab with the specified locator. Returns the first matching tab.
"""
tab_location = loc_mapper().translate_locator_to_location(BlockUsageLocator(tab_locator))
item = modulestore('direct').get_item(tab_location)
static_tab = StaticTab(
name=item.display_name,
url_slug=item.location.name,
)
return CourseTabList.get_tab_by_id(tab_list, static_tab.tab_id)
# "primitive" tab edit functions driven by the command line.
# These should be replaced/deleted by a more capable GUI someday.
# Note that the command line UI identifies the tabs with 1-based
# indexing, but this implementation code is standard 0-based.
def validate_args(num, tab_type):
"Throws for the disallowed cases."
if num <= 1:
raise ValueError('Tabs 1 and 2 cannot be edited')
if tab_type == 'static_tab':
raise ValueError('Tabs of type static_tab cannot be edited here (use Studio)')
def primitive_delete(course, num):
"Deletes the given tab number (0 based)."
tabs = course.tabs
validate_args(num, tabs[num].get('type', ''))
del tabs[num]
# Note for future implementations: if you delete a static_tab, then Chris Dodge
# points out that there's other stuff to delete beyond this element.
# This code happens to not delete static_tab so it doesn't come up.
modulestore('direct').update_item(course, '**replace_user**')
def primitive_insert(course, num, tab_type, name):
"Inserts a new tab at the given number (0 based)."
validate_args(num, tab_type)
new_tab = CourseTab.from_json({u'type': unicode(tab_type), u'name': unicode(name)})
tabs = course.tabs
tabs.insert(num, new_tab)
modulestore('direct').update_item(course, '**replace_user**')
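# Editor's sketch of driving the primitive helpers from a shell (course retrieval assumed,
# course_location is hypothetical):
#   course = modulestore('direct').get_item(course_location)
#   primitive_insert(course, 3, 'progress', 'Progress')   # insert at 0-based position 3
#   primitive_delete(course, 4)                           # delete the tab now at position 4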
| 0.002995 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkRulesOperations(object):
"""VirtualNetworkRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.mariadb.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
server_name, # type: str
virtual_network_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkRule"
"""Gets a virtual network rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param virtual_network_rule_name: The name of the virtual network rule.
:type virtual_network_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkRule, or the result of cls(response)
:rtype: ~azure.mgmt.rdbms.mariadb.models.VirtualNetworkRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'virtualNetworkRuleName': self._serialize.url("virtual_network_rule_name", virtual_network_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMariaDB/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
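# Editor's usage sketch (client construction and attribute names assumed, not generated code):
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
#   client = MariaDBManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   rule = client.virtual_network_rules.get("my-rg", "my-server", "my-rule")
#   print(rule.virtual_network_subnet_id)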
def _create_or_update_initial(
self,
resource_group_name, # type: str
server_name, # type: str
virtual_network_rule_name, # type: str
parameters, # type: "_models.VirtualNetworkRule"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.VirtualNetworkRule"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualNetworkRule"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'virtualNetworkRuleName': self._serialize.url("virtual_network_rule_name", virtual_network_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetworkRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMariaDB/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
server_name, # type: str
virtual_network_rule_name, # type: str
parameters, # type: "_models.VirtualNetworkRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetworkRule"]
"""Creates or updates an existing virtual network rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param virtual_network_rule_name: The name of the virtual network rule.
:type virtual_network_rule_name: str
:param parameters: The requested virtual Network Rule Resource state.
:type parameters: ~azure.mgmt.rdbms.mariadb.models.VirtualNetworkRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetworkRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.mariadb.models.VirtualNetworkRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
server_name=server_name,
virtual_network_rule_name=virtual_network_rule_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'virtualNetworkRuleName': self._serialize.url("virtual_network_rule_name", virtual_network_rule_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMariaDB/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
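# Editor's sketch of driving the long-running operation above (parameter values hypothetical):
#   poller = client.virtual_network_rules.begin_create_or_update(
#       "my-rg", "my-server", "my-rule",
#       {"virtual_network_subnet_id": "<subnet-resource-id>"})
#   rule = poller.result()   # blocks until the ARM operation completes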
def _delete_initial(
self,
resource_group_name, # type: str
server_name, # type: str
virtual_network_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'virtualNetworkRuleName': self._serialize.url("virtual_network_rule_name", virtual_network_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMariaDB/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
server_name, # type: str
virtual_network_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the virtual network rule with the given name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param virtual_network_rule_name: The name of the virtual network rule.
:type virtual_network_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
server_name=server_name,
virtual_network_rule_name=virtual_network_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'virtualNetworkRuleName': self._serialize.url("virtual_network_rule_name", virtual_network_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMariaDB/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
def list_by_server(
self,
resource_group_name, # type: str
server_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkRuleListResult"]
"""Gets a list of virtual network rules in a server.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.rdbms.mariadb.models.VirtualNetworkRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_server.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMariaDB/servers/{serverName}/virtualNetworkRules'} # type: ignore
| 0.004838 |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from glance.common import crypt
from glance.common import exception
import glance.context
import glance.db
from glance.openstack.common import uuidutils
import glance.tests.unit.utils as unit_test_utils
import glance.tests.utils as test_utils
CONF = cfg.CONF
CONF.import_opt('metadata_encryption_key', 'glance.common.config')
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc'
UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7'
UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81'
TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8'
TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4'
USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf'
UUID1_LOCATION = 'file:///path/to/image'
UUID1_LOCATION_METADATA = {'key': 'value'}
UUID3_LOCATION = 'http://somehost.com/place'
CHECKSUM = '93264c3edf5972c9f1cb309543d38a5c'
CHCKSUM1 = '43264c3edf4972c9f1cb309543d38a55'
def _db_fixture(id, **kwargs):
obj = {
'id': id,
'name': None,
'is_public': False,
'properties': {},
'checksum': None,
'owner': None,
'status': 'queued',
'tags': [],
'size': None,
'locations': [],
'protected': False,
'disk_format': None,
'container_format': None,
'deleted': False,
'min_ram': None,
'min_disk': None,
}
obj.update(kwargs)
return obj
def _db_image_member_fixture(image_id, member_id, **kwargs):
obj = {
'image_id': image_id,
'member': member_id,
}
obj.update(kwargs)
return obj
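# Editor's sketch of how the two fixtures are typically combined (values hypothetical):
#   image = _db_fixture('11111111-2222-3333-4444-555555555555', owner=TENANT1,
#                       name='demo', is_public=True, status='active')
#   member = _db_image_member_fixture(image['id'], TENANT2, status='pending')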
class TestImageRepo(test_utils.BaseTestCase):
def setUp(self):
super(TestImageRepo, self).setUp()
self.db = unit_test_utils.FakeDB()
self.db.reset()
self.context = glance.context.RequestContext(
user=USER1, tenant=TENANT1)
self.image_repo = glance.db.ImageRepo(self.context, self.db)
self.image_factory = glance.domain.ImageFactory()
self._create_images()
self._create_image_members()
def _create_images(self):
self.db.reset()
self.images = [
_db_fixture(UUID1, owner=TENANT1, checksum=CHECKSUM,
name='1', size=256,
is_public=True, status='active',
locations=[{'url': UUID1_LOCATION,
'metadata': UUID1_LOCATION_METADATA}]),
_db_fixture(UUID2, owner=TENANT1, checksum=CHCKSUM1,
name='2', size=512, is_public=False),
_db_fixture(UUID3, owner=TENANT3, checksum=CHCKSUM1,
name='3', size=1024, is_public=True,
locations=[{'url': UUID3_LOCATION,
'metadata': {}}]),
_db_fixture(UUID4, owner=TENANT4, name='4', size=2048),
]
[self.db.image_create(None, image) for image in self.images]
self.db.image_tag_set_all(None, UUID1, ['ping', 'pong'])
def _create_image_members(self):
self.image_members = [
_db_image_member_fixture(UUID2, TENANT2),
_db_image_member_fixture(UUID2, TENANT3, status='accepted'),
]
[self.db.image_member_create(None, image_member)
for image_member in self.image_members]
def test_get(self):
image = self.image_repo.get(UUID1)
self.assertEquals(image.image_id, UUID1)
self.assertEquals(image.name, '1')
self.assertEquals(image.tags, set(['ping', 'pong']))
self.assertEquals(image.visibility, 'public')
self.assertEquals(image.status, 'active')
self.assertEquals(image.size, 256)
self.assertEquals(image.owner, TENANT1)
def test_location_value(self):
image = self.image_repo.get(UUID3)
self.assertEqual(image.locations[0]['url'], UUID3_LOCATION)
def test_location_data_value(self):
image = self.image_repo.get(UUID1)
self.assertEqual(image.locations[0]['url'], UUID1_LOCATION)
self.assertEqual(image.locations[0]['metadata'],
UUID1_LOCATION_METADATA)
def test_location_data_exists(self):
image = self.image_repo.get(UUID2)
self.assertEqual(image.locations, [])
def test_get_not_found(self):
self.assertRaises(exception.NotFound, self.image_repo.get,
uuidutils.generate_uuid())
def test_get_forbidden(self):
self.assertRaises(exception.NotFound, self.image_repo.get, UUID4)
def test_list(self):
images = self.image_repo.list()
image_ids = set([i.image_id for i in images])
self.assertEqual(set([UUID1, UUID2, UUID3]), image_ids)
def _do_test_list_status(self, status, expected):
self.context = glance.context.RequestContext(
user=USER1, tenant=TENANT3)
self.image_repo = glance.db.ImageRepo(self.context, self.db)
images = self.image_repo.list(member_status=status)
self.assertEqual(expected, len(images))
def test_list_status(self):
self._do_test_list_status(None, 3)
def test_list_status_pending(self):
self._do_test_list_status('pending', 2)
def test_list_status_rejected(self):
self._do_test_list_status('rejected', 2)
def test_list_status_all(self):
self._do_test_list_status('all', 3)
def test_list_with_marker(self):
full_images = self.image_repo.list()
full_ids = [i.image_id for i in full_images]
marked_images = self.image_repo.list(marker=full_ids[0])
actual_ids = [i.image_id for i in marked_images]
self.assertEqual(actual_ids, full_ids[1:])
def test_list_with_last_marker(self):
images = self.image_repo.list()
marked_images = self.image_repo.list(marker=images[-1].image_id)
self.assertEqual(len(marked_images), 0)
def test_limited_list(self):
limited_images = self.image_repo.list(limit=2)
self.assertEqual(len(limited_images), 2)
def test_list_with_marker_and_limit(self):
full_images = self.image_repo.list()
full_ids = [i.image_id for i in full_images]
marked_images = self.image_repo.list(marker=full_ids[0], limit=1)
actual_ids = [i.image_id for i in marked_images]
self.assertEqual(actual_ids, full_ids[1:2])
def test_list_private_images(self):
filters = {'visibility': 'private'}
images = self.image_repo.list(filters=filters)
image_ids = set([i.image_id for i in images])
self.assertEqual(set([UUID2]), image_ids)
def test_list_with_checksum_filter_single_image(self):
filters = {'checksum': CHECKSUM}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEquals(1, len(image_ids))
self.assertEqual([UUID1], image_ids)
def test_list_with_checksum_filter_multiple_images(self):
filters = {'checksum': CHCKSUM1}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEquals(2, len(image_ids))
self.assertEqual([UUID3, UUID2], image_ids)
def test_list_with_wrong_checksum(self):
WRONG_CHKSUM = 'd2fd42f979e1ed1aafadc7eb9354bff839c858cd'
filters = {'checksum': WRONG_CHKSUM}
images = self.image_repo.list(filters=filters)
self.assertEquals(0, len(images))
def test_list_with_tags_filter_single_tag(self):
filters = {'tags': ['ping']}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEquals(1, len(image_ids))
self.assertEqual([UUID1], image_ids)
def test_list_with_tags_filter_multiple_tags(self):
filters = {'tags': ['ping', 'pong']}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEquals(1, len(image_ids))
self.assertEqual([UUID1], image_ids)
def test_list_with_tags_filter_multiple_tags_and_nonexistent(self):
filters = {'tags': ['ping', 'fake']}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEquals(0, len(image_ids))
def test_list_with_wrong_tags(self):
filters = {'tags': ['fake']}
images = self.image_repo.list(filters=filters)
self.assertEquals(0, len(images))
def test_list_public_images(self):
filters = {'visibility': 'public'}
images = self.image_repo.list(filters=filters)
image_ids = set([i.image_id for i in images])
self.assertEqual(set([UUID1, UUID3]), image_ids)
def test_sorted_list(self):
images = self.image_repo.list(sort_key='size', sort_dir='asc')
image_ids = [i.image_id for i in images]
self.assertEqual([UUID1, UUID2, UUID3], image_ids)
def test_add_image(self):
image = self.image_factory.new_image(name='added image')
self.assertEqual(image.updated_at, image.created_at)
self.image_repo.add(image)
retreived_image = self.image_repo.get(image.image_id)
self.assertEqual(retreived_image.name, 'added image')
self.assertEqual(retreived_image.updated_at, image.updated_at)
def test_save_image(self):
image = self.image_repo.get(UUID1)
original_update_time = image.updated_at
image.name = 'foo'
image.tags = ['king', 'kong']
self.image_repo.save(image)
current_update_time = image.updated_at
self.assertTrue(current_update_time > original_update_time)
image = self.image_repo.get(UUID1)
self.assertEqual(image.name, 'foo')
self.assertEqual(image.tags, set(['king', 'kong']))
self.assertEqual(image.updated_at, current_update_time)
def test_remove_image(self):
image = self.image_repo.get(UUID1)
previous_update_time = image.updated_at
self.image_repo.remove(image)
self.assertTrue(image.updated_at > previous_update_time)
self.assertRaises(exception.NotFound, self.image_repo.get, UUID1)
class TestEncryptedLocations(test_utils.BaseTestCase):
def setUp(self):
super(TestEncryptedLocations, self).setUp()
self.db = unit_test_utils.FakeDB()
self.db.reset()
self.context = glance.context.RequestContext(
user=USER1, tenant=TENANT1)
self.image_repo = glance.db.ImageRepo(self.context, self.db)
self.image_factory = glance.domain.ImageFactory()
self.crypt_key = '0123456789abcdef'
self.config(metadata_encryption_key=self.crypt_key)
self.foo_bar_location = [{'url': 'foo', 'metadata': {}},
{'url': 'bar', 'metadata': {}}]
def test_encrypt_locations_on_add(self):
image = self.image_factory.new_image(UUID1)
image.locations = self.foo_bar_location
self.image_repo.add(image)
db_data = self.db.image_get(self.context, UUID1)
self.assertNotEqual(db_data['locations'], ['foo', 'bar'])
decrypted_locations = [crypt.urlsafe_decrypt(self.crypt_key, l['url'])
for l in db_data['locations']]
self.assertEqual(decrypted_locations,
[l['url'] for l in self.foo_bar_location])
def test_encrypt_locations_on_save(self):
image = self.image_factory.new_image(UUID1)
self.image_repo.add(image)
image.locations = self.foo_bar_location
self.image_repo.save(image)
db_data = self.db.image_get(self.context, UUID1)
self.assertNotEqual(db_data['locations'], ['foo', 'bar'])
decrypted_locations = [crypt.urlsafe_decrypt(self.crypt_key, l['url'])
for l in db_data['locations']]
self.assertEqual(decrypted_locations,
[l['url'] for l in self.foo_bar_location])
def test_decrypt_locations_on_get(self):
url_loc = ['ping', 'pong']
orig_locations = [{'url': l, 'metadata': {}} for l in url_loc]
encrypted_locs = [crypt.urlsafe_encrypt(self.crypt_key, l)
for l in url_loc]
encrypted_locations = [{'url': l, 'metadata': {}}
for l in encrypted_locs]
self.assertNotEqual(encrypted_locations, orig_locations)
db_data = _db_fixture(UUID1, owner=TENANT1,
locations=encrypted_locations)
self.db.image_create(None, db_data)
image = self.image_repo.get(UUID1)
self.assertEqual(image.locations, orig_locations)
def test_decrypt_locations_on_list(self):
url_loc = ['ping', 'pong']
orig_locations = [{'url': l, 'metadata': {}} for l in url_loc]
encrypted_locs = [crypt.urlsafe_encrypt(self.crypt_key, l)
for l in url_loc]
encrypted_locations = [{'url': l, 'metadata': {}}
for l in encrypted_locs]
self.assertNotEqual(encrypted_locations, orig_locations)
db_data = _db_fixture(UUID1, owner=TENANT1,
locations=encrypted_locations)
self.db.image_create(None, db_data)
image = self.image_repo.list()[0]
self.assertEqual(image.locations, orig_locations)
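# Editor's sketch of the round trip exercised by the two tests above (the 16-character
# key mirrors self.crypt_key; the URL is hypothetical):
#   token = crypt.urlsafe_encrypt('0123456789abcdef', 'file:///path/to/image')
#   assert crypt.urlsafe_decrypt('0123456789abcdef', token) == 'file:///path/to/image'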
class TestImageMemberRepo(test_utils.BaseTestCase):
def setUp(self):
super(TestImageMemberRepo, self).setUp()
self.db = unit_test_utils.FakeDB()
self.db.reset()
self.context = glance.context.RequestContext(
user=USER1, tenant=TENANT1)
self.image_repo = glance.db.ImageRepo(self.context, self.db)
self.image_member_factory = glance.domain.ImageMemberFactory()
self._create_images()
self._create_image_members()
image = self.image_repo.get(UUID1)
self.image_member_repo = glance.db.ImageMemberRepo(self.context,
self.db, image)
def _create_images(self):
self.images = [
_db_fixture(UUID1, owner=TENANT1, name='1', size=256,
status='active'),
_db_fixture(UUID2, owner=TENANT1, name='2',
size=512, is_public=False),
]
[self.db.image_create(None, image) for image in self.images]
self.db.image_tag_set_all(None, UUID1, ['ping', 'pong'])
def _create_image_members(self):
self.image_members = [
_db_image_member_fixture(UUID1, TENANT2),
_db_image_member_fixture(UUID1, TENANT3),
]
[self.db.image_member_create(None, image_member)
for image_member in self.image_members]
def test_list(self):
image_members = self.image_member_repo.list()
image_member_ids = set([i.member_id for i in image_members])
self.assertEqual(set([TENANT2, TENANT3]), image_member_ids)
def test_list_no_members(self):
image = self.image_repo.get(UUID2)
self.image_member_repo_uuid2 = glance.db.ImageMemberRepo(
self.context, self.db, image)
image_members = self.image_member_repo_uuid2.list()
image_member_ids = set([i.member_id for i in image_members])
self.assertEqual(set([]), image_member_ids)
def test_save_image_member(self):
image_member = self.image_member_repo.get(TENANT2)
image_member.status = 'accepted'
image_member_updated = self.image_member_repo.save(image_member)
self.assertEqual(image_member.id, image_member_updated.id)
self.assertEqual(image_member_updated.status, 'accepted')
def test_add_image_member(self):
image = self.image_repo.get(UUID1)
image_member = self.image_member_factory.new_image_member(image,
TENANT4)
self.assertTrue(image_member.id is None)
retreived_image_member = self.image_member_repo.add(image_member)
self.assertEqual(retreived_image_member.id, image_member.id)
self.assertEqual(retreived_image_member.image_id,
image_member.image_id)
self.assertEqual(retreived_image_member.member_id,
image_member.member_id)
self.assertEqual(retreived_image_member.status,
'pending')
def test_remove_image_member(self):
image_member = self.image_member_repo.get(TENANT2)
self.image_member_repo.remove(image_member)
self.assertRaises(exception.NotFound, self.image_member_repo.get,
TENANT2)
def test_remove_image_member_does_not_exist(self):
image = self.image_repo.get(UUID2)
fake_member = glance.domain.ImageMemberFactory()\
.new_image_member(image, TENANT4)
self.assertRaises(exception.NotFound, self.image_member_repo.remove,
fake_member)
| 0.000672 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_partner_balance(osv.osv_memory):
"""
This wizard will provide the partner balance report by periods, between any two dates.
"""
_inherit = 'account.common.partner.report'
_name = 'account.partner.balance'
_description = 'Print Account Partner Balance'
_columns = {
'display_partner': fields.selection([('non-zero_balance', 'With balance not equal to 0'), ('all', 'All Partners')],
'Display Partners'),
'journal_ids': fields.many2many('account.journal', 'account_partner_balance_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
_defaults = {
# 'initial_balance': True,
'display_partner': 'non-zero_balance',
}
def _print_report(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
data = self.pre_print_report(cr, uid, ids, data, context=context)
data['form'].update(self.read(cr, uid, ids, ['display_partner'])[0])
return {
'type': 'ir.actions.report.xml',
'report_name': 'account.partner.balance',
'datas': data,
}
account_partner_balance()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 0.004371 |
import hashlib
import hmac
import logging
import requests
from datetime import timedelta
from django.utils import timezone
from allauth.socialaccount import app_settings, providers
from allauth.socialaccount.helpers import (
complete_social_login,
render_authentication_error,
)
from allauth.socialaccount.models import SocialLogin, SocialToken
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .forms import FacebookConnectForm
from .provider import GRAPH_API_URL, GRAPH_API_VERSION, FacebookProvider
logger = logging.getLogger(__name__)
def compute_appsecret_proof(app, token):
# Generate an appsecret_proof parameter to secure the Graph API call
# see https://developers.facebook.com/docs/graph-api/securing-requests
msg = token.token.encode('utf-8')
key = app.secret.encode('utf-8')
appsecret_proof = hmac.new(
key,
msg,
digestmod=hashlib.sha256).hexdigest()
return appsecret_proof
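# Editor's sketch of the same proof computed standalone (secret and token are hypothetical):
#   import hashlib, hmac
#   proof = hmac.new(b'app-secret', b'user-access-token', digestmod=hashlib.sha256).hexdigest()
#   # sent as the 'appsecret_proof' query parameter alongside 'access_token'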
def fb_complete_login(request, app, token):
provider = providers.registry.by_id(FacebookProvider.id, request)
resp = requests.get(
GRAPH_API_URL + '/me',
params={
'fields': ','.join(provider.get_fields()),
'access_token': token.token,
'appsecret_proof': compute_appsecret_proof(app, token)
})
resp.raise_for_status()
extra_data = resp.json()
login = provider.sociallogin_from_response(request, extra_data)
return login
class FacebookOAuth2Adapter(OAuth2Adapter):
provider_id = FacebookProvider.id
provider_default_auth_url = (
'https://www.facebook.com/{}/dialog/oauth'.format(
GRAPH_API_VERSION))
settings = app_settings.PROVIDERS.get(provider_id, {})
authorize_url = settings.get('AUTHORIZE_URL', provider_default_auth_url)
access_token_url = GRAPH_API_URL + '/oauth/access_token'
expires_in_key = 'expires_in'
def complete_login(self, request, app, access_token, **kwargs):
return fb_complete_login(request, app, access_token)
oauth2_login = OAuth2LoginView.adapter_view(FacebookOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(FacebookOAuth2Adapter)
def login_by_token(request):
ret = None
auth_exception = None
if request.method == 'POST':
form = FacebookConnectForm(request.POST)
if form.is_valid():
try:
provider = providers.registry.by_id(
FacebookProvider.id, request)
login_options = provider.get_fb_login_options(request)
app = provider.get_app(request)
access_token = form.cleaned_data['access_token']
expires_at = None
if login_options.get('auth_type') == 'reauthenticate':
info = requests.get(
GRAPH_API_URL + '/oauth/access_token_info',
params={'client_id': app.client_id,
'access_token': access_token}).json()
nonce = provider.get_nonce(request, pop=True)
ok = nonce and nonce == info.get('auth_nonce')
else:
ok = True
if ok and provider.get_settings().get('EXCHANGE_TOKEN'):
resp = requests.get(
GRAPH_API_URL + '/oauth/access_token',
params={'grant_type': 'fb_exchange_token',
'client_id': app.client_id,
'client_secret': app.secret,
'fb_exchange_token': access_token}).json()
access_token = resp['access_token']
expires_in = resp.get('expires_in')
if expires_in:
expires_at = timezone.now() + timedelta(
seconds=int(expires_in))
if ok:
token = SocialToken(app=app,
token=access_token,
expires_at=expires_at)
login = fb_complete_login(request, app, token)
login.token = token
login.state = SocialLogin.state_from_request(request)
ret = complete_social_login(request, login)
except requests.RequestException as e:
logger.exception('Error accessing FB user profile')
auth_exception = e
if not ret:
ret = render_authentication_error(request,
FacebookProvider.id,
exception=auth_exception)
return ret
| 0 |
import os
import sys
bdLibPath = os.path.abspath(sys.argv[0] + "..")
if bdLibPath not in sys.path: sys.path.append(bdLibPath)
from _lib import *
import unittest
class SmokeTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_001_GoogleSearch(self):
LaunchBrowser("chrome", "www.google.com")
type(GoogleMap.google_search_input, "Telerik academy")
wait(GoogleMap.google_telerik_link, 10)
assert exists(GoogleMap.google_telerik_link)
def test_002_DragAndDrop(self):
LaunchBrowser("chrome", "http://www.dhtmlgoodies.com/scripts/drag-drop-custom/demo-drag-drop-3.html")
dragDrop(CapitalsMap.oslo, CapitalsMap.norway)
dragDrop(CapitalsMap.stockholm, CapitalsMap.sweden)
dragDrop(CapitalsMap.washington, CapitalsMap.us)
dragDrop(CapitalsMap.copenhagen, CapitalsMap.denmark)
dragDrop(CapitalsMap.seoul, CapitalsMap.southKorea)
dragDrop(CapitalsMap.rome, CapitalsMap.italy)
dragDrop(CapitalsMap.madrid, CapitalsMap.spain)
assert exists(CapitalsMap.correctRome)
assert exists(CapitalsMap.correctMadrid)
assert exists(CapitalsMap.correctOslo)
assert exists(CapitalsMap.correctCopenhagen)
assert exists(CapitalsMap.correctSeoul)
assert exists(CapitalsMap.correctStockholm)
assert exists(CapitalsMap.correctWashington)
def test_003_CalculatorFunctionsCorrectly(self):
LaunchCalculator();
click(CalculatorMap.two)
click(CalculatorMap.subtract)
click(CalculatorMap.four)
click(CalculatorMap.equals)
assert exists(CalculatorMap.subtractionResult)
click(CalculatorMap.multiply)
click(CalculatorMap.three)
click(CalculatorMap.equals)
assert exists(CalculatorMap.multiplyResult)
click(CalculatorMap.add)
click(CalculatorMap.one)
click(CalculatorMap.one)
click(CalculatorMap.equals)
assert exists (CalculatorMap.additionResult)
click(CalculatorMap.divide)
click(CalculatorMap.two)
click(CalculatorMap.equals)
assert exists(CalculatorMap.divisionResult)
click(CalculatorMap.divide)
click(CalculatorMap.zero)
assert exists(CalculatorMap.divisionByZeroMessage)
class Tests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_003_CalculatorFunctionsCorrectly(self):
LaunchCalculator();
click(CalculatorMap.two)
click(CalculatorMap.subtract)
click(CalculatorMap.four)
click(CalculatorMap.equals)
assert exists(CalculatorMap.subtractionResult)
click(CalculatorMap.multiply)
click(CalculatorMap.three)
click(CalculatorMap.equals)
assert exists(CalculatorMap.multiplyResult)
click(CalculatorMap.add)
click(CalculatorMap.one)
click(CalculatorMap.zero)
click(CalculatorMap.equals)
assert exists (CalculatorMap.additionResult)
click(CalculatorMap.divide)
click(CalculatorMap.two)
click(CalculatorMap.equals)
assert exists(CalculatorMap.divisionResult)
click(CalculatorMap.divide)
click(CalculatorMap.zero)
click(CalculatorMap.equals)
assert exists(CalculatorMap.divisionByZeroMessage)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(Tests)
outfile = open("report.html", "w")
runner = HTMLTestRunner.HTMLTestRunner(stream=outfile, title='SmokeTests Report')
runner.run(suite)
outfile.close()
| 0.007339 |
# -*- coding: utf-8 -*-
# This file is part of the Horus Project
__author__ = 'Jesús Arroyo Torrens <[email protected]>'
__copyright__ = 'Copyright (C) 2014-2016 Mundo Reader S.L.'
__license__ = 'GNU General Public License v2 http://www.gnu.org/licenses/gpl2.html'
import numpy as np
from horus import Singleton
@Singleton
class Pattern(object):
def __init__(self):
self._rows = 0
self._columns = 0
self._square_width = 0
self.origin_distance = 0
@property
def rows(self):
return self._rows
@rows.setter
def rows(self, value):
value = self.to_int(value)
if self._rows != value:
self._rows = value
self._generate_object_points()
def set_rows(self, value):
self.rows = value
@property
def columns(self):
return self._columns
@columns.setter
def columns(self, value):
value = self.to_int(value)
if self._columns != value:
self._columns = value
self._generate_object_points()
def set_columns(self, value):
self.columns = value
@property
def square_width(self):
return self._square_width
@square_width.setter
def square_width(self, value):
value = self.to_float(value)
if self._square_width != value:
self._square_width = value
self._generate_object_points()
def set_square_width(self, value):
self.square_width = value
def _generate_object_points(self):
objp = np.zeros((self.rows * self.columns, 3), np.float32)
objp[:, :2] = np.mgrid[0:self.columns, 0:self.rows].T.reshape(-1, 2)
objp = np.multiply(objp, self.square_width)
self.object_points = objp
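# Editor's sketch of the grid produced above (rows=2, columns=3, square_width=10.0 are
# hypothetical values): object_points becomes a (6, 3) float32 array
#   [[ 0,  0, 0], [10,  0, 0], [20,  0, 0],
#    [ 0, 10, 0], [10, 10, 0], [20, 10, 0]]
# i.e. the chessboard corners laid out row by row in pattern units, with z = 0.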
def set_origin_distance(self, value):
self.origin_distance = self.to_float(value)
def to_int(self, value):
try:
value = int(value)
if value > 0:
return value
else:
return 0
except:
return 0
def to_float(self, value):
try:
value = float(value)
if value > 0.0:
return value
else:
return 0.0
except:
return 0.0
| 0.001311 |
'''
Image
=====
The :class:`Image` widget is used to display an image::
wimg = Image(source='mylogo.png')
Asynchronous Loading
--------------------
To load an image asynchronously (for example from an external webserver), use
the :class:`AsyncImage` subclass::
aimg = AsyncImage(source='http://mywebsite.com/logo.png')
This can be useful as it prevents your application from waiting until the image
is loaded. If you want to display large images or retrieve them from URLs,
using :class:`AsyncImage` will allow these resources to be retrieved on a
background thread without blocking your application.
Alignment
---------
By default, the image is centered and fits inside the widget bounding box.
If you don't want that, you can set `allow_stretch` to True and `keep_ratio`
to False.
You can also inherit from Image and create your own style.
For example, if you want your image to be greater than the size of your widget,
you could do::
class FullImage(Image):
pass
And in your kivy language file::
<-FullImage>:
canvas:
Color:
rgb: (1, 1, 1)
Rectangle:
texture: self.texture
size: self.width + 20, self.height + 20
pos: self.x - 10, self.y - 10
'''
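# Editor's usage sketch combining the properties documented below (file name hypothetical):
#   img = Image(source='photo.png', allow_stretch=True, keep_ratio=False)
#   # the texture now fills the widget's bounding box, ignoring the image's aspect ratio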
__all__ = ('Image', 'AsyncImage')
from kivy.uix.widget import Widget
from kivy.core.image import Image as CoreImage
from kivy.resources import resource_find
from kivy.properties import StringProperty, ObjectProperty, ListProperty, \
AliasProperty, BooleanProperty, NumericProperty
from kivy.logger import Logger
# delayed imports
Loader = None
class Image(Widget):
'''Image class, see module documentation for more information.
'''
source = StringProperty(None)
'''Filename / source of your image.
:attr:`source` is a :class:`~kivy.properties.StringProperty` and
defaults to None.
'''
texture = ObjectProperty(None, allownone=True)
'''Texture object of the image. The texture represents the original, loaded
image texture. It is stretched and positioned during rendering according to
the :attr:`allow_stretch` and :attr:`keep_ratio` properties.
Depending on how the texture was created, the value will be a
:class:`~kivy.graphics.texture.Texture` or a
:class:`~kivy.graphics.texture.TextureRegion` object.
:attr:`texture` is a :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
texture_size = ListProperty([0, 0])
'''Texture size of the image. This represents the original, loaded image
texture size.
.. warning::
The texture size is set after the texture property. So if you listen to
the change on :attr:`texture`, the property texture_size will not be
up-to-date. Use self.texture.size instead.
'''
def get_image_ratio(self):
if self.texture:
return self.texture.width / float(self.texture.height)
return 1.
mipmap = BooleanProperty(False)
'''Indicate if you want OpenGL mipmapping to be applied to the texture.
Read :ref:`mipmap` for more information.
.. versionadded:: 1.0.7
:attr:`mipmap` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
image_ratio = AliasProperty(get_image_ratio, None, bind=('texture', ))
'''Ratio of the image (width / float(height)).
:attr:`image_ratio` is a :class:`~kivy.properties.AliasProperty` and is
read-only.
'''
color = ListProperty([1, 1, 1, 1])
'''Image color, in the format (r, g, b, a). This attribute can be used to
'tint' an image. Be careful: if the source image is not gray/white, the
color will not really work as expected.
.. versionadded:: 1.0.6
:attr:`color` is a :class:`~kivy.properties.ListProperty` and defaults to
[1, 1, 1, 1].
'''
allow_stretch = BooleanProperty(False)
'''If True, the normalized image size will be maximized to fit in the image
box. Otherwise, if the box is too tall, the image will not be
stretched more than 1:1 pixels.
.. versionadded:: 1.0.7
:attr:`allow_stretch` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
keep_ratio = BooleanProperty(True)
'''If False along with allow_stretch being True, the normalized image
size will be maximized to fit in the image box and ignores the aspect
ratio of the image.
Otherwise, if the box is too tall, the image will not be stretched more
than 1:1 pixels.
.. versionadded:: 1.0.8
:attr:`keep_ratio` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
keep_data = BooleanProperty(False)
'''If True, the underlying _coreimage will store the raw image data.
This is useful when performing pixel based collision detection.
.. versionadded:: 1.3.0
:attr:`keep_data` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
anim_delay = NumericProperty(.25)
'''Delay the animation if the image is sequenced (like an animated gif).
If anim_delay is set to -1, the animation will be stopped.
.. versionadded:: 1.0.8
:attr:`anim_delay` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.25 (4 FPS).
'''
anim_loop = NumericProperty(0)
'''Number of loops to play then stop animating. 0 means keep animating.
.. versionadded:: 1.9.0
:attr:`anim_loop` is a :class:`~kivy.properties.NumericProperty` defaults
to 0.
'''
nocache = BooleanProperty(False)
'''If this property is set True, the image will not be added to the
internal cache. The cache will simply ignore any calls trying to
append the core image.
.. versionadded:: 1.6.0
:attr:`nocache` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
def get_norm_image_size(self):
if not self.texture:
return self.size
ratio = self.image_ratio
w, h = self.size
tw, th = self.texture.size
# ensure that the width is always maximized to the container width
if self.allow_stretch:
if not self.keep_ratio:
return w, h
iw = w
else:
iw = min(w, tw)
# calculate the appropriate height
ih = iw / ratio
# if the height is too large, take the height of the container
# and calculate appropriate width. no need to test further. :)
if ih > h:
if self.allow_stretch:
ih = h
else:
ih = min(h, th)
iw = ih * ratio
return iw, ih
norm_image_size = AliasProperty(get_norm_image_size, None, bind=(
'texture', 'size', 'image_ratio', 'allow_stretch'))
'''Normalized image size within the widget box.
This size will always fit the widget size and will preserve the image
ratio.
:attr:`norm_image_size` is a :class:`~kivy.properties.AliasProperty` and is
read-only.
'''
def __init__(self, **kwargs):
self._coreimage = None
self._loops = 0
super(Image, self).__init__(**kwargs)
fbind = self.fbind
update = self.texture_update
fbind('source', update)
fbind('mipmap', update)
if self.source:
update()
self.on_anim_delay(self, kwargs.get('anim_delay', .25))
def texture_update(self, *largs):
if not self.source:
self.texture = None
else:
filename = resource_find(self.source)
self._loops = 0
if filename is None:
return Logger.error('Image: Error reading file {filename}'.
format(filename=self.source))
mipmap = self.mipmap
if self._coreimage is not None:
self._coreimage.unbind(on_texture=self._on_tex_change)
try:
self._coreimage = ci = CoreImage(filename, mipmap=mipmap,
anim_delay=self.anim_delay,
keep_data=self.keep_data,
nocache=self.nocache)
except Exception:
self._coreimage = ci = None
if ci:
ci.bind(on_texture=self._on_tex_change)
self.texture = ci.texture
def on_anim_delay(self, instance, value):
self._loops = 0
if self._coreimage is None:
return
self._coreimage.anim_delay = value
if value < 0:
self._coreimage.anim_reset(False)
def on_texture(self, instance, value):
if value is not None:
self.texture_size = list(value.size)
def _on_tex_change(self, *largs):
# update texture from core image
self.texture = self._coreimage.texture
ci = self._coreimage
if self.anim_loop and ci._anim_index == len(ci._image.textures) - 1:
self._loops += 1
if self.anim_loop == self._loops:
ci.anim_reset(False)
self._loops = 0
def reload(self):
'''Reload image from disk. This facilitates re-loading of
images from disk in case the image content changes.
.. versionadded:: 1.3.0
Usage::
im = Image(source = '1.jpg')
# -- do something --
im.reload()
# image will be re-loaded from disk
'''
try:
self._coreimage.remove_from_cache()
except AttributeError:
pass
olsource = self.source
self.source = ''
self.source = olsource
def on_nocache(self, *args):
if self.nocache and self._coreimage:
self._coreimage.remove_from_cache()
self._coreimage._nocache = True
class AsyncImage(Image):
'''Asynchronous Image class. See the module documentation for more
information.
.. note::
The AsyncImage is a specialized form of the Image class. You may
want to refer to the :mod:`~kivy.loader` documentation and in
particular, the :class:`~kivy.loader.ProxyImage` for more detail
on how to handle events around asynchronous image loading.
'''
def __init__(self, **kwargs):
self._coreimage = None
super(AsyncImage, self).__init__(**kwargs)
global Loader
if not Loader:
from kivy.loader import Loader
self.fbind('source', self._load_source)
if self.source:
self._load_source()
self.on_anim_delay(self, kwargs.get('anim_delay', .25))
def _load_source(self, *args):
source = self.source
if not source:
if self._coreimage is not None:
self._coreimage.unbind(on_texture=self._on_tex_change)
self.texture = None
self._coreimage = None
else:
if not self.is_uri(source):
source = resource_find(source)
self._coreimage = image = Loader.image(source,
nocache=self.nocache, mipmap=self.mipmap,
anim_delay=self.anim_delay)
image.bind(on_load=self._on_source_load)
image.bind(on_texture=self._on_tex_change)
self.texture = image.texture
def _on_source_load(self, value):
image = self._coreimage.image
if not image:
return
self.texture = image.texture
def is_uri(self, filename):
proto = filename.split('://', 1)[0]
return proto in ('http', 'https', 'ftp', 'smb')
def _on_tex_change(self, *largs):
if self._coreimage:
self.texture = self._coreimage.texture
def texture_update(self, *largs):
pass
| 0.000337 |
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: elasticache_facts
short_description: Retrieve facts for AWS Elasticache clusters
description:
- Retrieve facts from AWS Elasticache clusters
version_added: "2.5"
options:
name:
description:
- The name of an Elasticache cluster
author:
- Will Thames (@willthames)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: obtain all Elasticache facts
elasticache_facts:
- name: obtain all facts for a single Elasticache cluster
elasticache_facts:
name: test_elasticache
'''
RETURN = '''
elasticache_clusters:
description: List of elasticache clusters
returned: always
type: complex
contains:
auto_minor_version_upgrade:
description: Whether to automatically upgrade to minor versions
returned: always
type: bool
sample: true
cache_cluster_create_time:
description: Date and time cluster was created
returned: always
type: string
sample: '2017-09-15T05:43:46.038000+00:00'
cache_cluster_id:
description: ID of the cache cluster
returned: always
type: string
sample: abcd-1234-001
cache_cluster_status:
description: Status of Elasticache cluster
returned: always
type: string
sample: available
cache_node_type:
description: Instance type of Elasticache nodes
returned: always
type: string
sample: cache.t2.micro
cache_nodes:
description: List of Elasticache nodes in the cluster
returned: always
type: complex
contains:
cache_node_create_time:
description: Date and time node was created
returned: always
type: string
sample: '2017-09-15T05:43:46.038000+00:00'
cache_node_id:
description: ID of the cache node
returned: always
type: string
sample: '0001'
cache_node_status:
description: Status of the cache node
returned: always
type: string
sample: available
customer_availability_zone:
description: Availability Zone in which the cache node was created
returned: always
type: string
sample: ap-southeast-2b
endpoint:
description: Connection details for the cache node
returned: always
type: complex
contains:
address:
description: URL of the cache node endpoint
returned: always
type: string
sample: abcd-1234-001.bgiz2p.0001.apse2.cache.amazonaws.com
port:
description: Port of the cache node endpoint
returned: always
type: int
sample: 6379
parameter_group_status:
description: Status of the Cache Parameter Group
returned: always
type: string
sample: in-sync
cache_parameter_group:
description: Contents of the Cache Parameter Group
returned: always
type: complex
contains:
cache_node_ids_to_reboot:
description: Cache nodes which need to be rebooted for parameter changes to be applied
returned: always
type: list
sample: []
cache_parameter_group_name:
description: Name of the cache parameter group
returned: always
type: string
sample: default.redis3.2
parameter_apply_status:
description: Status of parameter updates
returned: always
type: string
sample: in-sync
cache_security_groups:
description: Security Groups used by the cache
returned: always
type: list
sample:
- 'sg-abcd1234'
cache_subnet_group_name:
description: Elasticache Subnet Group used by the cache
returned: always
type: string
sample: abcd-subnet-group
client_download_landing_page:
description: URL of client download web page
returned: always
type: string
sample: 'https://console.aws.amazon.com/elasticache/home#client-download:'
engine:
description: Engine used by elasticache
returned: always
type: string
sample: redis
engine_version:
description: Version of elasticache engine
returned: always
type: string
sample: 3.2.4
notification_configuration:
description: Configuration of notifications
returned: if notifications are enabled
type: complex
contains:
topic_arn:
description: ARN of notification destination topic
returned: if notifications are enabled
type: string
sample: arn:aws:sns:*:123456789012:my_topic
topic_name:
description: Name of notification destination topic
returned: if notifications are enabled
type: string
sample: MyTopic
num_cache_nodes:
description: Number of Cache Nodes
returned: always
type: int
sample: 1
pending_modified_values:
description: Values that are pending modification
returned: always
type: complex
contains: {}
preferred_availability_zone:
description: Preferred Availability Zone
returned: always
type: string
sample: ap-southeast-2b
preferred_maintenance_window:
description: Time slot for preferred maintenance window
returned: always
type: string
sample: sat:12:00-sat:13:00
replication_group_id:
description: Replication Group Id
returned: always
type: string
sample: replication-001
security_groups:
description: List of Security Groups associated with Elasticache
returned: always
type: complex
contains:
security_group_id:
description: Security Group ID
returned: always
type: string
sample: sg-abcd1234
status:
description: Status of Security Group
returned: always
type: string
sample: active
tags:
description: Tags applied to the elasticache cluster
returned: always
type: complex
sample:
Application: web
Environment: test
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
@AWSRetry.exponential_backoff()
def describe_cache_clusters_with_backoff(client, cluster_id=None):
paginator = client.get_paginator('describe_cache_clusters')
params = dict(ShowCacheNodeInfo=True)
if cluster_id:
params['CacheClusterId'] = cluster_id
try:
response = paginator.paginate(**params).build_full_result()
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'CacheClusterNotFound':
return []
raise
except botocore.exceptions.BotoCoreError:
raise
return response['CacheClusters']
@AWSRetry.exponential_backoff()
def get_elasticache_tags_with_backoff(client, cluster_id):
return client.list_tags_for_resource(ResourceName=cluster_id)['TagList']
def get_aws_account_id(module):
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='sts',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Can't authorize connection")
try:
return client.get_caller_identity()['Account']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain AWS account id")
def get_elasticache_clusters(client, module, region):
try:
clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get('name'))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain cache cluster info")
account_id = get_aws_account_id(module)
results = []
for cluster in clusters:
cluster = camel_dict_to_snake_dict(cluster)
arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster['cache_cluster_id'])
try:
tags = get_elasticache_tags_with_backoff(client, arn)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't get tags for cluster %s")
cluster['tags'] = boto3_tag_list_to_ansible_dict(tags)
results.append(cluster)
return results
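# --- Added illustration (not part of the original module): example of the ARN
# format assembled above, with a placeholder region, account id and cluster id
# taken from the samples in RETURN:
#   arn:aws:elasticache:ap-southeast-2:123456789012:cluster:abcd-1234-001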
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=False),
)
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='elasticache',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
module.exit_json(elasticache_clusters=get_elasticache_clusters(client, module, region))
if __name__ == '__main__':
main()
| 0.001913 |
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
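# --- Added illustration (not part of the original file): round-trip of the
# qualified-target helpers above, using a made-up build file and target name.
#   QualifiedTarget('dir/foo.gyp', 'bar', 'host')   -> 'dir/foo.gyp:bar#host'
#   ParseQualifiedTarget('dir/foo.gyp:bar#host')    -> ['dir/foo.gyp', 'bar', 'host']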
@memoize
def RelativePath(path, relative_to):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# Convert to absolute (and therefore normalized paths).
path = os.path.abspath(path)
relative_to = os.path.abspath(relative_to)
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
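# --- Added illustration (not part of the original file): a few encodings that
# follow from the _quote/_escape patterns documented above (values shown as
# Python string literals, arguments are made up):
#   EncodePOSIXShellArgument('hello')        -> 'hello'          (nothing to quote)
#   EncodePOSIXShellArgument('hello world')  -> '"hello world"'  (space forces quoting)
#   EncodePOSIXShellArgument('a"b')          -> 'a\\"b'           (" escaped, no quotes added)
#   EncodePOSIXShellList(['echo', '$HOME'])  -> 'echo "$HOME"'   ($ quoted but left expandable)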
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer:
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
'sunos5': 'solaris',
'freebsd7': 'freebsd',
'freebsd8': 'freebsd',
'freebsd9': 'freebsd',
}
flavor = flavors.get(sys.platform, 'linux')
return params.get('flavor', flavor)
def CopyTool(flavor, out_path):
"""Finds (mac|sun|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
prefix = { 'solaris': 'sun', 'mac': 'mac', 'win': 'win' }.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the nodes in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
return re.findall(r'\$\(([^)]*)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
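# --- Added illustration (not part of the original file): a minimal end-to-end
# use of TopologicallySorted with an explicit edge map; the graph and names
# are made up for illustration.
def _ExampleTopologicalSort():
  deps = {'app': ['lib', 'util'], 'lib': ['util'], 'util': []}
  # Dependents come before their dependencies, matching the docstring above:
  # ['app', 'lib', 'util']
  return TopologicallySorted(deps.keys(), lambda node: deps[node])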
| 0.011239 |
#!/usr/bin/env python3
from ev3dev.ev3 import *
from time import sleep
from PIL import Image
import ev3dev.ev3 as ev3
#connect infrared and check it's connected.
ir = InfraredSensor()
assert ir.connected, "Connect a single infrared sensor to port"
#put the infrared sensor into proximity mode.
ir.mode = 'IR-PROX'
#connect color sensor and check it's connected.
cl = ColorSensor()
assert cl.connected
#put the color sensor into color mode
cl.mode = 'COL-COLOR'
#add condition here
#while
distance = ir.value()
if distance < 500:
Leds.set_color(Leds.LEFT, Leds.GREEN)
lcd = Screen()
logo = Image.open('chase.png')
lcd.image.paste(logo, (0,0))
lcd.update()
ev3.Sound.speak('Welcome to JP Morgan Chase. Who are you looking for?').wait()
dest = cl.value()
else:
Leds.all_off()
sleep(2)
#create motor objects
lm = LargeMotor('outB')
rm = LargeMotor('outC')
destinations = {1: (100, 200), 2:(100, 100), 5:(300, 500)}
desk_speech = 'Taking you to desk number {}'.format(dest)
ev3.Sound.speak(desk_speech).wait()
#go straight for 3 feet (in degrees)
lm.run_to_rel_pos(position_sp=destinations[dest][0], speed_sp=300, stop_action="brake")
rm.run_to_rel_pos(position_sp=destinations[dest][0], speed_sp=300, stop_action="brake")
lm.wait_while('running')
rm.wait_while('running')
#verify the motor is no longer running
#Sound.beep()
#turn right
lm.run_to_rel_pos(position_sp=300, speed_sp=360, stop_action="brake")
rm.run_to_rel_pos(position_sp=-300, speed_sp=360, stop_action="brake")
#go straight 2 feet (in degrees)
lm.wait_while('running')
rm.wait_while('running')
lm.run_to_rel_pos(position_sp=destinations[dest][1], speed_sp=900, stop_action="brake")
rm.run_to_rel_pos(position_sp=destinations[dest][1], speed_sp=900, stop_action="brake")
| 0.018676 |
import angr
from cle.backends.externs.simdata.io_file import io_file_data_for_arch
######################################
# fdopen
#
# Reference for implementation:
# glibc-2.25/libio/iofdopen.c
######################################
def mode_to_flag(mode):
# TODO improve this: handle mode = strings
if mode[-1:] == b'b': # lol who uses windows
mode = mode[:-1]
all_modes = {
b"r" : angr.storage.file.Flags.O_RDONLY,
b"r+" : angr.storage.file.Flags.O_RDWR,
b"w" : angr.storage.file.Flags.O_WRONLY | angr.storage.file.Flags.O_CREAT,
b"w+" : angr.storage.file.Flags.O_RDWR | angr.storage.file.Flags.O_CREAT,
b"a" : angr.storage.file.Flags.O_WRONLY | angr.storage.file.Flags.O_CREAT | angr.storage.file.Flags.O_APPEND,
b"a+" : angr.storage.file.Flags.O_RDWR | angr.storage.file.Flags.O_CREAT | angr.storage.file.Flags.O_APPEND
}
if mode not in all_modes:
raise angr.SimProcedureError('unsupported file open mode %s' % mode)
return all_modes[mode]
class fdopen(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, fd_int, m_addr):
#pylint:disable=unused-variable
strlen = angr.SIM_PROCEDURES['libc']['strlen']
m_strlen = self.inline_call(strlen, m_addr)
m_expr = self.state.memory.load(m_addr, m_strlen.max_null_index, endness='Iend_BE')
mode = self.state.solver.eval(m_expr, cast_to=bytes)
# TODO: handle append and other mode subtleties
fd = self.state.solver.eval(fd_int)
if fd not in self.state.posix.fd:
# if file descriptor not found return NULL
return 0
else:
# Allocate a FILE struct in heap
malloc = angr.SIM_PROCEDURES['libc']['malloc']
io_file_data = io_file_data_for_arch(self.state.arch)
file_struct_ptr = self.inline_call(malloc, io_file_data['size']).ret_expr
# Write the fd
fd_bvv = self.state.solver.BVV(fd, 4 * 8) # int
self.state.memory.store(file_struct_ptr + io_file_data['fd'],
fd_bvv,
endness=self.state.arch.memory_endness)
return file_struct_ptr
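# --- Added illustration (not part of the original file): expected results of
# mode_to_flag above, writing Flags for angr.storage.file.Flags:
#   mode_to_flag(b"r")    -> Flags.O_RDONLY
#   mode_to_flag(b"w+b")  -> Flags.O_RDWR | Flags.O_CREAT   (trailing 'b' is stripped)
#   mode_to_flag(b"x")    -> raises angr.SimProcedureError  (unsupported mode)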
| 0.007515 |
# encoding: utf-8
"""Event loop integration for the ZeroMQ-based kernels."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import sys
import platform
import zmq
from distutils.version import LooseVersion as V
from traitlets.config.application import Application
from IPython.utils import io
def _use_appnope():
"""Should we use appnope for dealing with OS X app nap?
Checks if we are on OS X 10.9 or greater.
"""
return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9')
def _notify_stream_qt(kernel, stream):
from IPython.external.qt_for_kernel import QtCore
if _use_appnope() and kernel._darwin_app_nap:
from appnope import nope_scope as context
else:
from contextlib import contextmanager
@contextmanager
def context():
yield
def process_stream_events():
while stream.getsockopt(zmq.EVENTS) & zmq.POLLIN:
with context():
kernel.do_one_iteration()
fd = stream.getsockopt(zmq.FD)
notifier = QtCore.QSocketNotifier(fd, QtCore.QSocketNotifier.Read, kernel.app)
notifier.activated.connect(process_stream_events)
# mapping of keys to loop functions
loop_map = {
'inline': None,
'nbagg': None,
'notebook': None,
'ipympl': None,
None : None,
}
def register_integration(*toolkitnames):
"""Decorator to register an event loop to integrate with the IPython kernel
The decorator takes names to register the event loop as for the %gui magic.
You can provide alternative names for the same toolkit.
The decorated function should take a single argument, the IPython kernel
instance, arrange for the event loop to call ``kernel.do_one_iteration()``
at least every ``kernel._poll_interval`` seconds, and start the event loop.
:mod:`ipykernel.eventloops` provides and registers such functions
for a few common event loops.
"""
def decorator(func):
for name in toolkitnames:
loop_map[name] = func
return func
return decorator
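# --- Added illustration (not part of this module): a hedged sketch of how a
# custom toolkit could hook in via the decorator above. The name 'myloop' and
# the busy-wait body are purely illustrative, not a real integration.
#
#   @register_integration('myloop')
#   def loop_myloop(kernel):
#       import time
#       while True:
#           time.sleep(kernel._poll_interval)
#           kernel.do_one_iteration()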
@register_integration('qt', 'qt4')
def loop_qt4(kernel):
"""Start a kernel with PyQt4 event loop integration."""
from IPython.lib.guisupport import get_app_qt4, start_event_loop_qt4
kernel.app = get_app_qt4([" "])
kernel.app.setQuitOnLastWindowClosed(False)
for s in kernel.shell_streams:
_notify_stream_qt(kernel, s)
start_event_loop_qt4(kernel.app)
@register_integration('qt5')
def loop_qt5(kernel):
"""Start a kernel with PyQt5 event loop integration."""
os.environ['QT_API'] = 'pyqt5'
return loop_qt4(kernel)
@register_integration('wx')
def loop_wx(kernel):
"""Start a kernel with wx event loop support."""
import wx
from IPython.lib.guisupport import start_event_loop_wx
if _use_appnope() and kernel._darwin_app_nap:
# we don't hook up App Nap contexts for Wx,
# just disable it outright.
from appnope import nope
nope()
doi = kernel.do_one_iteration
# Wx uses milliseconds
poll_interval = int(1000*kernel._poll_interval)
# We have to put the wx.Timer in a wx.Frame for it to fire properly.
# We make the Frame hidden when we create it in the main app below.
class TimerFrame(wx.Frame):
def __init__(self, func):
wx.Frame.__init__(self, None, -1)
self.timer = wx.Timer(self)
# Units for the timer are in milliseconds
self.timer.Start(poll_interval)
self.Bind(wx.EVT_TIMER, self.on_timer)
self.func = func
def on_timer(self, event):
self.func()
# We need a custom wx.App to create our Frame subclass that has the
# wx.Timer to drive the ZMQ event loop.
class IPWxApp(wx.App):
def OnInit(self):
self.frame = TimerFrame(doi)
self.frame.Show(False)
return True
# The redirect=False here makes sure that wx doesn't replace
# sys.stdout/stderr with its own classes.
kernel.app = IPWxApp(redirect=False)
# The import of wx on Linux sets the handler for signal.SIGINT
# to 0. This is a bug in wx or gtk. We fix by just setting it
# back to the Python default.
import signal
if not callable(signal.getsignal(signal.SIGINT)):
signal.signal(signal.SIGINT, signal.default_int_handler)
start_event_loop_wx(kernel.app)
@register_integration('tk')
def loop_tk(kernel):
"""Start a kernel with the Tk event loop."""
try:
from tkinter import Tk # Py 3
except ImportError:
from Tkinter import Tk # Py 2
doi = kernel.do_one_iteration
# Tk uses milliseconds
poll_interval = int(1000*kernel._poll_interval)
# For Tkinter, we create a Tk object and call its withdraw method.
class Timer(object):
def __init__(self, func):
self.app = Tk()
self.app.withdraw()
self.func = func
def on_timer(self):
self.func()
self.app.after(poll_interval, self.on_timer)
def start(self):
self.on_timer() # Call it once to get things going.
self.app.mainloop()
kernel.timer = Timer(doi)
kernel.timer.start()
@register_integration('gtk')
def loop_gtk(kernel):
"""Start the kernel, coordinating with the GTK event loop"""
from .gui.gtkembed import GTKEmbed
gtk_kernel = GTKEmbed(kernel)
gtk_kernel.start()
@register_integration('gtk3')
def loop_gtk3(kernel):
"""Start the kernel, coordinating with the GTK event loop"""
from .gui.gtk3embed import GTKEmbed
gtk_kernel = GTKEmbed(kernel)
gtk_kernel.start()
@register_integration('osx')
def loop_cocoa(kernel):
"""Start the kernel, coordinating with the Cocoa CFRunLoop event loop
via the matplotlib MacOSX backend.
"""
import matplotlib
if matplotlib.__version__ < '1.1.0':
kernel.log.warn(
"MacOSX backend in matplotlib %s doesn't have a Timer, "
"falling back on Tk for CFRunLoop integration. Note that "
"even this won't work if Tk is linked against X11 instead of "
"Cocoa (e.g. EPD). To use the MacOSX backend in the kernel, "
"you must use matplotlib >= 1.1.0, or a native libtk."
)
return loop_tk(kernel)
from matplotlib.backends.backend_macosx import TimerMac, show
# scale interval for sec->ms
poll_interval = int(1000*kernel._poll_interval)
real_excepthook = sys.excepthook
def handle_int(etype, value, tb):
"""don't let KeyboardInterrupts look like crashes"""
if etype is KeyboardInterrupt:
io.raw_print("KeyboardInterrupt caught in CFRunLoop")
else:
real_excepthook(etype, value, tb)
# add doi() as a Timer to the CFRunLoop
def doi():
# restore excepthook during IPython code
sys.excepthook = real_excepthook
kernel.do_one_iteration()
# and back:
sys.excepthook = handle_int
t = TimerMac(poll_interval)
t.add_callback(doi)
t.start()
# but still need a Poller for when there are no active windows,
# during which time mainloop() returns immediately
poller = zmq.Poller()
if kernel.control_stream:
poller.register(kernel.control_stream.socket, zmq.POLLIN)
for stream in kernel.shell_streams:
poller.register(stream.socket, zmq.POLLIN)
while True:
try:
# double nested try/except, to properly catch KeyboardInterrupt
# due to pyzmq Issue #130
try:
# don't let interrupts during mainloop invoke crash_handler:
sys.excepthook = handle_int
show.mainloop()
sys.excepthook = real_excepthook
# use poller if mainloop returned (no windows)
# scale by extra factor of 10, since it's a real poll
poller.poll(10*poll_interval)
kernel.do_one_iteration()
except:
raise
except KeyboardInterrupt:
# Ctrl-C shouldn't crash the kernel
io.raw_print("KeyboardInterrupt caught in kernel")
finally:
# ensure excepthook is restored
sys.excepthook = real_excepthook
def enable_gui(gui, kernel=None):
"""Enable integration with a given GUI"""
if gui not in loop_map:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, loop_map.keys())
raise ValueError(e)
if kernel is None:
if Application.initialized():
kernel = getattr(Application.instance(), 'kernel', None)
if kernel is None:
raise RuntimeError("You didn't specify a kernel,"
" and no IPython Application with a kernel appears to be running."
)
loop = loop_map[gui]
if loop and kernel.eventloop is not None and kernel.eventloop is not loop:
raise RuntimeError("Cannot activate multiple GUI eventloops")
kernel.eventloop = loop
| 0.002528 |
"""Visualization component for Jupyter Notebooks."""
from pathlib import Path
import numpy as np
import nibabel as nb
from .utils import compose_view, plot_registration, cuts_from_bbox
def display(
fixed_image,
moving_image,
contour=None,
cuts=None,
fixed_label="F",
moving_label="M",
):
"""Plot the flickering panels to show a registration process."""
from IPython.display import SVG, display as _disp
if isinstance(fixed_image, (str, Path)):
fixed_image = nb.load(str(fixed_image))
if isinstance(moving_image, (str, Path)):
moving_image = nb.load(str(moving_image))
if cuts is None:
n_cuts = 7
if contour is not None:
if isinstance(contour, (str, Path)):
contour = nb.load(str(contour))
cuts = cuts_from_bbox(contour, cuts=n_cuts)
else:
hdr = fixed_image.header.copy()
hdr.set_data_dtype("uint8")
mask_nii = nb.Nifti1Image(
np.ones(fixed_image.shape, dtype="uint8"), fixed_image.affine, hdr
)
cuts = cuts_from_bbox(mask_nii, cuts=n_cuts)
# Call composer
_disp(
SVG(
compose_view(
plot_registration(
fixed_image,
"fixed-image",
estimate_brightness=True,
cuts=cuts,
label=fixed_label,
contour=contour,
compress=False,
),
plot_registration(
moving_image,
"moving-image",
estimate_brightness=True,
cuts=cuts,
label=moving_label,
contour=contour,
compress=False,
),
)
)
)
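# --- Added illustration (not part of the original module): a typical notebook
# call of display(); the file paths and labels are placeholders.
#
#   display("fixed_T1w.nii.gz", "moving_bold.nii.gz",
#           contour="brainmask.nii.gz",
#           fixed_label="T1w", moving_label="BOLD")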
| 0.000535 |
import numpy as np
from Channel import Channel
from Detector import Detector
class PhotonCountingModule():
"""
Handles the registration of detectors and channels.
The Detector and Channel classes should only be used through this.
"""
def __init__(self):
"""
Initialize.
"""
self.Detector = Detector
self.Channel = Channel
self._detectors = {}
self._channels = {}
def register_detector(self, name, detector):
"""
Registers a named detector if not already in use.
"""
if name in self._detectors:
raise NameError('Detector name:['+name+'] is already in use.')
else:
self._detectors[name] = detector
def register_channel(self, name, channel):
"""
Registers a named channel if not already in use.
"""
if name in self._channels:
raise NameError('Channel name:['+name+'] is already in use.')
else:
self._channels[name] = channel
def detector(self, name):
"""
return a named detector.
"""
return self._detectors[name]
def channel(self, name):
"""
return a named channel.
"""
return self._channels[name]
| 0.042086 |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ssh_local_key
short_description: SSH proxy local keys in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall_ssh feature and local_key category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_ssh_local_key:
description:
- SSH proxy local keys.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
name:
description:
- SSH proxy local key name.
required: true
type: str
password:
description:
- Password for SSH private key.
type: str
private_key:
description:
- SSH proxy private key, encrypted with a password.
type: str
public_key:
description:
- SSH proxy public key.
type: str
source:
description:
- SSH proxy local key source type.
type: str
choices:
- built-in
- user
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: SSH proxy local keys.
fortios_firewall_ssh_local_key:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_ssh_local_key:
name: "default_name_3"
password: "<your_own_value>"
private_key: "<your_own_value>"
public_key: "<your_own_value>"
source: "built-in"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_ssh_local_key_data(json):
option_list = ['name', 'password', 'private_key',
'public_key', 'source']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def firewall_ssh_local_key(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['firewall_ssh_local_key'] and data['firewall_ssh_local_key']:
state = data['firewall_ssh_local_key']['state']
else:
state = True
firewall_ssh_local_key_data = data['firewall_ssh_local_key']
filtered_data = underscore_to_hyphen(filter_firewall_ssh_local_key_data(firewall_ssh_local_key_data))
if state == "present":
return fos.set('firewall.ssh',
'local-key',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall.ssh',
'local-key',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall_ssh(data, fos):
if data['firewall_ssh_local_key']:
resp = firewall_ssh_local_key(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_ssh_local_key": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"name": {"required": True, "type": "str"},
"password": {"required": False, "type": "str"},
"private_key": {"required": False, "type": "str"},
"public_key": {"required": False, "type": "str"},
"source": {"required": False, "type": "str",
"choices": ["built-in", "user"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall_ssh(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall_ssh(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| 0.001525 |
#!/usr/bin/env python
"""
This node does something called foward kinematics. It reads the motor speeds and publishes a twist representing
what the rover is actually doing.
"""
import rospy
from std_msgs.msg import Int32
import std_msgs.msg
from nav_msgs.msg import Odometry
import numpy as np
import kinematics
class motor_to_twist:
"""
This class takes in measured motor data (actual wheel movement) and publishes twists as odometry messages.
This data is used by the localization nodes.
"""
def __init__(self, linear_cov_factor, angular_cov_factor):
self.left_positions = list()
self.right_positions = list()
self.linear_cov_factor = linear_cov_factor
self.angular_cov_factor = angular_cov_factor
self.track = rospy.get_param("/rover_constants/wheel_base_width")
self.wheel_diameter = rospy.get_param("/rover_constants/wheel_diameter")
self.encoder_ticks_per_rad = rospy.get_param("/rover_constants/encoder_ticks_per_rad")
rospy.Subscriber("left_motor_in", Int32, self.left_callback)
rospy.Subscriber("right_motor_in", Int32, self.right_callback)
self.pub = rospy.Publisher("twist_publisher", Odometry, queue_size = 1)
self.publish_timer = rospy.Timer(rospy.Duration(0.1), self.publish_data)
def left_callback(self, data):
new_pos = data.data / self.encoder_ticks_per_rad
self.left_positions.append((new_pos, rospy.Time.now()))
def right_callback(self, data):
new_pos = data.data / self.encoder_ticks_per_rad
self.right_positions.append((new_pos, rospy.Time.now()))
def publish_data(self, time_obj):
if len(self.left_positions) <= 10 or len(self.right_positions) <= 10:
return
while self.left_positions[-1][1].to_sec() - self.left_positions[0][1].to_sec() > 1:
self.left_positions.pop(0)
while self.right_positions[-1][1].to_sec() - self.right_positions[0][1].to_sec() > 1:
self.right_positions.pop(0)
if len(self.left_positions) <= 10 or len(self.right_positions) <= 10:
return
out_msg = Odometry()
out_msg.header.stamp = rospy.Time.now()
out_msg.child_frame_id = "base_link"
try:
if len(self.left_positions) > 2 and len(self.right_positions) > 2:
left_vel = self.linear_reg_slope(self.left_positions)
right_vel = self.linear_reg_slope(self.right_positions)
v, omega = kinematics.forward_kinematics(left_vel, right_vel, track=self.track, diameter=self.wheel_diameter)
out_msg.twist.twist.linear.x = v
out_msg.twist.twist.angular.z = omega
# don't use old data
self.left_positions.pop(0)
self.right_positions.pop(0)
# rospy.logerr("motor_to_twist setting twists: " + str(v) + " " + str(omega))
else:
# if no data is being recieved then the motor control node is probably not running. Publish zero velocity.
out_msg.twist.twist.linear.x = 0
out_msg.twist.twist.angular.z = 0
self.set_covariance(out_msg)
self.pub.publish(out_msg)
# rospy.logerr("motor_to_twist published")
except Exception:
pass
def linear_reg_slope(self, data):
""" calculates the first derivative of noisy data with a linear regression """
time = np.array([d[1].to_sec() for d in data])
position = np.array([d[0] for d in data])
m, b = np.polyfit(time, position, 1)
return m
def set_covariance(self, msg):
"""
Twist messages have a covariance. The covariance is basically a measure of possible error, very similar
to a standard deviation. For example, if msg.twist.twist.linear.x = 0.5 and msg.twist.covariance[0] = 0.1
would mean that the linear velocity is 0.5 +- 0.1 m/s with a 68% confidence.
"""
linear_factor = abs(msg.twist.twist.linear.x * self.linear_cov_factor)
angular_factor = abs(msg.twist.twist.angular.z * self.angular_cov_factor)
# set the x and y covariance. Set y because the rover might slip sideways if it goes over rough terrain
msg.twist.covariance[0*6 + 0] = linear_factor
msg.twist.covariance[1*6 + 1] = linear_factor
msg.twist.covariance[3*6 + 5] = angular_factor
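# --- Added illustration (not part of the original node): kinematics.forward_kinematics
# is imported from a project module that is not shown here. For a standard
# differential drive it is commonly computed as below (wheel speeds in rad/s,
# track and diameter in metres); this is a hedged sketch, not the project's code.
def _example_forward_kinematics(left_vel, right_vel, track, diameter):
    wheel_radius = diameter / 2.0
    v = wheel_radius * (left_vel + right_vel) / 2.0          # linear velocity (m/s)
    omega = wheel_radius * (right_vel - left_vel) / track    # angular velocity (rad/s)
    return v, omega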
if __name__ == '__main__':
rospy.init_node('motor_to_twist')
linear_cov_factor = rospy.get_param('~linear_covariance_scale_factor', 0)
angular_cov_factor = rospy.get_param('~angular_covariance_scale_factor', 0)
controller = motor_to_twist(linear_cov_factor, angular_cov_factor)
rospy.spin()
| 0.005407 |
""" Python Character Mapping Codec koi8_u generated from 'python-mappings/KOI8-U.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='koi8-u',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
u'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u2580' # 0x8B -> UPPER HALF BLOCK
u'\u2584' # 0x8C -> LOWER HALF BLOCK
u'\u2588' # 0x8D -> FULL BLOCK
u'\u258c' # 0x8E -> LEFT HALF BLOCK
u'\u2590' # 0x8F -> RIGHT HALF BLOCK
u'\u2591' # 0x90 -> LIGHT SHADE
u'\u2592' # 0x91 -> MEDIUM SHADE
u'\u2593' # 0x92 -> DARK SHADE
u'\u2320' # 0x93 -> TOP HALF INTEGRAL
u'\u25a0' # 0x94 -> BLACK SQUARE
u'\u2219' # 0x95 -> BULLET OPERATOR
u'\u221a' # 0x96 -> SQUARE ROOT
u'\u2248' # 0x97 -> ALMOST EQUAL TO
u'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
u'\xa0' # 0x9A -> NO-BREAK SPACE
u'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
u'\xb0' # 0x9C -> DEGREE SIGN
u'\xb2' # 0x9D -> SUPERSCRIPT TWO
u'\xb7' # 0x9E -> MIDDLE DOT
u'\xf7' # 0x9F -> DIVISION SIGN
u'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
u'\u0454' # 0xA4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u0456' # 0xA6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0xA7 -> CYRILLIC SMALL LETTER YI (UKRAINIAN)
u'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u0491' # 0xAD -> CYRILLIC SMALL LETTER UKRAINIAN GHE WITH UPTURN
u'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
u'\u0404' # 0xB4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u0406' # 0xB6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0407' # 0xB7 -> CYRILLIC CAPITAL LETTER YI (UKRAINIAN)
u'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u0490' # 0xBD -> CYRILLIC CAPITAL LETTER UKRAINIAN GHE WITH UPTURN
u'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa9' # 0xBF -> COPYRIGHT SIGN
u'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
u'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
u'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
u'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
u'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
u'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
u'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
u'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
u'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
u'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
u'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
u'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
u'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
u'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
u'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
u'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
u'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
u'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
u'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
u'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
u'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
u'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
u'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
u'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
u'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
u'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
u'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
u'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
u'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
u'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
u'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
u'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
u'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
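### Illustrative round-trip (a sketch relying on the tables above)
# codecs.charmap_decode('\xb4', 'strict', decoding_table)[0]    # -> u'\u0404'
# codecs.charmap_encode(u'\u0404', 'strict', encoding_table)[0] # -> '\xb4'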
| 0.02026 |
from libpandaexpress import BufferedDatagramConnection
from libpandaexpress import SocketAddress
from libpandaexpress import SocketIP
from libpandaexpress import SocketTCP
from libpandaexpress import SocketTCPListen
from libpandaexpress import SocketUDPOutgoing
from libpandaexpress import SocketUDPIncoming
from libpandaexpress import Datagram
import time
SocketIP.InitNetworkDriver()
addr = SocketAddress()
addr.setHost("127.0.0.1",6666)
print addr.getIpPort()
MyConection = BufferedDatagramConnection(0,4096000,4096000,102400)
#help(BufferedDatagramConnection)
MyConection.AddAddress(addr)
dg = Datagram()
dg.addUint8(1)
dg.addUint64(4001)
dg.addUint16(2001)
dg.addUint64(123456)
MyConection.SendMessage(dg)
dg1 = Datagram()
dg1.addUint8(1)
dg1.addUint64(123456)
dg1.addUint64(12340)
dg1.addUint16(1000)
dg1.addUint16(54321)
while True:
    MyConection.SendMessage(dg)
    ##for x in range(120000):
    while MyConection.GetMessage():
        pass
    MyConection.Flush()
    time.sleep(1)
    print "loop"
| 0.014549 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "FITS" unit format.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...extern.six.moves import zip
import copy
import keyword
from . import generic
from . import utils
class Fits(generic.Generic):
"""
The FITS standard unit format.
This supports the format defined in the Units section of the `FITS
Standard <http://fits.gsfc.nasa.gov/fits_standard.html>`_.
"""
name = 'fits'
def __init__(self):
# Build this on the class, so it only gets generated once.
if '_parser' not in Fits.__dict__:
Fits._parser, Fits._lexer = self._make_parser()
        if '_units' not in Fits.__dict__:
Fits._units, Fits._deprecated_units = self._generate_unit_names()
@staticmethod
def _generate_unit_names():
from ... import units as u
names = {}
deprecated_names = set()
bases = [
'm', 'g', 's', 'rad', 'sr', 'K', 'A', 'mol', 'cd',
'Hz', 'J', 'W', 'V', 'N', 'Pa', 'C', 'Ohm', 'S',
'F', 'Wb', 'T', 'H', 'lm', 'lx', 'a', 'yr', 'eV',
'pc', 'Jy', 'mag', 'R', 'bit', 'byte'
]
deprecated_bases = ['G', 'barn']
prefixes = [
'y', 'z', 'a', 'f', 'p', 'n', 'u', 'm', 'c', 'd',
'', 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
special_cases = {'dbyte': u.Unit('dbyte', 0.1*u.byte)}
for base in bases + deprecated_bases:
for prefix in prefixes:
key = prefix + base
if keyword.iskeyword(key):
continue
elif key in special_cases:
names[key] = special_cases[key]
else:
names[key] = getattr(u, key)
for base in deprecated_bases:
for prefix in prefixes:
deprecated_names.add(prefix + base)
simple_units = [
'deg', 'arcmin', 'arcsec', 'mas', 'min', 'h', 'd', 'Ry',
'solMass', 'u', 'solLum', 'solRad', 'AU', 'lyr', 'count',
'ct', 'photon', 'ph', 'pixel', 'pix', 'D', 'Sun', 'chan',
'bin', 'voxel', 'adu', 'beam'
]
deprecated_units = ['erg', 'Angstrom', 'angstrom']
for unit in simple_units + deprecated_units:
names[unit] = getattr(u, unit)
for unit in deprecated_units:
deprecated_names.add(unit)
return names, deprecated_names
@classmethod
def _validate_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{0}' not supported by the FITS standard. {1}".format(
unit, utils.did_you_mean_units(
unit, cls._units, cls._deprecated_units,
cls._to_decomposed_alternative)))
else:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], 'FITS',
cls._to_decomposed_alternative)
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
cls._validate_unit(unit)
return cls._units[unit]
@classmethod
def _get_unit_name(cls, unit):
name = unit.get_format_name('fits')
cls._validate_unit(name)
return name
@classmethod
def to_string(cls, unit):
from .. import core
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
if unit.scale != 1:
raise core.UnitScaleError(
"The FITS unit format is not able to represent scale. "
"Multiply your data by {0:e}.".format(unit.scale))
pairs = list(zip(unit.bases, unit.powers))
pairs.sort(key=lambda x: x[1], reverse=True)
s = cls._format_unit_list(pairs)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
@classmethod
def _to_decomposed_alternative(cls, unit):
from .. import core
try:
s = cls.to_string(unit)
except core.UnitScaleError:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return '{0} (with data multiplied by {1})'.format(
cls.to_string(unit), scale)
return s
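# Illustrative usage (a sketch; assumes astropy's unit registry is available):
#   from astropy import units as u
#   f = Fits()                  # builds the parser and the unit tables
#   f.to_string(u.km / u.s)     # -> 'km s-1'
#   f._parse_unit('Angstrom')   # resolves, but emits a deprecation warning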
| 0.000426 |
'''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
import os
import cv2
import numpy as np
from methods import normalize
from . finish_calibration import finish_calibration
from pyglui.cygl.utils import draw_points_norm,RGBA
from glfw import GLFW_PRESS
import audio
from pyglui import ui
from . calibration_plugin_base import Calibration_Plugin
#logging
import logging
logger = logging.getLogger(__name__)
class Natural_Features_Calibration(Calibration_Plugin):
"""Calibrate using natural features in a scene.
    Features are selected by the user by clicking on them in the world window.
"""
def __init__(self, g_pool):
super().__init__(g_pool)
self.first_img = None
self.point = None
self.count = 0
self.detected = False
self.pos = None
self.r = 40.0 # radius of circle displayed
self.ref_list = []
self.pupil_list = []
self.menu = None
self.button = None
self.order = .5
def init_gui(self):
self.info = ui.Info_Text("Calibrate gaze parameters using features in your environment. Ask the subject to look at objects in the scene and click on them in the world window.")
self.g_pool.calibration_menu.append(self.info)
self.button = ui.Thumb('active',self,label='C',setter=self.toggle,hotkey='c')
self.button.on_color[:] = (.3,.2,1.,.9)
self.g_pool.quickbar.insert(0,self.button)
def deinit_gui(self):
if self.info:
self.g_pool.calibration_menu.remove(self.info)
self.info = None
if self.button:
self.g_pool.quickbar.remove(self.button)
self.button = None
def toggle(self,_=None):
if self.active:
self.notify_all({'subject':'calibration.should_stop'})
else:
self.notify_all({'subject':'calibration.should_start'})
def start(self):
audio.say("Starting Calibration")
logger.info("Starting Calibration")
self.active = True
self.ref_list = []
self.pupil_list = []
def stop(self):
audio.say("Stopping Calibration")
logger.info("Stopping Calibration")
self.active = False
self.button.status_text = ''
finish_calibration(self.g_pool,self.pupil_list,self.ref_list)
def update(self,frame,events):
if self.active:
recent_pupil_positions = events['pupil_positions']
if self.first_img is None:
self.first_img = frame.gray.copy()
self.detected = False
if self.count:
gray = frame.gray
                # in cv2.3, nextPts is falsely required as an argument.
nextPts_dummy = self.point.copy()
nextPts,status, err = cv2.calcOpticalFlowPyrLK(self.first_img,gray,self.point,nextPts_dummy,winSize=(100,100))
if status[0]:
self.detected = True
self.point = nextPts
self.first_img = gray.copy()
nextPts = nextPts[0]
self.pos = normalize(nextPts,(gray.shape[1],gray.shape[0]),flip_y=True)
self.count -=1
ref = {}
ref["screen_pos"] = nextPts
ref["norm_pos"] = self.pos
ref["timestamp"] = frame.timestamp
self.ref_list.append(ref)
#always save pupil positions
for p_pt in recent_pupil_positions:
if p_pt['confidence'] > self.pupil_confidence_threshold:
self.pupil_list.append(p_pt)
if self.count:
self.button.status_text = 'Sampling Gaze Data'
else:
self.button.status_text = 'Click to Sample at Location'
def gl_display(self):
if self.detected:
draw_points_norm([self.pos],size=self.r,color=RGBA(0.,1.,0.,.5))
def on_click(self,pos,button,action):
if action == GLFW_PRESS and self.active:
self.first_img = None
self.point = np.array([pos,],dtype=np.float32)
self.count = 30
def get_init_dict(self):
return {}
def cleanup(self):
"""gets called when the plugin get terminated.
This happens either voluntarily or forced.
if you have an atb bar or glfw window destroy it here.
"""
if self.active:
self.stop()
        self.deinit_gui()
| 0.011248 |
# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
# OS/2 w/ VACPP
altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
"""Normalize case of pathname.
Makes all characters lowercase and all slashes into backslashes."""
return s.replace("/", "\\").lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
"""Test whether a path is absolute"""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
# Join two (or more) paths.
def join(a, *p):
"""Join two or more pathname components, inserting "\\" as needed.
If any component is an absolute path, all previous path components
will be discarded."""
path = a
for b in p:
b_wins = 0 # set to 1 iff b makes path irrelevant
if path == "":
b_wins = 1
elif isabs(b):
# This probably wipes out path so far. However, it's more
# complicated if path begins with a drive letter:
# 1. join('c:', '/a') == 'c:/a'
# 2. join('c:/', '/a') == 'c:/a'
# But
# 3. join('c:/a', '/b') == '/b'
# 4. join('c:', 'd:/') = 'd:/'
# 5. join('c:/', 'd:/') = 'd:/'
if path[1:2] != ":" or b[1:2] == ":":
# Path doesn't start with a drive letter, or cases 4 and 5.
b_wins = 1
# Else path has a drive letter, and b doesn't but is absolute.
elif len(path) > 3 or (len(path) == 3 and
path[-1] not in "/\\"):
# case 3
b_wins = 1
if b_wins:
path = b
else:
# Join, and ensure there's a separator.
assert len(path) > 0
if path[-1] in "/\\":
if b and b[0] in "/\\":
path += b[1:]
else:
path += b
elif path[-1] == ":":
path += b
elif b:
if b[0] in "/\\":
path += b
else:
path += "\\" + b
else:
# path is not empty and does not end with a backslash,
# but b is empty; since, e.g., split('a/') produces
# ('a', ''), it's best if join() adds a backslash in
# this case.
path += '\\'
return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty"""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
# Parse UNC paths
def splitunc(p):
"""Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
if p[1:2] == ':':
return '', p # Drive letter present
firstTwo = p[0:2]
if firstTwo == '//' or firstTwo == '\\\\':
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
# \\machine\mountpoint\directories...
# directory ^^^^^^^^^^^^^^^
normp = normcase(p)
index = normp.find('\\', 2)
if index == -1:
##raise RuntimeError, 'illegal UNC path: "' + p + '"'
return ("", p)
index = normp.find('\\', index + 1)
if index == -1:
index = len(p)
return p[:index], p[index:]
return '', p
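# e.g. splitunc('\\\\host\\share\\dir\\file') -> ('\\\\host\\share', '\\dir\\file')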
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
"""Split a pathname.
Return tuple (head, tail) where tail is everything after the final slash.
Either part may be empty."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
"""Returns the final component of a pathname"""
return split(p)[1]
# Return the head (dirname) part of a path.
def dirname(p):
"""Returns the directory component of a pathname"""
return split(p)[0]
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
"""Test for symbolic link.
On WindowsNT/95 and OS/2 always returns false
"""
return False
# alias exists to lexists
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
"""Test whether a path is a mount point (defined as root of drive)"""
unc, rest = splitunc(path)
if unc:
return rest in ("", "/", "\\")
p = splitdrive(path)[1]
return len(p) == 1 and p[0] in '/\\'
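# e.g. ismount('c:\\') -> True, ismount('c:\\dir') -> False,
#      ismount('\\\\host\\share') -> True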
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.")
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = os.environ['HOME']
elif 'USERPROFILE' in os.environ:
userhome = os.environ['USERPROFILE']
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = os.environ['HOMEDRIVE']
except KeyError:
drive = ''
userhome = join(drive, os.environ['HOMEPATH'])
if i != 1: #~user
userhome = join(dirname(userhome), path[1:i])
return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though this is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
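# Illustrative behaviour (assuming os.environ contains HOME='/home/me'):
#   expandvars('$HOME/x')     -> '/home/me/x'
#   expandvars('${HOME}/x')   -> '/home/me/x'
#   expandvars('%HOME%/x')    -> '/home/me/x'
#   expandvars("'$HOME'/x")   -> "'$HOME'/x"   (no expansion inside single quotes)
#   expandvars('$UNDEFINED')  -> '$UNDEFINED'  (unknown variables are left unchanged)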
def expandvars(path):
"""Expand shell variables of the forms $var, ${var} and %var%.
Unknown variables are left unchanged."""
if '$' not in path and '%' not in path:
return path
import string
varchars = string.ascii_letters + string.digits + '_-'
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen - 1
elif c == '%': # variable or '%'
if path[index + 1:index + 2] == '%':
res = res + c
index = index + 1
else:
path = path[index+1:]
pathlen = len(path)
try:
index = path.index('%')
except ValueError:
res = res + '%' + path
index = pathlen - 1
else:
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '%' + var + '%'
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '${' + var + '}'
except ValueError:
res = res + '${' + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '$' + var
if c != '':
index = index - 1
else:
res = res + c
index = index + 1
return res
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
path = path.replace("/", "\\")
prefix, path = splitdrive(path)
# We need to be careful here. If the prefix is empty, and the path starts
# with a backslash, it could either be an absolute path on the current
# drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
# is therefore imperative NOT to collapse multiple backslashes blindly in
# that case.
# The code below preserves multiple backslashes when there is no drive
# letter. This means that the invalid filename \\\a\b is preserved
# unchanged, where a\\\b is normalised to a\b. It's not clear that there
# is any better behaviour for such edge cases.
if prefix == '':
# No drive letter - preserve initial backslashes
while path[:1] == "\\":
prefix = prefix + "\\"
path = path[1:]
else:
# We have a drive letter - collapse initial backslashes
if path.startswith("\\"):
prefix = prefix + "\\"
path = path.lstrip("\\")
comps = path.split("\\")
i = 0
while i < len(comps):
if comps[i] in ('.', ''):
del comps[i]
elif comps[i] == '..':
if i > 0 and comps[i-1] != '..':
del comps[i-1:i+1]
i -= 1
elif i == 0 and prefix.endswith("\\"):
del comps[i]
else:
i += 1
else:
i += 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append('.')
return prefix + "\\".join(comps)
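# e.g. normpath('A//B/./C/../D') -> 'A\\B\\D'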
# Return an absolute path.
try:
from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
def abspath(path):
"""Return the absolute version of a path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
else: # use native Windows method on Windows
def abspath(path):
"""Return the absolute version of a path."""
if path: # Empty path must return current working directory.
try:
path = _getfullpathname(path)
except WindowsError:
pass # Bad path - return unchanged.
else:
path = os.getcwd()
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
sys.getwindowsversion()[3] >= 2)
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
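# e.g. relpath('c:\\a\\b\\file.txt', 'c:\\a') -> 'b\\file.txt'
# (illustrative; assumes both paths resolve on the same drive)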
| 0.002802 |
# Copyright (C) 2013 Jeremy S. Sanders
# Email: Jeremy Sanders <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
from __future__ import division, print_function
import traceback
from .. import document
from .. import plugins
from .. import qtall as qt4
from . import base
def _(text, disambiguation=None, context="Import_Plugin"):
return qt4.QCoreApplication.translate(context, text, disambiguation)
class ImportParamsPlugin(base.ImportParamsBase):
"""Parameters for import plugins.
Additional parameter:
plugin: name of plugin
Plugins have their own parameters."""
defaults = {
'plugin': None,
}
defaults.update(base.ImportParamsBase.defaults)
def __init__(self, **argsv):
"""Initialise plugin parameters, splitting up default parameters
and plugin parameters."""
pluginpars = {}
upvars = {}
for n, v in argsv.items():
if n in self.defaults:
upvars[n] = v
else:
pluginpars[n] = v
base.ImportParamsBase.__init__(self, **upvars)
self.pluginpars = pluginpars
self._extras.append('pluginpars')
class LinkedFilePlugin(base.LinkedFileBase):
"""Represent a file linked using an import plugin."""
def createOperation(self):
"""Return operation to recreate self."""
return OperationDataImportPlugin
def saveToFile(self, fileobj, relpath=None):
"""Save the link to the vsz document file."""
self._saveHelper(
fileobj,
'ImportFilePlugin',
('plugin', 'filename'),
relpath=relpath,
extraargs=self.params.pluginpars)
class OperationDataImportPlugin(base.OperationDataImportBase):
"""Import data using a plugin."""
descr = _('import using plugin')
def doImport(self):
"""Do import."""
pluginnames = [p.name for p in plugins.importpluginregistry]
plugin = plugins.importpluginregistry[
pluginnames.index(self.params.plugin)]
        # if the plugin is a class, make an instance;
        # under the old API the registered plugins were already instances
if isinstance(plugin, type):
plugin = plugin()
# strip out parameters for plugin itself
p = self.params
# set defaults for import plugins
pparams = dict(p.pluginpars)
for field in plugin.fields:
if field.name not in pparams:
pparams[field.name] = field.default
# stick back together the plugin parameter object
plugparams = plugins.ImportPluginParams(
p.filename, p.encoding, pparams)
results = plugin.doImport(plugparams)
# make link for file
LF = None
if p.linked:
LF = LinkedFilePlugin(p)
# convert results to real datasets
for pluginds in results:
# get list of custom definitions to add to results
self.outcustoms += pluginds._customs()
# convert plugin dataset to real one
ds = pluginds._unlinkedVeuszDataset()
if ds is not None:
if p.linked:
ds.linked = LF
# construct name
name = p.prefix + pluginds.name + p.suffix
# actually make dataset
self.outdatasets[name] = ds
def ImportFilePlugin(comm, plugin, filename, **args):
"""Import file using a plugin.
optional arguments:
prefix: add to start of dataset name (default '')
suffix: add to end of dataset name (default '')
linked: link import to file (default False)
encoding: file encoding (may not be used, default 'utf_8')
renames: renamed datasets after import
plus arguments to plugin
returns: list of imported datasets, list of imported customs
"""
realfilename = comm.findFileOnImportPath(filename)
params = ImportParamsPlugin(
plugin=plugin, filename=realfilename, **args)
op = OperationDataImportPlugin(params)
comm.document.applyOperation(op)
return op.outnames, op.outcustoms
document.registerImportCommand(
'ImportFilePlugin', ImportFilePlugin, filenamearg=1)
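# Illustrative call, given a command interface `comm` (plugin name and file are hypothetical):
#   ImportFilePlugin(comm, 'Example CSV plugin', 'data.csv', prefix='ex_', linked=True)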
| 0.001198 |
"""Test the SmartTub light platform."""
import pytest
from smarttub import SpaLight
# The light in light_zone should have the initial state light_state. We call
# service_name with service_params and expect the resulting call to
# SpaLight.set_mode to receive the set_mode_args parameters.
@pytest.mark.parametrize(
"light_zone,light_state,service_name,service_params,set_mode_args",
[
(1, "off", "turn_on", {}, (SpaLight.LightMode.PURPLE, 50)),
(1, "off", "turn_on", {"brightness": 255}, (SpaLight.LightMode.PURPLE, 100)),
(2, "on", "turn_off", {}, (SpaLight.LightMode.OFF, 0)),
],
)
async def test_light(
spa,
setup_entry,
hass,
light_zone,
light_state,
service_name,
service_params,
set_mode_args,
):
"""Test light entity."""
entity_id = f"light.{spa.brand}_{spa.model}_light_{light_zone}"
state = hass.states.get(entity_id)
assert state is not None
assert state.state == light_state
light: SpaLight = next(
light for light in await spa.get_lights() if light.zone == light_zone
)
await hass.services.async_call(
"light",
service_name,
{"entity_id": entity_id, **service_params},
blocking=True,
)
light.set_mode.assert_called_with(*set_mode_args)
| 0.000771 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
peukerdouglas.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtGui import QIcon
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithmExecutionException import \
GeoAlgorithmExecutionException
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputRaster
from TauDEMUtils import TauDEMUtils
class PeukerDouglas(GeoAlgorithm):
ELEVATION_GRID = 'ELEVATION_GRID'
CENTER_WEIGHT = 'CENTER_WEIGHT'
SIDE_WEIGHT = 'SIDE_WEIGHT'
DIAGONAL_WEIGHT = 'DIAGONAL_WEIGHT'
STREAM_SOURCE_GRID = 'STREAM_SOURCE_GRID'
def getIcon(self):
return QIcon(os.path.dirname(__file__) + '/../../images/taudem.png')
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Peuker Douglas')
self.cmdName = 'peukerdouglas'
self.group, self.i18n_group = self.trAlgorithm('Stream Network Analysis tools')
self.addParameter(ParameterRaster(self.ELEVATION_GRID,
self.tr('Elevation Grid'), False))
self.addParameter(ParameterNumber(self.CENTER_WEIGHT,
self.tr('Center Smoothing Weight'), 0, None, 0.4))
self.addParameter(ParameterNumber(self.SIDE_WEIGHT,
self.tr('Side Smoothing Weight'), 0, None, 0.1))
self.addParameter(ParameterNumber(self.DIAGONAL_WEIGHT,
self.tr('Diagonal Smoothing Weight'), 0, None, 0.05))
self.addOutput(OutputRaster(self.STREAM_SOURCE_GRID,
self.tr('Stream Source Grid')))
def processAlgorithm(self, progress):
commands = []
commands.append(os.path.join(TauDEMUtils.mpiexecPath(), 'mpiexec'))
processNum = ProcessingConfig.getSetting(TauDEMUtils.MPI_PROCESSES)
if processNum <= 0:
raise GeoAlgorithmExecutionException(
self.tr('Wrong number of MPI processes used. Please set '
'correct number before running TauDEM algorithms.'))
commands.append('-n')
commands.append(unicode(processNum))
commands.append(os.path.join(TauDEMUtils.taudemPath(), self.cmdName))
commands.append('-fel')
commands.append(self.getParameterValue(self.ELEVATION_GRID))
commands.append('-par')
commands.append(unicode(self.getParameterValue(self.CENTER_WEIGHT)))
commands.append(unicode(self.getParameterValue(self.SIDE_WEIGHT)))
commands.append(unicode(self.getParameterValue(self.DIAGONAL_WEIGHT)))
commands.append('-ss')
commands.append(self.getOutputValue(self.STREAM_SOURCE_GRID))
TauDEMUtils.executeTauDEM(commands, progress)
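        # The assembled command has the shape (paths and values illustrative):
        #   mpiexec -n 2 <taudem>/peukerdouglas -fel dem.tif -par 0.4 0.1 0.05 -ss streamsrc.tif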
| 0.001002 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''Implementation of SQLAlchemy backend.'''
from datetime import datetime
from datetime import timedelta
import sys
from oslo.config import cfg
from oslo.db.sqlalchemy import session as db_session
from oslo.db.sqlalchemy import utils
import sqlalchemy
from sqlalchemy import orm
from sqlalchemy.orm.session import Session
from heat.common import crypt
from heat.common import exception
from heat.common.i18n import _
from heat.db.sqlalchemy import filters as db_filters
from heat.db.sqlalchemy import migration
from heat.db.sqlalchemy import models
from heat.rpc import api as rpc_api
CONF = cfg.CONF
CONF.import_opt('max_events_per_stack', 'heat.common.config')
_facade = None
def get_facade():
global _facade
if not _facade:
_facade = db_session.EngineFacade.from_config(CONF)
return _facade
get_engine = lambda: get_facade().get_engine()
get_session = lambda: get_facade().get_session()
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def model_query(context, *args):
session = _session(context)
query = session.query(*args)
return query
def soft_delete_aware_query(context, *args, **kwargs):
"""Stack query helper that accounts for context's `show_deleted` field.
:param show_deleted: if True, overrides context's show_deleted field.
"""
query = model_query(context, *args)
show_deleted = kwargs.get('show_deleted') or context.show_deleted
if not show_deleted:
query = query.filter_by(deleted_at=None)
return query
def _session(context):
return (context and context.session) or get_session()
def raw_template_get(context, template_id):
result = model_query(context, models.RawTemplate).get(template_id)
if not result:
raise exception.NotFound(_('raw template with id %s not found') %
template_id)
return result
def raw_template_create(context, values):
raw_template_ref = models.RawTemplate()
raw_template_ref.update(values)
raw_template_ref.save(_session(context))
return raw_template_ref
def raw_template_update(context, template_id, values):
raw_template_ref = raw_template_get(context, template_id)
# get only the changed values
values = dict((k, v) for k, v in values.items()
if getattr(raw_template_ref, k) != v)
if values:
raw_template_ref.update_and_save(values)
return raw_template_ref
def resource_get(context, resource_id):
result = model_query(context, models.Resource).get(resource_id)
if not result:
raise exception.NotFound(_("resource with id %s not found") %
resource_id)
return result
def resource_get_by_name_and_stack(context, resource_name, stack_id):
result = model_query(context, models.Resource).\
filter_by(name=resource_name).\
filter_by(stack_id=stack_id).\
options(orm.joinedload("data")).first()
return result
def resource_get_by_physical_resource_id(context, physical_resource_id):
results = (model_query(context, models.Resource)
.filter_by(nova_instance=physical_resource_id)
.all())
for result in results:
if context is None or context.tenant_id in (
result.stack.tenant, result.stack.stack_user_project_id):
return result
return None
def resource_get_all(context):
results = model_query(context, models.Resource).all()
if not results:
raise exception.NotFound(_('no resources were found'))
return results
def resource_data_get_all(resource, data=None):
"""
Looks up resource_data by resource.id. If data is encrypted,
this method will decrypt the results.
"""
if data is None:
data = (model_query(resource.context, models.ResourceData)
.filter_by(resource_id=resource.id))
if not data:
raise exception.NotFound(_('no resource data found'))
ret = {}
for res in data:
if res.redact:
ret[res.key] = _decrypt(res.value, res.decrypt_method)
else:
ret[res.key] = res.value
return ret
def resource_data_get(resource, key):
"""Lookup value of resource's data by key. Decrypts resource data if
necessary.
"""
result = resource_data_get_by_key(resource.context,
resource.id,
key)
if result.redact:
return _decrypt(result.value, result.decrypt_method)
return result.value
def _encrypt(value):
if value is not None:
return crypt.encrypt(value.encode('utf-8'))
else:
return None, None
def _decrypt(enc_value, method):
if method is None:
return None
decryptor = getattr(crypt, method)
value = decryptor(enc_value)
if value is not None:
return unicode(value, 'utf-8')
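# Round-trip sketch (assumes heat.common.crypt's encrypt/decrypt methods are inverses):
#   method, enc = _encrypt(u'secret')
#   _decrypt(enc, method)   # -> u'secret'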
def resource_data_get_by_key(context, resource_id, key):
"""Looks up resource_data by resource_id and key. Does not unencrypt
resource_data.
"""
result = (model_query(context, models.ResourceData)
.filter_by(resource_id=resource_id)
.filter_by(key=key).first())
if not result:
raise exception.NotFound(_('No resource data found'))
return result
def resource_data_set(resource, key, value, redact=False):
"""Save resource's key/value pair to database."""
if redact:
method, value = _encrypt(value)
else:
method = ''
try:
current = resource_data_get_by_key(resource.context, resource.id, key)
except exception.NotFound:
current = models.ResourceData()
current.key = key
current.resource_id = resource.id
current.redact = redact
current.value = value
current.decrypt_method = method
current.save(session=resource.context.session)
return current
def resource_exchange_stacks(context, resource_id1, resource_id2):
query = model_query(context, models.Resource)
session = query.session
session.begin()
res1 = query.get(resource_id1)
res2 = query.get(resource_id2)
res1.stack, res2.stack = res2.stack, res1.stack
session.commit()
def resource_data_delete(resource, key):
result = resource_data_get_by_key(resource.context, resource.id, key)
result.delete()
def resource_create(context, values):
resource_ref = models.Resource()
resource_ref.update(values)
resource_ref.save(_session(context))
return resource_ref
def resource_get_all_by_stack(context, stack_id):
results = model_query(context, models.Resource).\
filter_by(stack_id=stack_id).\
options(orm.joinedload("data")).all()
if not results:
raise exception.NotFound(_("no resources for stack_id %s were found")
% stack_id)
return dict((res.name, res) for res in results)
def stack_get_by_name_and_owner_id(context, stack_name, owner_id):
query = soft_delete_aware_query(context, models.Stack).\
filter(sqlalchemy.or_(
models.Stack.tenant == context.tenant_id,
models.Stack.stack_user_project_id == context.tenant_id
)).\
filter_by(name=stack_name).\
filter_by(owner_id=owner_id)
return query.first()
def stack_get_by_name(context, stack_name):
query = soft_delete_aware_query(context, models.Stack).\
filter(sqlalchemy.or_(
models.Stack.tenant == context.tenant_id,
models.Stack.stack_user_project_id == context.tenant_id
)).\
filter_by(name=stack_name)
return query.first()
def stack_get(context, stack_id, show_deleted=False, tenant_safe=True,
eager_load=False):
query = model_query(context, models.Stack)
if eager_load:
query = query.options(orm.joinedload("raw_template"))
result = query.get(stack_id)
deleted_ok = show_deleted or context.show_deleted
if result is None or result.deleted_at is not None and not deleted_ok:
return None
# One exception to normal project scoping is users created by the
# stacks in the stack_user_project_id (in the heat stack user domain)
if (tenant_safe and result is not None and context is not None and
context.tenant_id not in (result.tenant,
result.stack_user_project_id)):
return None
return result
def stack_get_all_by_owner_id(context, owner_id):
results = soft_delete_aware_query(context, models.Stack).\
filter_by(owner_id=owner_id).all()
return results
def _get_sort_keys(sort_keys, mapping):
'''Returns an array containing only whitelisted keys
:param sort_keys: an array of strings
:param mapping: a mapping from keys to DB column names
:returns: filtered list of sort keys
'''
if isinstance(sort_keys, basestring):
sort_keys = [sort_keys]
return [mapping[key] for key in sort_keys or [] if key in mapping]
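# e.g. _get_sort_keys(['stack_name', 'bogus'], {'stack_name': 'name'}) -> ['name'];
# keys missing from the mapping are silently dropped.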
def _paginate_query(context, query, model, limit=None, sort_keys=None,
marker=None, sort_dir=None):
default_sort_keys = ['created_at']
if not sort_keys:
sort_keys = default_sort_keys
if not sort_dir:
sort_dir = 'desc'
# This assures the order of the stacks will always be the same
# even for sort_key values that are not unique in the database
sort_keys = sort_keys + ['id']
model_marker = None
if marker:
model_marker = model_query(context, model).get(marker)
try:
query = utils.paginate_query(query, model, limit, sort_keys,
model_marker, sort_dir)
except utils.InvalidSortKey as exc:
raise exception.Invalid(reason=exc.message)
return query
def _query_stack_get_all(context, tenant_safe=True, show_deleted=False,
show_nested=False):
if show_nested:
query = soft_delete_aware_query(context, models.Stack,
show_deleted=show_deleted).\
filter_by(backup=False)
else:
query = soft_delete_aware_query(context, models.Stack,
show_deleted=show_deleted).\
filter_by(owner_id=None)
if tenant_safe:
query = query.filter_by(tenant=context.tenant_id)
return query
def stack_get_all(context, limit=None, sort_keys=None, marker=None,
sort_dir=None, filters=None, tenant_safe=True,
show_deleted=False, show_nested=False):
query = _query_stack_get_all(context, tenant_safe,
show_deleted=show_deleted,
show_nested=show_nested)
return _filter_and_page_query(context, query, limit, sort_keys,
marker, sort_dir, filters).all()
def _filter_and_page_query(context, query, limit=None, sort_keys=None,
marker=None, sort_dir=None, filters=None):
if filters is None:
filters = {}
sort_key_map = {rpc_api.STACK_NAME: models.Stack.name.key,
rpc_api.STACK_STATUS: models.Stack.status.key,
rpc_api.STACK_CREATION_TIME: models.Stack.created_at.key,
rpc_api.STACK_UPDATED_TIME: models.Stack.updated_at.key}
whitelisted_sort_keys = _get_sort_keys(sort_keys, sort_key_map)
query = db_filters.exact_filter(query, models.Stack, filters)
return _paginate_query(context, query, models.Stack, limit,
whitelisted_sort_keys, marker, sort_dir)
def stack_count_all(context, filters=None, tenant_safe=True,
show_deleted=False, show_nested=False):
query = _query_stack_get_all(context, tenant_safe=tenant_safe,
show_deleted=show_deleted,
show_nested=show_nested)
query = db_filters.exact_filter(query, models.Stack, filters)
return query.count()
def stack_create(context, values):
stack_ref = models.Stack()
stack_ref.update(values)
stack_ref.save(_session(context))
return stack_ref
def stack_update(context, stack_id, values):
stack = stack_get(context, stack_id)
if not stack:
raise exception.NotFound(_('Attempt to update a stack with id: '
'%(id)s %(msg)s') % {
'id': stack_id,
'msg': 'that does not exist'})
stack.update(values)
stack.save(_session(context))
def stack_delete(context, stack_id):
s = stack_get(context, stack_id)
if not s:
raise exception.NotFound(_('Attempt to delete a stack with id: '
'%(id)s %(msg)s') % {
'id': stack_id,
'msg': 'that does not exist'})
session = Session.object_session(s)
for r in s.resources:
session.delete(r)
s.soft_delete(session=session)
session.flush()
def stack_lock_create(stack_id, engine_id):
session = get_session()
with session.begin():
lock = session.query(models.StackLock).get(stack_id)
if lock is not None:
return lock.engine_id
session.add(models.StackLock(stack_id=stack_id, engine_id=engine_id))
def stack_lock_steal(stack_id, old_engine_id, new_engine_id):
session = get_session()
with session.begin():
lock = session.query(models.StackLock).get(stack_id)
rows_affected = session.query(models.StackLock).\
filter_by(stack_id=stack_id, engine_id=old_engine_id).\
update({"engine_id": new_engine_id})
if not rows_affected:
return lock.engine_id if lock is not None else True
def stack_lock_release(stack_id, engine_id):
session = get_session()
with session.begin():
rows_affected = session.query(models.StackLock).\
filter_by(stack_id=stack_id, engine_id=engine_id).\
delete()
if not rows_affected:
return True
def user_creds_create(context):
values = context.to_dict()
user_creds_ref = models.UserCreds()
if values.get('trust_id'):
method, trust_id = _encrypt(values.get('trust_id'))
user_creds_ref.trust_id = trust_id
user_creds_ref.decrypt_method = method
user_creds_ref.trustor_user_id = values.get('trustor_user_id')
user_creds_ref.username = None
user_creds_ref.password = None
user_creds_ref.tenant = values.get('tenant')
user_creds_ref.tenant_id = values.get('tenant_id')
else:
user_creds_ref.update(values)
method, password = _encrypt(values['password'])
user_creds_ref.password = password
user_creds_ref.decrypt_method = method
user_creds_ref.save(_session(context))
return user_creds_ref
def user_creds_get(user_creds_id):
db_result = model_query(None, models.UserCreds).get(user_creds_id)
if db_result is None:
return None
# Return a dict copy of db results, do not decrypt details into db_result
# or it can be committed back to the DB in decrypted form
result = dict(db_result)
del result['decrypt_method']
result['password'] = _decrypt(result['password'], db_result.decrypt_method)
result['trust_id'] = _decrypt(result['trust_id'], db_result.decrypt_method)
return result
def user_creds_delete(context, user_creds_id):
creds = model_query(context, models.UserCreds).get(user_creds_id)
if not creds:
raise exception.NotFound(
_('Attempt to delete user creds with id '
'%(id)s that does not exist') % {'id': user_creds_id})
session = Session.object_session(creds)
session.delete(creds)
session.flush()
def event_get(context, event_id):
result = model_query(context, models.Event).get(event_id)
return result
def event_get_all(context):
stacks = soft_delete_aware_query(context, models.Stack)
stack_ids = [stack.id for stack in stacks]
results = model_query(context, models.Event).\
filter(models.Event.stack_id.in_(stack_ids)).all()
return results
def event_get_all_by_tenant(context, limit=None, marker=None,
sort_keys=None, sort_dir=None, filters=None):
query = model_query(context, models.Event)
query = db_filters.exact_filter(query, models.Event, filters)
query = query.join(models.Event.stack).\
filter_by(tenant=context.tenant_id).filter_by(deleted_at=None)
filters = None
return _events_filter_and_page_query(context, query, limit, marker,
sort_keys, sort_dir, filters).all()
def _query_all_by_stack(context, stack_id):
query = model_query(context, models.Event).\
filter_by(stack_id=stack_id)
return query
def event_get_all_by_stack(context, stack_id, limit=None, marker=None,
sort_keys=None, sort_dir=None, filters=None):
query = _query_all_by_stack(context, stack_id)
return _events_filter_and_page_query(context, query, limit, marker,
sort_keys, sort_dir, filters).all()
def _events_paginate_query(context, query, model, limit=None, sort_keys=None,
marker=None, sort_dir=None):
default_sort_keys = ['created_at']
if not sort_keys:
sort_keys = default_sort_keys
if not sort_dir:
sort_dir = 'desc'
# This assures the order of the stacks will always be the same
# even for sort_key values that are not unique in the database
sort_keys = sort_keys + ['id']
model_marker = None
if marker:
        # Do not use model_query(context, model).get(marker): users only see
        # the UUID (column 'uuid'), so the marker is a UUID rather than the
        # primary key.
model_marker = model_query(context, model).filter_by(uuid=marker).\
first()
try:
query = utils.paginate_query(query, model, limit, sort_keys,
model_marker, sort_dir)
except utils.InvalidSortKey as exc:
raise exception.Invalid(reason=exc.message)
return query
def _events_filter_and_page_query(context, query,
limit=None, marker=None,
sort_keys=None, sort_dir=None,
filters=None):
if filters is None:
filters = {}
sort_key_map = {rpc_api.EVENT_TIMESTAMP: models.Event.created_at.key,
rpc_api.EVENT_RES_TYPE: models.Event.resource_type.key}
whitelisted_sort_keys = _get_sort_keys(sort_keys, sort_key_map)
query = db_filters.exact_filter(query, models.Event, filters)
return _events_paginate_query(context, query, models.Event, limit,
whitelisted_sort_keys, marker, sort_dir)
def event_count_all_by_stack(context, stack_id):
return _query_all_by_stack(context, stack_id).count()
def _delete_event_rows(context, stack_id, limit):
# MySQL does not support LIMIT in subqueries,
# sqlite does not support JOIN in DELETE.
# So we must manually supply the IN() values.
# pgsql SHOULD work with the pure DELETE/JOIN below but that must be
# confirmed via integration tests.
query = _query_all_by_stack(context, stack_id)
session = _session(context)
ids = [r.id for r in query.order_by(
models.Event.id).limit(limit).all()]
q = session.query(models.Event).filter(
models.Event.id.in_(ids))
return q.delete(synchronize_session='fetch')
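# Illustrative note (not in the original module): the two-step pattern above is
# roughly what a single, non-portable statement would express, e.g.
#
#   DELETE FROM event
#   WHERE id IN (SELECT id FROM event
#                WHERE stack_id = :stack_id
#                ORDER BY id LIMIT :limit);
#
# MySQL rejects the LIMIT inside an IN() subquery, and the JOIN-based DELETE
# alternative is rejected by sqlite, hence the explicit list of ids.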
def event_create(context, values):
if 'stack_id' in values and cfg.CONF.max_events_per_stack:
if ((event_count_all_by_stack(context, values['stack_id']) >=
cfg.CONF.max_events_per_stack)):
# prune
_delete_event_rows(
context, values['stack_id'], cfg.CONF.event_purge_batch_size)
event_ref = models.Event()
event_ref.update(values)
event_ref.save(_session(context))
return event_ref
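# Illustrative configuration sketch (an assumption about deployment settings,
# not taken from this module): with options such as
#
#   max_events_per_stack = 1000
#   event_purge_batch_size = 10
#
# event_create() above prunes the 10 oldest events for a stack once it has
# accumulated 1000, before inserting the new event.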
def watch_rule_get(context, watch_rule_id):
result = model_query(context, models.WatchRule).get(watch_rule_id)
return result
def watch_rule_get_by_name(context, watch_rule_name):
result = model_query(context, models.WatchRule).\
filter_by(name=watch_rule_name).first()
return result
def watch_rule_get_all(context):
results = model_query(context, models.WatchRule).all()
return results
def watch_rule_get_all_by_stack(context, stack_id):
results = model_query(context, models.WatchRule).\
filter_by(stack_id=stack_id).all()
return results
def watch_rule_create(context, values):
obj_ref = models.WatchRule()
obj_ref.update(values)
obj_ref.save(_session(context))
return obj_ref
def watch_rule_update(context, watch_id, values):
wr = watch_rule_get(context, watch_id)
if not wr:
raise exception.NotFound(_('Attempt to update a watch with id: '
'%(id)s %(msg)s') % {
'id': watch_id,
'msg': 'that does not exist'})
wr.update(values)
wr.save(_session(context))
def watch_rule_delete(context, watch_id):
wr = watch_rule_get(context, watch_id)
if not wr:
raise exception.NotFound(_('Attempt to delete watch_rule: '
'%(id)s %(msg)s') % {
'id': watch_id,
'msg': 'that does not exist'})
session = Session.object_session(wr)
for d in wr.watch_data:
session.delete(d)
session.delete(wr)
session.flush()
def watch_data_create(context, values):
obj_ref = models.WatchData()
obj_ref.update(values)
obj_ref.save(_session(context))
return obj_ref
def watch_data_get_all(context):
results = model_query(context, models.WatchData).all()
return results
def software_config_create(context, values):
obj_ref = models.SoftwareConfig()
obj_ref.update(values)
obj_ref.save(_session(context))
return obj_ref
def software_config_get(context, config_id):
result = model_query(context, models.SoftwareConfig).get(config_id)
if (result is not None and context is not None and
result.tenant != context.tenant_id):
result = None
if not result:
raise exception.NotFound(_('Software config with id %s not found') %
config_id)
return result
def software_config_delete(context, config_id):
config = software_config_get(context, config_id)
session = Session.object_session(config)
session.delete(config)
session.flush()
def software_deployment_create(context, values):
obj_ref = models.SoftwareDeployment()
obj_ref.update(values)
obj_ref.save(_session(context))
return obj_ref
def software_deployment_get(context, deployment_id):
result = model_query(context, models.SoftwareDeployment).get(deployment_id)
if (result is not None and context is not None and
context.tenant_id not in (result.tenant,
result.stack_user_project_id)):
result = None
if not result:
raise exception.NotFound(_('Deployment with id %s not found') %
deployment_id)
return result
def software_deployment_get_all(context, server_id=None):
sd = models.SoftwareDeployment
query = model_query(context, sd).\
filter(sqlalchemy.or_(
sd.tenant == context.tenant_id,
sd.stack_user_project_id == context.tenant_id
)).\
order_by(sd.created_at)
if server_id:
query = query.filter_by(server_id=server_id)
return query.all()
def software_deployment_update(context, deployment_id, values):
deployment = software_deployment_get(context, deployment_id)
deployment.update(values)
deployment.save(_session(context))
return deployment
def software_deployment_delete(context, deployment_id):
deployment = software_deployment_get(context, deployment_id)
session = Session.object_session(deployment)
session.delete(deployment)
session.flush()
def snapshot_create(context, values):
obj_ref = models.Snapshot()
obj_ref.update(values)
obj_ref.save(_session(context))
return obj_ref
def snapshot_get(context, snapshot_id):
result = model_query(context, models.Snapshot).get(snapshot_id)
if (result is not None and context is not None and
context.tenant_id != result.tenant):
result = None
if not result:
raise exception.NotFound(_('Snapshot with id %s not found') %
snapshot_id)
return result
def snapshot_update(context, snapshot_id, values):
snapshot = snapshot_get(context, snapshot_id)
snapshot.update(values)
snapshot.save(_session(context))
return snapshot
def snapshot_delete(context, snapshot_id):
snapshot = snapshot_get(context, snapshot_id)
session = Session.object_session(snapshot)
session.delete(snapshot)
session.flush()
def snapshot_get_all(context, stack_id):
return model_query(context, models.Snapshot).filter_by(
stack_id=stack_id, tenant=context.tenant_id)
def purge_deleted(age, granularity='days'):
try:
age = int(age)
except ValueError:
raise exception.Error(_("age should be an integer"))
if age < 0:
raise exception.Error(_("age should be a positive integer"))
if granularity not in ('days', 'hours', 'minutes', 'seconds'):
raise exception.Error(
_("granularity should be days, hours, minutes, or seconds"))
if granularity == 'days':
age = age * 86400
elif granularity == 'hours':
age = age * 3600
elif granularity == 'minutes':
age = age * 60
time_line = datetime.now() - timedelta(seconds=age)
engine = get_engine()
meta = sqlalchemy.MetaData()
meta.bind = engine
stack = sqlalchemy.Table('stack', meta, autoload=True)
event = sqlalchemy.Table('event', meta, autoload=True)
raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)
user_creds = sqlalchemy.Table('user_creds', meta, autoload=True)
stmt = sqlalchemy.select([stack.c.id,
stack.c.raw_template_id,
stack.c.user_creds_id]).\
where(stack.c.deleted_at < time_line)
deleted_stacks = engine.execute(stmt)
for s in deleted_stacks:
event_del = event.delete().where(event.c.stack_id == s[0])
engine.execute(event_del)
stack_del = stack.delete().where(stack.c.id == s[0])
engine.execute(stack_del)
raw_template_del = raw_template.delete().\
where(raw_template.c.id == s[1])
engine.execute(raw_template_del)
user_creds_del = user_creds.delete().where(user_creds.c.id == s[2])
engine.execute(user_creds_del)
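# Illustrative invocation (an assumption about the calling context, not part of
# the original module): purge stacks soft-deleted more than 30 days ago,
# together with their events, raw templates and user creds:
#
#   purge_deleted(30, granularity='days')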
def db_sync(engine, version=None):
"""Migrate the database to `version` or the most recent version."""
return migration.db_sync(engine, version=version)
def db_version(engine):
"""Display the current database version."""
return migration.db_version(engine)
| 0.000107 |
#!/usr/bin/env python2.7
## JOHN URBAN (2015,2016)
import h5py, os, sys
import cStringIO as StringIO
from Bio import SeqIO
from fast5tools.f5class import *
from fast5tools.f5ops import *
import argparse
from glob import glob
import matplotlib
## may need following line for remote jobs (e.g. submitting batch scripts)
##matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
##from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
#################################################
## Argument Parser
#################################################
parser = argparse.ArgumentParser(description = """
Given path(s) to fast5 file(s) and/or directories of fast5s, return desired plot given x and y.
1 = base_info_name
2 = molecule length
3 = has complement
4 = has 2d
5 = 2d seq len
6 = template seq len
7 = complement seq len
8 = 2d mean q score
9 = template mean q score
10 = complement mean q score
11 = num input events
12 = num template events
13 = num complement events
14 = num called template events
15 = num called complement events
16 = num skips in template
17 = num skips in complement
18 = fast5 filename (path as given)
19 = fast5 filename (absolute path)
John Urban (2015, 2016)
""", formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument('fast5', metavar='fast5', nargs='+',
type= str,
help='''Paths to as many fast5 files and/or directories filled with fast5 files as you want.
Assumes all fast5 files have '.fast5' extension.
If inside dir of dirs with .fast5 files, then can just do "*" to get all files from all dirs.''')
parser.add_argument('-x', '--x', type=int, default=5,
help='''Provide integer corresponding to what information is on x-axis.''')
parser.add_argument('-y', '--y', type=int, default=8,
help='''Provide integer corresponding to what information is on y-axis.''')
parser.add_argument('-t', '--title', type=str, default=None,
help='''Provide title.''')
parser.add_argument('--notarlite', action='store_true', default=False, help='''The default method (called tarlite) extracts 1 file from a given tarchive at a time, processes, and deletes it.
This option turns tarlite off, resulting in extracting the entire tarchive before proceeding (and finally deleting it).
It is possible that --notarlite is faster, but at the risk of exceeding file number limits or disk storage quotas.
Nonetheless, the difference in speed is a lot smaller than the difference in space needed.
For example, not using tarlite will require >2*tarchive amount of disk space (i.e. the tar.gz and its extracted contents).
The tarlite method only requires the disk space already taken by the tarchive and enough for 1 additional file at a time.
A corollary is that tarlite only needs permission to create 1 (or a few) files at a time, compared to what could be thousands to millions.
''')
parser.add_argument('--tarlite', action='store_true', default=False, help='''Legacy option: the tarlite approach is now the default, so specifying this flag changes nothing.
It is kept only so that existing pipelines which pass it do not break.
Use --notarlite to turn tarlite off.''')
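## Illustrative note (not in the original script): the tarlite choice is passed
## straight through to Fast5List below as keep_tar_footprint_small, i.e.
##   Fast5List(args.fast5, keep_tar_footprint_small=(not args.notarlite))
## so --notarlite simply sets that flag to False.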
args = parser.parse_args()
#################################################
## deal with some of the arguments
#################################################
num_f5cmds = len(f5fxn.keys())
safe_keys = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]
assert args.x in safe_keys
assert args.y in safe_keys
names = {}
names[2] = 'Molecule Length'
names[3] = 'Has Complement'
names[4] = 'Has 2D'
names[5] = '2D SeqLen'
names[6] = 'Template SeqLen'
names[7] = 'Complement SeqLen'
names[8] = '2D Mean q-score'
names[9] = 'Template Mean q-score'
names[10] = 'Complement Mean q-score'
names[11] = 'Number of Input Events'
names[12] = 'Number of Template Events'
names[13] = 'Number of Complement Events'
names[14] = 'Number of Called Template Events'
names[15] = 'Number of Called Complement Events'
names[16] = 'Number of Skips in Template'
names[17] = 'Number of Skips in Complement'
def get_fast5_data(f5cmd, f5):
try:
return float(f5fxn[f5cmd](f5))
    except Exception:
        # Return None when this fast5 file lacks the requested field.
        return None
def make_title(x,y, names):
return names[x] + " Vs. " + names[y]
#################################################
#### EXECUTE @@@@@@@@@@@@
#################################################
##TODO:
## Also make plotting from fast5totable files
if __name__ == "__main__":
if args.title is None:
args.title = make_title(args.x, args.y, names=names)
x = []
y = []
for f5 in Fast5List(args.fast5, keep_tar_footprint_small=(not args.notarlite)):
x.append( get_fast5_data(args.x, f5) )
y.append( get_fast5_data(args.y, f5) )
print x
print y
## will need to process those with "-"
plt.title(args.title)
plt.xlabel(names[args.x])
plt.ylabel(names[args.y])
plt.scatter(x,y)
plt.show()
| 0.011749 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark Column to behave similar to pandas Series.
"""
import datetime
import re
import inspect
import sys
from collections.abc import Mapping
from functools import partial, wraps, reduce
from typing import (
Any,
Callable,
Dict,
Generic,
IO,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
no_type_check,
overload,
TYPE_CHECKING,
)
import numpy as np
import pandas as pd
from pandas.core.accessor import CachedAccessor
from pandas.io.formats.printing import pprint_thing
from pandas.api.types import is_list_like, is_hashable
from pandas.api.extensions import ExtensionDtype
from pandas.tseries.frequencies import DateOffset
from pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame
from pyspark.sql.types import (
ArrayType,
BooleanType,
DataType,
DoubleType,
FloatType,
IntegerType,
IntegralType,
LongType,
NumericType,
Row,
StructType,
)
from pyspark.sql.window import Window
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, Dtype, Label, Name, Scalar, T
from pyspark.pandas.accessors import PandasOnSparkSeriesMethods
from pyspark.pandas.categorical import CategoricalAccessor
from pyspark.pandas.config import get_option
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.exceptions import SparkPandasIndexingError
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.generic import Frame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
DEFAULT_SERIES_NAME,
NATURAL_ORDER_COLUMN_NAME,
SPARK_DEFAULT_INDEX_NAME,
SPARK_DEFAULT_SERIES_NAME,
)
from pyspark.pandas.missing.series import MissingPandasLikeSeries
from pyspark.pandas.plot import PandasOnSparkPlotAccessor
from pyspark.pandas.ml import corr
from pyspark.pandas.utils import (
combine_frames,
is_name_like_tuple,
is_name_like_value,
name_like_string,
same_anchor,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
validate_bool_kwarg,
verify_temp_column_name,
SPARK_CONF_ARROW_ENABLED,
)
from pyspark.pandas.datetimes import DatetimeMethods
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.spark.accessors import SparkSeriesMethods
from pyspark.pandas.strings import StringMethods
from pyspark.pandas.typedef import (
infer_return_type,
spark_type_to_pandas_dtype,
ScalarType,
SeriesType,
)
if TYPE_CHECKING:
from pyspark.sql._typing import ColumnOrName # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import SeriesGroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes import Index # noqa: F401 (SPARK-34943)
# This regular expression pattern is compiled and defined here to avoid compiling
# the same pattern every time it is used in __repr__ in Series.
# This pattern basically seeks the footer string from pandas'
REPR_PATTERN = re.compile(r"Length: (?P<length>[0-9]+)")
_flex_doc_SERIES = """
Return {desc} of series and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``
Parameters
----------
other : Series or scalar value
Returns
-------
Series
The result of the operation.
See Also
--------
Series.{reverse}
{series_examples}
"""
_add_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.add(df.b)
a 4.0
b NaN
c 6.0
d NaN
dtype: float64
>>> df.a.radd(df.b)
a 4.0
b NaN
c 6.0
d NaN
dtype: float64
"""
_sub_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.subtract(df.b)
a 0.0
b NaN
c 2.0
d NaN
dtype: float64
>>> df.a.rsub(df.b)
a 0.0
b NaN
c -2.0
d NaN
dtype: float64
"""
_mul_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.multiply(df.b)
a 4.0
b NaN
c 8.0
d NaN
dtype: float64
>>> df.a.rmul(df.b)
a 4.0
b NaN
c 8.0
d NaN
dtype: float64
"""
_div_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.divide(df.b)
a 1.0
b NaN
c 2.0
d NaN
dtype: float64
>>> df.a.rdiv(df.b)
a 1.0
b NaN
c 0.5
d NaN
dtype: float64
"""
_pow_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.pow(df.b)
a 4.0
b NaN
c 16.0
d NaN
dtype: float64
>>> df.a.rpow(df.b)
a 4.0
b NaN
c 16.0
d NaN
dtype: float64
"""
_mod_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.mod(df.b)
a 0.0
b NaN
c 0.0
d NaN
dtype: float64
>>> df.a.rmod(df.b)
a 0.0
b NaN
c 2.0
d NaN
dtype: float64
"""
_floordiv_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.floordiv(df.b)
a 1.0
b NaN
c 2.0
d NaN
dtype: float64
>>> df.a.rfloordiv(df.b)
a 1.0
b NaN
c 0.0
d NaN
dtype: float64
"""
# Needed to disambiguate Series.str and str type
str_type = str
def _create_type_for_series_type(param: Any) -> Type[SeriesType]:
from pyspark.pandas.typedef import NameTypeHolder
if isinstance(param, ExtensionDtype):
new_class = type("NameType", (NameTypeHolder,), {}) # type: Type[NameTypeHolder]
new_class.tpe = param
else:
new_class = param.type if isinstance(param, np.dtype) else param
return SeriesType[new_class] # type: ignore
if (3, 5) <= sys.version_info < (3, 7) and __name__ != "__main__":
from typing import GenericMeta # type: ignore
old_getitem = GenericMeta.__getitem__ # type: ignore
@no_type_check
def new_getitem(self, params):
if hasattr(self, "is_series"):
return old_getitem(self, _create_type_for_series_type(params))
else:
return old_getitem(self, params)
GenericMeta.__getitem__ = new_getitem # type: ignore
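# Illustrative note (an assumption, not from the original comments): with the
# patch above, a return annotation such as
#   def f(x) -> ps.Series[np.float64]: ...
# resolves to SeriesType[...] on Python 3.5/3.6, mirroring what later Python
# versions achieve without monkey-patching GenericMeta.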
class Series(Frame, IndexOpsMixin, Generic[T]):
"""
pandas-on-Spark Series that corresponds to pandas Series logically. This holds Spark Column
internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: InternalFrame
:ivar _psdf: Parent's pandas-on-Spark DataFrame
:type _psdf: ps.DataFrame
Parameters
----------
data : array-like, dict, or scalar value, pandas Series
Contains data stored in Series
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a pandas Series, other arguments should not be used.
index : array-like or Index (1d)
Values must be hashable and have the same length as `data`.
Non-unique index values are allowed. Will default to
RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
"""
@no_type_check
def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False):
assert data is not None
if isinstance(data, DataFrame):
assert dtype is None
assert name is None
assert not copy
assert not fastpath
self._anchor = data # type: DataFrame
self._col_label = index # type: Label
else:
if isinstance(data, pd.Series):
assert index is None
assert dtype is None
assert name is None
assert not copy
assert not fastpath
s = data
else:
s = pd.Series(
data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath
)
internal = InternalFrame.from_pandas(pd.DataFrame(s))
if s.name is None:
internal = internal.copy(column_labels=[None])
anchor = DataFrame(internal)
self._anchor = anchor
self._col_label = anchor._internal.column_labels[0]
object.__setattr__(anchor, "_psseries", {self._column_label: self})
@property
def _psdf(self) -> DataFrame:
return self._anchor
@property
def _internal(self) -> InternalFrame:
return self._psdf._internal.select_column(self._column_label)
@property
def _column_label(self) -> Optional[Label]:
return self._col_label
def _update_anchor(self, psdf: DataFrame) -> None:
assert psdf._internal.column_labels == [self._column_label], (
psdf._internal.column_labels,
[self._column_label],
)
self._anchor = psdf
object.__setattr__(psdf, "_psseries", {self._column_label: self})
def _with_new_scol(self, scol: Column, *, field: Optional[InternalField] = None) -> "Series":
"""
Copy pandas-on-Spark Series with the new Spark Column.
:param scol: the new Spark Column
:return: the copied Series
"""
name = name_like_string(self._column_label)
internal = self._internal.copy(
data_spark_columns=[scol.alias(name)],
data_fields=[
field if field is None or field.struct_field is None else field.copy(name=name)
],
)
return first_series(DataFrame(internal))
spark = CachedAccessor("spark", SparkSeriesMethods)
@property
def dtypes(self) -> Dtype:
"""Return the dtype object of the underlying data.
>>> s = ps.Series(list('abc'))
>>> s.dtype == s.dtypes
True
"""
return self.dtype
@property
def axes(self) -> List["Index"]:
"""
Return a list of the row axis labels.
Examples
--------
>>> psser = ps.Series([1, 2, 3])
>>> psser.axes
[Int64Index([0, 1, 2], dtype='int64')]
"""
return [self.index]
# Arithmetic Operators
def add(self, other: Any) -> "Series":
return self + other
add.__doc__ = _flex_doc_SERIES.format(
desc="Addition",
op_name="+",
equiv="series + other",
reverse="radd",
series_examples=_add_example_SERIES,
)
def radd(self, other: Any) -> "Series":
return other + self
radd.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Addition",
op_name="+",
equiv="other + series",
reverse="add",
series_examples=_add_example_SERIES,
)
def div(self, other: Any) -> "Series":
return self / other
div.__doc__ = _flex_doc_SERIES.format(
desc="Floating division",
op_name="/",
equiv="series / other",
reverse="rdiv",
series_examples=_div_example_SERIES,
)
divide = div
def rdiv(self, other: Any) -> "Series":
return other / self
rdiv.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Floating division",
op_name="/",
equiv="other / series",
reverse="div",
series_examples=_div_example_SERIES,
)
def truediv(self, other: Any) -> "Series":
return self / other
truediv.__doc__ = _flex_doc_SERIES.format(
desc="Floating division",
op_name="/",
equiv="series / other",
reverse="rtruediv",
series_examples=_div_example_SERIES,
)
def rtruediv(self, other: Any) -> "Series":
return other / self
rtruediv.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Floating division",
op_name="/",
equiv="other / series",
reverse="truediv",
series_examples=_div_example_SERIES,
)
def mul(self, other: Any) -> "Series":
return self * other
mul.__doc__ = _flex_doc_SERIES.format(
desc="Multiplication",
op_name="*",
equiv="series * other",
reverse="rmul",
series_examples=_mul_example_SERIES,
)
multiply = mul
def rmul(self, other: Any) -> "Series":
return other * self
rmul.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Multiplication",
op_name="*",
equiv="other * series",
reverse="mul",
series_examples=_mul_example_SERIES,
)
def sub(self, other: Any) -> "Series":
return self - other
sub.__doc__ = _flex_doc_SERIES.format(
desc="Subtraction",
op_name="-",
equiv="series - other",
reverse="rsub",
series_examples=_sub_example_SERIES,
)
subtract = sub
def rsub(self, other: Any) -> "Series":
return other - self
rsub.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Subtraction",
op_name="-",
equiv="other - series",
reverse="sub",
series_examples=_sub_example_SERIES,
)
def mod(self, other: Any) -> "Series":
return self % other
mod.__doc__ = _flex_doc_SERIES.format(
desc="Modulo",
op_name="%",
equiv="series % other",
reverse="rmod",
series_examples=_mod_example_SERIES,
)
def rmod(self, other: Any) -> "Series":
return other % self
rmod.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Modulo",
op_name="%",
equiv="other % series",
reverse="mod",
series_examples=_mod_example_SERIES,
)
def pow(self, other: Any) -> "Series":
return self ** other
pow.__doc__ = _flex_doc_SERIES.format(
desc="Exponential power of series",
op_name="**",
equiv="series ** other",
reverse="rpow",
series_examples=_pow_example_SERIES,
)
def rpow(self, other: Any) -> "Series":
return other ** self
rpow.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Exponential power",
op_name="**",
equiv="other ** series",
reverse="pow",
series_examples=_pow_example_SERIES,
)
def floordiv(self, other: Any) -> "Series":
return self // other
floordiv.__doc__ = _flex_doc_SERIES.format(
desc="Integer division",
op_name="//",
equiv="series // other",
reverse="rfloordiv",
series_examples=_floordiv_example_SERIES,
)
def rfloordiv(self, other: Any) -> "Series":
return other // self
rfloordiv.__doc__ = _flex_doc_SERIES.format(
desc="Reverse Integer division",
op_name="//",
equiv="other // series",
reverse="floordiv",
series_examples=_floordiv_example_SERIES,
)
# create accessor for pandas-on-Spark specific methods.
pandas_on_spark = CachedAccessor("pandas_on_spark", PandasOnSparkSeriesMethods)
# keep the name "koalas" for backward compatibility.
koalas = CachedAccessor("koalas", PandasOnSparkSeriesMethods)
# Comparison Operators
def eq(self, other: Any) -> bool:
"""
Compare if the current value is equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a == 1
a True
b False
c False
d False
Name: a, dtype: bool
>>> df.b.eq(1)
a True
b False
c True
d False
Name: b, dtype: bool
"""
return self == other
equals = eq
def gt(self, other: Any) -> "Series":
"""
Compare if the current value is greater than the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a > 1
a False
b True
c True
d True
Name: a, dtype: bool
>>> df.b.gt(1)
a False
b False
c False
d False
Name: b, dtype: bool
"""
return self > other
def ge(self, other: Any) -> "Series":
"""
Compare if the current value is greater than or equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a >= 2
a False
b True
c True
d True
Name: a, dtype: bool
>>> df.b.ge(2)
a False
b False
c False
d False
Name: b, dtype: bool
"""
return self >= other
def lt(self, other: Any) -> "Series":
"""
Compare if the current value is less than the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a < 1
a False
b False
c False
d False
Name: a, dtype: bool
>>> df.b.lt(2)
a True
b False
c True
d False
Name: b, dtype: bool
"""
return self < other
def le(self, other: Any) -> "Series":
"""
Compare if the current value is less than or equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a <= 2
a True
b True
c False
d False
Name: a, dtype: bool
>>> df.b.le(2)
a True
b False
c True
d False
Name: b, dtype: bool
"""
return self <= other
def ne(self, other: Any) -> "Series":
"""
Compare if the current value is not equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.a != 1
a False
b True
c True
d True
Name: a, dtype: bool
>>> df.b.ne(1)
a False
b True
c False
d True
Name: b, dtype: bool
"""
return self != other
def divmod(self, other: Any) -> Tuple["Series", "Series"]:
"""
Return Integer division and modulo of series and other, element-wise
(binary operator `divmod`).
Parameters
----------
other : Series or scalar value
Returns
-------
2-Tuple of Series
The result of the operation.
See Also
--------
Series.rdivmod
"""
return self.floordiv(other), self.mod(other)
def rdivmod(self, other: Any) -> Tuple["Series", "Series"]:
"""
Return Integer division and modulo of series and other, element-wise
(binary operator `rdivmod`).
Parameters
----------
other : Series or scalar value
Returns
-------
2-Tuple of Series
The result of the operation.
See Also
--------
Series.divmod
"""
return self.rfloordiv(other), self.rmod(other)
def between(self, left: Any, right: Any, inclusive: bool = True) -> "Series":
"""
Return boolean Series equivalent to left <= series <= right.
This function returns a boolean vector containing `True` wherever the
corresponding Series element is between the boundary values `left` and
`right`. NA values are treated as `False`.
Parameters
----------
left : scalar or list-like
Left boundary.
right : scalar or list-like
Right boundary.
inclusive : bool, default True
Include boundaries.
Returns
-------
Series
Series representing whether each element is between left and
right (inclusive).
See Also
--------
Series.gt : Greater than of series and other.
Series.lt : Less than of series and other.
Notes
-----
This function is equivalent to ``(left <= ser) & (ser <= right)``
Examples
--------
>>> s = ps.Series([2, 0, 4, 8, np.nan])
Boundary values are included by default:
>>> s.between(1, 4)
0 True
1 False
2 True
3 False
4 False
dtype: bool
With `inclusive` set to ``False`` boundary values are excluded:
>>> s.between(1, 4, inclusive=False)
0 True
1 False
2 False
3 False
4 False
dtype: bool
`left` and `right` can be any scalar value:
>>> s = ps.Series(['Alice', 'Bob', 'Carol', 'Eve'])
>>> s.between('Anna', 'Daniel')
0 False
1 True
2 True
3 False
dtype: bool
"""
if inclusive:
lmask = self >= left
rmask = self <= right
else:
lmask = self > left
rmask = self < right
return lmask & rmask
# TODO: arg should support Series
# TODO: NaN and None
def map(self, arg: Union[Dict, Callable]) -> "Series":
"""
Map values of Series according to input correspondence.
Used for substituting each value in a Series with another value,
that may be derived from a function, a ``dict``.
.. note:: make sure the size of the dictionary is not huge because it could
downgrade the performance or throw OutOfMemoryError due to a huge
            expression within Spark. Consider passing a function as an
            alternative in this case.
Parameters
----------
arg : function or dict
Mapping correspondence.
Returns
-------
Series
Same index as caller.
See Also
--------
Series.apply : For applying more complex functions on a Series.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Notes
-----
When ``arg`` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``None``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``None``.
Examples
--------
>>> s = ps.Series(['cat', 'dog', None, 'rabbit'])
>>> s
0 cat
1 dog
2 None
3 rabbit
dtype: object
``map`` accepts a ``dict``. Values that are not found
in the ``dict`` are converted to ``None``, unless the dict has a default
value (e.g. ``defaultdict``):
>>> s.map({'cat': 'kitten', 'dog': 'puppy'})
0 kitten
1 puppy
2 None
3 None
dtype: object
It also accepts a function:
>>> def format(x) -> str:
... return 'I am a {}'.format(x)
>>> s.map(format)
0 I am a cat
1 I am a dog
2 I am a None
3 I am a rabbit
dtype: object
"""
if isinstance(arg, dict):
is_start = True
# In case dictionary is empty.
current = F.when(SF.lit(False), SF.lit(None).cast(self.spark.data_type))
for to_replace, value in arg.items():
if is_start:
current = F.when(self.spark.column == SF.lit(to_replace), value)
is_start = False
else:
current = current.when(self.spark.column == SF.lit(to_replace), value)
if hasattr(arg, "__missing__"):
tmp_val = arg[np._NoValue]
del arg[np._NoValue] # Remove in case it's set in defaultdict.
current = current.otherwise(SF.lit(tmp_val))
else:
current = current.otherwise(SF.lit(None).cast(self.spark.data_type))
return self._with_new_scol(current)
else:
return self.apply(arg)
@property
def shape(self) -> Tuple[int]:
"""Return a tuple of the shape of the underlying data."""
return (len(self),)
@property
def name(self) -> Name:
"""Return name of the Series."""
name = self._column_label
if name is not None and len(name) == 1:
return name[0]
else:
return name
@name.setter
def name(self, name: Name) -> None:
self.rename(name, inplace=True)
# TODO: Functionality and documentation should be matched. Currently, changing index labels
# taking dictionary and function to change index are not supported.
def rename(self, index: Optional[Name] = None, **kwargs: Any) -> "Series":
"""
Alter Series name.
Parameters
----------
index : scalar
Scalar will alter the ``Series.name`` attribute.
inplace : bool, default False
Whether to return a new Series. If True then value of copy is
ignored.
Returns
-------
Series
Series with name altered.
Examples
--------
>>> s = ps.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
"""
if index is None:
pass
elif not is_hashable(index):
raise TypeError("Series.name must be a hashable type")
elif not isinstance(index, tuple):
index = (index,)
name = name_like_string(index)
scol = self.spark.column.alias(name)
field = self._internal.data_fields[0].copy(name=name)
internal = self._internal.copy(
column_labels=[index],
data_spark_columns=[scol],
data_fields=[field],
column_label_names=None,
)
psdf = DataFrame(internal) # type: DataFrame
if kwargs.get("inplace", False):
self._col_label = index
self._update_anchor(psdf)
return self
else:
return first_series(psdf)
def rename_axis(
self, mapper: Optional[Any] = None, index: Optional[Any] = None, inplace: bool = False
) -> Optional["Series"]:
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper, index : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to the index values.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series.
Returns
-------
Series, or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Examples
--------
>>> s = ps.Series(["dog", "cat", "monkey"], name="animal")
>>> s # doctest: +NORMALIZE_WHITESPACE
0 dog
1 cat
2 monkey
Name: animal, dtype: object
>>> s.rename_axis("index").sort_index() # doctest: +NORMALIZE_WHITESPACE
index
0 dog
1 cat
2 monkey
Name: animal, dtype: object
**MultiIndex**
>>> index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> s = ps.Series([4, 4, 2], index=index, name='num_legs')
>>> s # doctest: +NORMALIZE_WHITESPACE
type name
mammal dog 4
cat 4
monkey 2
Name: num_legs, dtype: int64
>>> s.rename_axis(index={'type': 'class'}).sort_index() # doctest: +NORMALIZE_WHITESPACE
class name
mammal cat 4
dog 4
monkey 2
Name: num_legs, dtype: int64
>>> s.rename_axis(index=str.upper).sort_index() # doctest: +NORMALIZE_WHITESPACE
TYPE NAME
mammal cat 4
dog 4
monkey 2
Name: num_legs, dtype: int64
"""
psdf = self.to_frame().rename_axis(mapper=mapper, index=index, inplace=False)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
@property
def index(self) -> "ps.Index":
"""The index (axis labels) Column of the Series.
See Also
--------
Index
"""
return self._psdf.index
@property
def is_unique(self) -> bool:
"""
Return boolean if values in the object are unique
Returns
-------
is_unique : boolean
>>> ps.Series([1, 2, 3]).is_unique
True
>>> ps.Series([1, 2, 2]).is_unique
False
>>> ps.Series([1, 2, 3, None]).is_unique
True
"""
scol = self.spark.column
# Here we check:
# 1. the distinct count without nulls and count without nulls for non-null values
# 2. count null values and see if null is a distinct value.
#
# This workaround is in order to calculate the distinct count including nulls in
# single pass. Note that COUNT(DISTINCT expr) in Spark is designed to ignore nulls.
return self._internal.spark_frame.select(
(F.count(scol) == F.countDistinct(scol))
& (F.count(F.when(scol.isNull(), 1).otherwise(None)) <= 1)
).collect()[0][0]
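    # Illustrative reading of the expression above (not part of the original
    # comments): in SQL terms it evaluates, in a single aggregation,
    #   COUNT(col) = COUNT(DISTINCT col)                   -- non-null values unique
    #   AND COUNT(CASE WHEN col IS NULL THEN 1 END) <= 1   -- at most one null
    # so ps.Series([1, 2, 3, None]).is_unique is True while
    # ps.Series([None, None]).is_unique would be False.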
def reset_index(
self,
level: Optional[Union[int, Name, Sequence[Union[int, Name]]]] = None,
drop: bool = False,
name: Optional[Name] = None,
inplace: bool = False,
) -> Optional[Union["Series", DataFrame]]:
"""
Generate a new DataFrame or Series with the index reset.
This is useful when the index needs to be treated as a column,
or when the index is meaningless and needs to be reset
to the default before another operation.
Parameters
----------
level : int, str, tuple, or list, default optional
For a Series with a MultiIndex, only remove the specified levels from the index.
Removes all levels by default.
drop : bool, default False
Just reset the index, without inserting it as a column in the new DataFrame.
name : object, optional
The name to use for the column containing the original Series values.
Uses self.name by default. This argument is ignored when drop is True.
inplace : bool, default False
Modify the Series in place (do not create a new object).
Returns
-------
Series or DataFrame
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
When `drop` is True, a `Series` is returned.
In either case, if ``inplace=True``, no value is returned.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4], index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
Generate a DataFrame with default index.
>>> s.reset_index()
idx 0
0 a 1
1 b 2
2 c 3
3 d 4
To specify the name of the new column use `name`.
>>> s.reset_index(name='values')
idx values
0 a 1
1 b 2
2 c 3
3 d 4
To generate a new Series with the default set `drop` to True.
>>> s.reset_index(drop=True)
0 1
1 2
2 3
3 4
dtype: int64
To update the Series in place, without generating a new one
set `inplace` to True. Note that it also requires ``drop=True``.
>>> s.reset_index(inplace=True, drop=True)
>>> s
0 1
1 2
2 3
3 4
dtype: int64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace and not drop:
raise TypeError("Cannot reset_index inplace on a Series to create a DataFrame")
if drop:
psdf = self._psdf[[self.name]]
else:
psser = self
if name is not None:
psser = psser.rename(name)
psdf = psser.to_frame()
psdf = psdf.reset_index(level=level, drop=drop)
if drop:
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
else:
return psdf
def to_frame(self, name: Optional[Name] = None) -> DataFrame:
"""
Convert Series to DataFrame.
Parameters
----------
name : object, default None
The passed name should substitute for the series name (if it has
one).
Returns
-------
DataFrame
DataFrame representation of Series.
Examples
--------
>>> s = ps.Series(["a", "b", "c"])
>>> s.to_frame()
0
0 a
1 b
2 c
>>> s = ps.Series(["a", "b", "c"], name="vals")
>>> s.to_frame()
vals
0 a
1 b
2 c
"""
if name is not None:
renamed = self.rename(name)
elif self._column_label is None:
renamed = self.rename(DEFAULT_SERIES_NAME)
else:
renamed = self
return DataFrame(renamed._internal)
to_dataframe = to_frame
def to_string(
self,
buf: Optional[IO[str]] = None,
na_rep: str = "NaN",
float_format: Optional[Callable[[float], str]] = None,
header: bool = True,
index: bool = True,
length: bool = False,
dtype: bool = False,
name: bool = False,
max_rows: Optional[int] = None,
) -> Optional[str]:
"""
Render a string representation of the Series.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
buffer to write to
na_rep : string, optional
string representation of NAN to use, default 'NaN'
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats
default None
header : boolean, default True
Add the Series header (index name)
index : bool, optional
Add index (row) labels, default True
length : boolean, default False
Add the Series length
dtype : boolean, default False
Add the Series dtype
name : boolean, default False
Add the Series name if not None
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
Returns
-------
formatted : string (if not buffer passed)
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])
>>> print(df['dogs'].to_string())
0 0.2
1 0.0
2 0.6
3 0.2
>>> print(df['dogs'].to_string(max_rows=2))
0 0.2
1 0.0
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
psseries = self.head(max_rows)
else:
psseries = self
return validate_arguments_and_invoke_function(
psseries._to_internal_pandas(), self.to_string, pd.Series.to_string, args
)
def to_clipboard(self, excel: bool = True, sep: Optional[str] = None, **kwargs: Any) -> None:
# Docstring defined below by reusing DataFrame.to_clipboard's.
args = locals()
psseries = self
return validate_arguments_and_invoke_function(
psseries._to_internal_pandas(), self.to_clipboard, pd.Series.to_clipboard, args
)
to_clipboard.__doc__ = DataFrame.to_clipboard.__doc__
def to_dict(self, into: Type = dict) -> Mapping:
"""
Convert Series to {label -> value} dict or dict-like object.
.. note:: This method should only be used if the resulting pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
into : class, default dict
The collections.abc.Mapping subclass to use as the return
object. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
collections.abc.Mapping
Key-value representation of Series.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s_dict = s.to_dict()
>>> sorted(s_dict.items())
[(0, 1), (1, 2), (2, 3), (3, 4)]
>>> from collections import OrderedDict, defaultdict
>>> s.to_dict(OrderedDict)
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> dd = defaultdict(list)
>>> s.to_dict(dd) # doctest: +ELLIPSIS
defaultdict(<class 'list'>, {...})
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psseries = self
return validate_arguments_and_invoke_function(
psseries._to_internal_pandas(), self.to_dict, pd.Series.to_dict, args
)
def to_latex(
self,
buf: Optional[IO[str]] = None,
columns: Optional[List[Name]] = None,
col_space: Optional[int] = None,
header: bool = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[
Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]]
] = None,
float_format: Optional[Callable[[float], str]] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
bold_rows: bool = False,
column_format: Optional[str] = None,
longtable: Optional[bool] = None,
escape: Optional[bool] = None,
encoding: Optional[str] = None,
decimal: str = ".",
multicolumn: Optional[bool] = None,
multicolumn_format: Optional[str] = None,
multirow: Optional[bool] = None,
) -> Optional[str]:
args = locals()
psseries = self
return validate_arguments_and_invoke_function(
psseries._to_internal_pandas(), self.to_latex, pd.Series.to_latex, args
)
to_latex.__doc__ = DataFrame.to_latex.__doc__
def to_pandas(self) -> pd.Series:
"""
Return a pandas Series.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])
>>> df['dogs'].to_pandas()
0 0.2
1 0.0
2 0.6
3 0.2
Name: dogs, dtype: float64
"""
return self._to_internal_pandas().copy()
def to_list(self) -> List:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
.. note:: This method should only be used if the resulting list is expected
to be small, as all the data is loaded into the driver's memory.
"""
return self._to_internal_pandas().tolist()
tolist = to_list
def drop_duplicates(self, keep: str = "first", inplace: bool = False) -> Optional["Series"]:
"""
Return Series with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
Method to handle dropping duplicates:
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : bool, default ``False``
If ``True``, performs operation inplace and returns None.
Returns
-------
Series
Series with duplicates dropped.
Examples
--------
Generate a Series with duplicated entries.
>>> s = ps.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],
... name='animal')
>>> s.sort_index()
0 lama
1 cow
2 lama
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
With the 'keep' parameter, the selection behaviour of duplicated values
can be changed. The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> s.drop_duplicates().sort_index()
0 lama
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
The value 'last' for parameter 'keep' keeps the last occurrence for
each set of duplicated entries.
>>> s.drop_duplicates(keep='last').sort_index()
1 cow
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
The value ``False`` for parameter 'keep' discards all sets of
duplicated entries. Setting the value of 'inplace' to ``True`` performs
the operation inplace and returns ``None``.
>>> s.drop_duplicates(keep=False, inplace=True)
>>> s.sort_index()
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
psdf = self._psdf[[self.name]].drop_duplicates(keep=keep)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
def reindex(self, index: Optional[Any] = None, fill_value: Optional[Any] = None) -> "Series":
"""
Conform Series to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced.
Parameters
----------
index: array-like, optional
New labels / index to conform to, should be specified using keywords.
Preferably an Index object to avoid duplicating data
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
Returns
-------
Series with changed index.
See Also
--------
Series.reset_index : Remove row labels or move them to new columns.
Examples
--------
Create a series with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> ser = ps.Series([200, 200, 404, 404, 301],
... index=index, name='http_status')
>>> ser
Firefox 200
Chrome 200
Safari 404
IE10 404
Konqueror 301
Name: http_status, dtype: int64
Create a new index and reindex the Series. By default
values in the new index that do not have corresponding
records in the Series are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> ser.reindex(new_index).sort_index()
Chrome 200.0
Comodo Dragon NaN
IE10 404.0
Iceweasel NaN
Safari 404.0
Name: http_status, dtype: float64
We can fill in the missing values by passing a value to
the keyword ``fill_value``.
>>> ser.reindex(new_index, fill_value=0).sort_index()
Chrome 200
Comodo Dragon 0
IE10 404
Iceweasel 0
Safari 404
Name: http_status, dtype: int64
To further illustrate the filling functionality in
``reindex``, we will create a Series with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> ser2 = ps.Series([100, 101, np.nan, 100, 89, 88],
... name='prices', index=date_index)
>>> ser2.sort_index()
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Name: prices, dtype: float64
Suppose we decide to expand the series to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> ser2.reindex(date_index2).sort_index()
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Name: prices, dtype: float64
"""
return first_series(self.to_frame().reindex(index=index, fill_value=fill_value)).rename(
self.name
)
def reindex_like(self, other: Union["Series", "DataFrame"]) -> "Series":
"""
Return a Series with matching indices as other object.
Conform the object to the same index on all axes. Places NA/NaN in locations
having no value in the previous index.
Parameters
----------
other : Series or DataFrame
Its row and column indices are used to define the new indices
of this object.
Returns
-------
Series
Series with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, ...)``.
Examples
--------
>>> s1 = ps.Series([24.3, 31.0, 22.0, 35.0],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'),
... name="temp_celsius")
>>> s1
2014-02-12 24.3
2014-02-13 31.0
2014-02-14 22.0
2014-02-15 35.0
Name: temp_celsius, dtype: float64
>>> s2 = ps.Series(["low", "low", "medium"],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']),
... name="winspeed")
>>> s2
2014-02-12 low
2014-02-13 low
2014-02-15 medium
Name: winspeed, dtype: object
>>> s2.reindex_like(s1).sort_index()
2014-02-12 low
2014-02-13 low
2014-02-14 None
2014-02-15 medium
Name: winspeed, dtype: object
"""
if isinstance(other, (Series, DataFrame)):
return self.reindex(index=other.index)
else:
raise TypeError("other must be a pandas-on-Spark Series or DataFrame")
def fillna(
self,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool = False,
limit: Optional[int] = None,
) -> Optional["Series"]:
"""Fill NA/NaN values.
.. note:: the current implementation of 'method' parameter in fillna uses Spark's Window
            without specifying a partition specification. This moves all data into
            a single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series pad / ffill: propagate last valid
observation forward to next valid backfill / bfill:
use NEXT valid observation to fill gap
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
Series
Series with NA entries filled.
Examples
--------
>>> s = ps.Series([np.nan, 2, 3, 4, np.nan, 6], name='x')
>>> s
0 NaN
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
Name: x, dtype: float64
Replace all NaN elements with 0s.
>>> s.fillna(0)
0 0.0
1 2.0
2 3.0
3 4.0
4 0.0
5 6.0
Name: x, dtype: float64
We can also propagate non-null values forward or backward.
>>> s.fillna(method='ffill')
0 NaN
1 2.0
2 3.0
3 4.0
4 4.0
5 6.0
Name: x, dtype: float64
>>> s = ps.Series([np.nan, 'a', 'b', 'c', np.nan], name='x')
>>> s.fillna(method='ffill')
0 None
1 a
2 b
3 c
4 c
Name: x, dtype: object
"""
psser = self._fillna(value=value, method=method, axis=axis, limit=limit)
if method is not None:
psser = DataFrame(psser._psdf._internal.resolved_copy)._psser_for(self._column_label)
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._psdf._update_internal_frame(psser._psdf._internal, requires_same_anchor=False)
return None
else:
return psser._with_new_scol(psser.spark.column) # TODO: dtype?
def _fillna(
self,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
limit: Optional[int] = None,
part_cols: Sequence["ColumnOrName"] = (),
) -> "Series":
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if (value is None) and (method is None):
raise ValueError("Must specify a fillna 'value' or 'method' parameter.")
if (method is not None) and (method not in ["ffill", "pad", "backfill", "bfill"]):
raise ValueError("Expecting 'pad', 'ffill', 'backfill' or 'bfill'.")
scol = self.spark.column
if isinstance(self.spark.data_type, (FloatType, DoubleType)):
cond = scol.isNull() | F.isnan(scol)
else:
if not self.spark.nullable:
return self.copy()
cond = scol.isNull()
if value is not None:
if not isinstance(value, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(value).__name__)
if limit is not None:
raise ValueError("limit parameter for value is not support now")
scol = F.when(cond, value).otherwise(scol)
else:
if method in ["ffill", "pad"]:
func = F.last
end = Window.currentRow - 1
if limit is not None:
begin = Window.currentRow - limit
else:
begin = Window.unboundedPreceding
elif method in ["bfill", "backfill"]:
func = F.first
begin = Window.currentRow + 1
if limit is not None:
end = Window.currentRow + limit
else:
end = Window.unboundedFollowing
window = (
Window.partitionBy(*part_cols)
.orderBy(NATURAL_ORDER_COLUMN_NAME)
.rowsBetween(begin, end)
)
scol = F.when(cond, func(scol, True).over(window)).otherwise(scol)
return DataFrame(
self._psdf._internal.with_new_spark_column(
self._column_label, scol.alias(name_like_string(self.name)) # TODO: dtype?
)
)._psser_for(self._column_label)
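    # Illustrative note (an assumption about the window semantics above, not
    # part of the original comments): for method='ffill' the window spans
    # [currentRow - limit, currentRow - 1] (unbounded when limit is None) and
    # F.last(..., ignorenulls=True) picks the most recent non-null value, so
    # nulls in [1.0, None, None, 3.0] are filled to [1.0, 1.0, 1.0, 3.0];
    # 'bfill' mirrors this with F.first over the rows that follow.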
def dropna(self, axis: Axis = 0, inplace: bool = False, **kwargs: Any) -> Optional["Series"]:
"""
Return a new Series with missing values removed.
Parameters
----------
axis : {0 or 'index'}, default 0
There is only one axis to drop values from.
inplace : bool, default False
If True, do operation inplace and return None.
**kwargs
Not in use.
Returns
-------
Series
Series with NA entries dropped from it.
Examples
--------
>>> ser = ps.Series([1., 2., np.nan])
>>> ser
0 1.0
1 2.0
2 NaN
dtype: float64
Drop NA values from a Series.
>>> ser.dropna()
0 1.0
1 2.0
dtype: float64
Keep the Series with valid entries in the same variable.
>>> ser.dropna(inplace=True)
>>> ser
0 1.0
1 2.0
dtype: float64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# TODO: last two examples from pandas produce different results.
psdf = self._psdf[[self.name]].dropna(axis=axis, inplace=False)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> "Series":
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
Series
Series with the values outside the clip boundaries replaced
Examples
--------
>>> ps.Series([0, 2, 4]).clip(1, 3)
0 1
1 2
2 3
dtype: int64
Notes
-----
One difference between this implementation and pandas is that running
`pd.Series(['a', 'b']).clip(0, 1)` will crash with "TypeError: '<=' not supported between
instances of 'str' and 'int'" while `ps.Series(['a', 'b']).clip(0, 1)` will output the
original Series, simply ignoring the incompatible types.
"""
        if is_list_like(lower) or is_list_like(upper):
            raise TypeError(
                "List-like values are not supported for 'lower' and 'upper' at the moment"
            )
if lower is None and upper is None:
return self
if isinstance(self.spark.data_type, NumericType):
scol = self.spark.column
if lower is not None:
scol = F.when(scol < lower, lower).otherwise(scol)
if upper is not None:
scol = F.when(scol > upper, upper).otherwise(scol)
return self._with_new_scol(
scol.alias(self._internal.data_spark_column_names[0]),
field=self._internal.data_fields[0],
)
else:
return self
def drop(
self,
labels: Optional[Union[Name, List[Name]]] = None,
index: Optional[Union[Name, List[Name]]] = None,
level: Optional[int] = None,
) -> "Series":
"""
Return Series with specified index labels removed.
Remove elements of a Series based on specifying the index labels.
When using a multi-index, labels on different levels can be removed by specifying the level.
Parameters
----------
labels : single label or list-like
Index labels to drop.
index : None
Redundant for application on Series, but index can be used instead of labels.
level : int or level name, optional
For MultiIndex, level for which the labels will be removed.
Returns
-------
Series
Series with specified index labels removed.
See Also
--------
Series.dropna
Examples
--------
>>> s = ps.Series(data=np.arange(3), index=['A', 'B', 'C'])
>>> s
A 0
B 1
C 2
dtype: int64
Drop single label A
>>> s.drop('A')
B 1
C 2
dtype: int64
Drop labels B and C
>>> s.drop(labels=['B', 'C'])
A 0
dtype: int64
With 'index' rather than 'labels' returns exactly same result.
>>> s.drop(index='A')
B 1
C 2
dtype: int64
>>> s.drop(index=['B', 'C'])
A 0
dtype: int64
        Also supports MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.drop(labels='weight', level=1)
lama speed 45.0
length 1.2
cow speed 30.0
length 1.5
falcon speed 320.0
length 0.3
dtype: float64
>>> s.drop(('lama', 'weight'))
lama speed 45.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.drop([('lama', 'speed'), ('falcon', 'weight')])
lama weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
length 0.3
dtype: float64
"""
return first_series(self._drop(labels=labels, index=index, level=level))
def _drop(
self,
labels: Optional[Union[Name, List[Name]]] = None,
index: Optional[Union[Name, List[Name]]] = None,
level: Optional[int] = None,
) -> DataFrame:
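        # Normalize `labels`/`index` into a list of label tuples, then build one
        # Spark filter keeping every row whose index does not match any of them.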
if labels is not None:
if index is not None:
raise ValueError("Cannot specify both 'labels' and 'index'")
return self._drop(index=labels, level=level)
if index is not None:
internal = self._internal
if level is None:
level = 0
if level >= internal.index_level:
raise ValueError("'level' should be less than the number of indexes")
if is_name_like_tuple(index): # type: ignore
index_list = [cast(Label, index)]
elif is_name_like_value(index):
index_list = [(index,)]
elif all(is_name_like_value(idxes, allow_tuple=False) for idxes in index):
index_list = [(idex,) for idex in index]
elif not all(is_name_like_tuple(idxes) for idxes in index):
                raise ValueError(
                    "If the given index is a list, it should contain either "
                    "all tuples or all non-tuple index names"
                )
else:
index_list = cast(List[Label], index)
drop_index_scols = []
for idxes in index_list:
try:
index_scols = [
internal.index_spark_columns[lvl] == idx
for lvl, idx in enumerate(idxes, level)
]
except IndexError:
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
                            len(idxes), internal.index_level
)
)
drop_index_scols.append(reduce(lambda x, y: x & y, index_scols))
cond = ~reduce(lambda x, y: x | y, drop_index_scols)
return DataFrame(internal.with_filter(cond))
else:
raise ValueError("Need to specify at least one of 'labels' or 'index'")
def head(self, n: int = 5) -> "Series":
"""
Return the first n rows.
This function returns the first n rows for the object based on position.
It is useful for quickly testing if your object has the right type of data in it.
Parameters
----------
        n : int, default 5
Returns
-------
The first n rows of the caller object.
Examples
--------
>>> df = ps.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion']})
>>> df.animal.head(2) # doctest: +NORMALIZE_WHITESPACE
0 alligator
1 bee
Name: animal, dtype: object
"""
return first_series(self.to_frame().head(n)).rename(self.name)
def last(self, offset: Union[str, DateOffset]) -> "Series":
"""
Select final periods of time series data based on a date offset.
When having a Series with dates as index, this function can
select the last few elements based on a date offset.
Parameters
----------
offset : str or DateOffset
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the last 3 days.
Returns
-------
Series
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
Examples
--------
>>> index = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> psser = ps.Series([1, 2, 3, 4], index=index)
>>> psser
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
dtype: int64
Get the rows for the last 3 days:
>>> psser.last('3D')
2018-04-13 3
2018-04-15 4
dtype: int64
Notice the data for 3 last calendar days were returned, not the last
3 observed days in the dataset, and therefore data for 2018-04-11 was
not returned.
"""
return first_series(self.to_frame().last(offset)).rename(self.name)
def first(self, offset: Union[str, DateOffset]) -> "Series":
"""
Select first periods of time series data based on a date offset.
When having a Series with dates as index, this function can
select the first few elements based on a date offset.
Parameters
----------
offset : str or DateOffset
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the first 3 days.
Returns
-------
Series
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
Examples
--------
>>> index = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> psser = ps.Series([1, 2, 3, 4], index=index)
>>> psser
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
dtype: int64
Get the rows for the first 3 days:
>>> psser.first('3D')
2018-04-09 1
2018-04-11 2
dtype: int64
Notice the data for 3 first calendar days were returned, not the first
3 observed days in the dataset, and therefore data for 2018-04-13 was
not returned.
"""
return first_series(self.to_frame().first(offset)).rename(self.name)
# TODO: Categorical type isn't supported (due to PySpark's limitation) and
# some doctests related with timestamps were not added.
def unique(self) -> "Series":
"""
Return unique values of Series object.
Uniques are returned in order of appearance. Hash table-based unique,
therefore does NOT sort.
        .. note:: This method returns a newly created Series, whereas pandas returns
the unique values as a NumPy array.
Returns
-------
Returns the unique values as a Series.
See Also
--------
Index.unique
groupby.SeriesGroupBy.unique
Examples
--------
>>> psser = ps.Series([2, 1, 3, 3], name='A')
>>> psser.unique().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
<BLANKLINE>
... 1
... 2
... 3
Name: A, dtype: int64
>>> ps.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
0 2016-01-01
dtype: datetime64[ns]
>>> psser.name = ('x', 'a')
>>> psser.unique().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
<BLANKLINE>
... 1
... 2
... 3
Name: (x, a), dtype: int64
"""
sdf = self._internal.spark_frame.select(self.spark.column).distinct()
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=None,
column_labels=[self._column_label],
data_spark_columns=[scol_for(sdf, self._internal.data_spark_column_names[0])],
data_fields=[self._internal.data_fields[0]],
column_label_names=self._internal.column_label_names,
)
return first_series(DataFrame(internal))
def sort_values(
self, ascending: bool = True, inplace: bool = False, na_position: str = "last"
) -> Optional["Series"]:
"""
Sort by the values.
Sort a Series in ascending or descending order by some criterion.
Parameters
----------
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : Series ordered by values.
Examples
--------
>>> s = ps.Series([np.nan, 1, 3, 10, 5])
>>> s
0 NaN
1 1.0
2 3.0
3 10.0
4 5.0
dtype: float64
Sort values ascending order (default behaviour)
>>> s.sort_values(ascending=True)
1 1.0
2 3.0
4 5.0
3 10.0
0 NaN
dtype: float64
Sort values descending order
>>> s.sort_values(ascending=False)
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values inplace
>>> s.sort_values(ascending=False, inplace=True)
>>> s
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values putting NAs first
>>> s.sort_values(na_position='first')
0 NaN
1 1.0
2 3.0
4 5.0
3 10.0
dtype: float64
Sort a series of strings
>>> s = ps.Series(['z', 'b', 'd', 'a', 'c'])
>>> s
0 z
1 b
2 d
3 a
4 c
dtype: object
>>> s.sort_values()
3 a
1 b
4 c
2 d
0 z
dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
psdf = self._psdf[[self.name]]._sort(
by=[self.spark.column], ascending=ascending, na_position=na_position
)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
def sort_index(
self,
axis: Axis = 0,
level: Optional[Union[int, List[int]]] = None,
ascending: bool = True,
inplace: bool = False,
        kind: Optional[str] = None,
na_position: str = "last",
) -> Optional["Series"]:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
pandas-on-Spark does not allow specifying the sorting algorithm at the moment,
default None
        na_position : {'first', 'last'}, default 'last'
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : Series
Examples
--------
>>> df = ps.Series([2, 1, np.nan], index=['b', 'a', np.nan])
>>> df.sort_index()
a 1.0
b 2.0
NaN NaN
dtype: float64
>>> df.sort_index(ascending=False)
b 2.0
a 1.0
NaN NaN
dtype: float64
>>> df.sort_index(na_position='first')
NaN NaN
a 1.0
b 2.0
dtype: float64
>>> df.sort_index(inplace=True)
>>> df
a 1.0
b 2.0
NaN NaN
dtype: float64
>>> df = ps.Series(range(4), index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], name='0')
>>> df.sort_index()
a 0 3
1 2
b 0 1
1 0
Name: 0, dtype: int64
>>> df.sort_index(level=1) # doctest: +SKIP
a 0 3
b 0 1
a 1 2
b 1 0
Name: 0, dtype: int64
>>> df.sort_index(level=[1, 0])
a 0 3
b 0 1
a 1 2
b 1 0
Name: 0, dtype: int64
"""
inplace = validate_bool_kwarg(inplace, "inplace")
psdf = self._psdf[[self.name]].sort_index(
axis=axis, level=level, ascending=ascending, kind=kind, na_position=na_position
)
if inplace:
self._update_anchor(psdf)
return None
else:
return first_series(psdf)
def swaplevel(
self, i: Union[int, Name] = -2, j: Union[int, Name] = -1, copy: bool = True
) -> "Series":
"""
Swap levels i and j in a MultiIndex.
Default is to swap the two innermost levels of the index.
Parameters
----------
i, j : int, str
Level of the indices to be swapped. Can pass level name as string.
copy : bool, default True
Whether to copy underlying data. Must be True.
Returns
-------
Series
Series with levels swapped in MultiIndex.
Examples
--------
>>> midx = pd.MultiIndex.from_arrays([['a', 'b'], [1, 2]], names = ['word', 'number'])
>>> midx # doctest: +SKIP
MultiIndex([('a', 1),
('b', 2)],
names=['word', 'number'])
>>> psser = ps.Series(['x', 'y'], index=midx)
>>> psser
word number
a 1 x
b 2 y
dtype: object
>>> psser.swaplevel()
number word
1 a x
2 b y
dtype: object
>>> psser.swaplevel(0, 1)
number word
1 a x
2 b y
dtype: object
>>> psser.swaplevel('number', 'word')
number word
1 a x
2 b y
dtype: object
"""
assert copy is True
return first_series(self.to_frame().swaplevel(i, j, axis=0)).rename(self.name)
def swapaxes(self, i: Axis, j: Axis, copy: bool = True) -> "Series":
"""
Interchange axes and swap values axes appropriately.
Parameters
----------
i: {0 or 'index', 1 or 'columns'}. The axis to swap.
j: {0 or 'index', 1 or 'columns'}. The axis to swap.
copy : bool, default True.
Returns
-------
Series
Examples
--------
>>> psser = ps.Series([1, 2, 3], index=["x", "y", "z"])
>>> psser
x 1
y 2
z 3
dtype: int64
>>>
>>> psser.swapaxes(0, 0)
x 1
y 2
z 3
dtype: int64
"""
assert copy is True
i = validate_axis(i)
j = validate_axis(j)
if not i == j == 0:
raise ValueError("Axis must be 0 for Series")
return self.copy()
def add_prefix(self, prefix: str) -> "Series":
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series
New Series with updated labels.
See Also
--------
Series.add_suffix: Suffix column labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
"""
assert isinstance(prefix, str)
internal = self._internal.resolved_copy
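        # Row labels live in the index Spark columns, so the prefix is concatenated
        # onto each index column while the data columns are carried over unchanged.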
sdf = internal.spark_frame.select(
[
F.concat(SF.lit(prefix), index_spark_column).alias(index_spark_column_name)
for index_spark_column, index_spark_column_name in zip(
internal.index_spark_columns, internal.index_spark_column_names
)
]
+ internal.data_spark_columns
)
return first_series(
DataFrame(internal.with_new_sdf(sdf, index_fields=([None] * internal.index_level)))
)
def add_suffix(self, suffix: str) -> "Series":
"""
Suffix labels with string suffix.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series
New Series with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
"""
assert isinstance(suffix, str)
internal = self._internal.resolved_copy
sdf = internal.spark_frame.select(
[
F.concat(index_spark_column, SF.lit(suffix)).alias(index_spark_column_name)
for index_spark_column, index_spark_column_name in zip(
internal.index_spark_columns, internal.index_spark_column_names
)
]
+ internal.data_spark_columns
)
return first_series(
DataFrame(internal.with_new_sdf(sdf, index_fields=([None] * internal.index_level)))
)
def corr(self, other: "Series", method: str = "pearson") -> float:
"""
Compute correlation with `other` Series, excluding missing values.
Parameters
----------
other : Series
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
correlation : float
Examples
--------
>>> df = ps.DataFrame({'s1': [.2, .0, .6, .2],
... 's2': [.3, .6, .0, .1]})
>>> s1 = df.s1
>>> s2 = df.s2
>>> s1.corr(s2, method='pearson') # doctest: +ELLIPSIS
-0.851064...
>>> s1.corr(s2, method='spearman') # doctest: +ELLIPSIS
-0.948683...
Notes
-----
There are behavior differences between pandas-on-Spark and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. pandas-on-Spark will return an error.
* pandas-on-Spark doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
# This implementation is suboptimal because it computes more than necessary,
# but it should be a start
columns = ["__corr_arg1__", "__corr_arg2__"]
psdf = self._psdf.assign(__corr_arg1__=self, __corr_arg2__=other)[columns]
psdf.columns = columns
c = corr(psdf, method=method)
return c.loc[tuple(columns)]
def nsmallest(self, n: int = 5) -> "Series":
"""
Return the smallest `n` elements.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
See Also
--------
Series.nlargest: Get the `n` largest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer,
        the two would have the same performance.
Examples
--------
        >>> data = [1, 2, 3, 4, np.nan, 6, 7, 8]
>>> s = ps.Series(data)
>>> s
0 1.0
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
6 7.0
7 8.0
dtype: float64
        The `n` smallest elements where ``n=5`` by default.
>>> s.nsmallest()
0 1.0
1 2.0
2 3.0
3 4.0
5 6.0
dtype: float64
>>> s.nsmallest(3)
0 1.0
1 2.0
2 3.0
dtype: float64
"""
return self.sort_values(ascending=True).head(n)
def nlargest(self, n: int = 5) -> "Series":
"""
Return the largest `n` elements.
Parameters
----------
n : int, default 5
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer,
        the two would have the same performance.
Examples
--------
        >>> data = [1, 2, 3, 4, np.nan, 6, 7, 8]
>>> s = ps.Series(data)
>>> s
0 1.0
1 2.0
2 3.0
3 4.0
4 NaN
5 6.0
6 7.0
7 8.0
dtype: float64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
7 8.0
6 7.0
5 6.0
3 4.0
2 3.0
dtype: float64
>>> s.nlargest(n=3)
7 8.0
6 7.0
5 6.0
dtype: float64
"""
return self.sort_values(ascending=False).head(n)
def append(
self, to_append: "Series", ignore_index: bool = False, verify_integrity: bool = False
) -> "Series":
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise Exception on creating index with duplicates
Returns
-------
appended : Series
Examples
--------
>>> s1 = ps.Series([1, 2, 3])
>>> s2 = ps.Series([4, 5, 6])
>>> s3 = ps.Series([4, 5, 6], index=[3,4,5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With ignore_index set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
"""
return first_series(
self.to_frame().append(to_append.to_frame(), ignore_index, verify_integrity)
).rename(self.name)
def sample(
self,
n: Optional[int] = None,
frac: Optional[float] = None,
replace: bool = False,
random_state: Optional[int] = None,
) -> "Series":
return first_series(
self.to_frame().sample(n=n, frac=frac, replace=replace, random_state=random_state)
).rename(self.name)
sample.__doc__ = DataFrame.sample.__doc__
@no_type_check
def hist(self, bins=10, **kwds):
return self.plot.hist(bins, **kwds)
hist.__doc__ = PandasOnSparkPlotAccessor.hist.__doc__
def apply(self, func: Callable, args: Sequence[Any] = (), **kwds: Any) -> "Series":
"""
Invoke function on values of Series.
Can be a Python function that only works on the Series.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> np.int32:
... return x ** 2
pandas-on-Spark uses return type hint and does not try to infer the type.
Parameters
----------
func : function
Python function to apply. Note that type hint for return type is required.
args : tuple
Positional arguments passed to func after the series value.
**kwds
Additional keyword arguments passed to func.
Returns
-------
Series
See Also
--------
Series.aggregate : Only perform aggregating type operations.
Series.transform : Only perform transforming type operations.
DataFrame.apply : The equivalent function for DataFrame.
Examples
--------
Create a Series with typical summer temperatures for each city.
>>> s = ps.Series([20, 21, 12],
... index=['London', 'New York', 'Helsinki'])
>>> s
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x) -> np.int64:
... return x ** 2
>>> s.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword
>>> def subtract_custom_value(x, custom_value) -> np.int64:
... return x - custom_value
>>> s.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``
>>> def add_custom_values(x, **kwargs) -> np.int64:
... for month in kwargs:
... x += kwargs[month]
... return x
>>> s.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library
>>> def numpy_log(col) -> np.float64:
... return np.log(col)
>>> s.apply(numpy_log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
You can omit the type hint and let pandas-on-Spark infer its type.
>>> s.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
"""
assert callable(func), "the first argument should be a callable function."
try:
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
except TypeError:
# Falls back to schema inference if it fails to get signature.
should_infer_schema = True
apply_each = wraps(func)(lambda s: s.apply(func, args=args, **kwds))
if should_infer_schema:
return self.pandas_on_spark._transform_batch(apply_each, None)
else:
sig_return = infer_return_type(func)
if not isinstance(sig_return, ScalarType):
raise ValueError(
"Expected the return type of this function to be of scalar type, "
"but found type {}".format(sig_return)
)
return_type = cast(ScalarType, sig_return)
return self.pandas_on_spark._transform_batch(apply_each, return_type)
# TODO: not all arguments are implemented comparing to pandas' for now.
def aggregate(self, func: Union[str, List[str]]) -> Union[Scalar, "Series"]:
"""Aggregate using one or more operations over the specified axis.
Parameters
----------
func : str or a list of str
function name(s) as string apply to series.
Returns
-------
scalar, Series
The return can be:
- scalar : when Series.agg is called with single function
- Series : when Series.agg is called with several functions
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
Series.apply : Invoke function on a Series.
Series.transform : Only perform transforming type operations.
Series.groupby : Perform operations over groups.
DataFrame.aggregate : The equivalent function for DataFrame.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s.agg('min')
1
>>> s.agg(['min', 'max']).sort_index()
max 4
min 1
dtype: int64
"""
if isinstance(func, list):
return first_series(self.to_frame().aggregate(func)).rename(self.name)
elif isinstance(func, str):
return getattr(self, func)()
else:
raise TypeError("func must be a string or list of strings")
agg = aggregate
def transpose(self, *args: Any, **kwargs: Any) -> "Series":
"""
Return the transpose, which is by definition self.
Examples
--------
It returns the same object as the transpose of the given series object, which is by
definition self.
>>> s = ps.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.transpose()
0 1
1 2
2 3
dtype: int64
"""
return self.copy()
T = property(transpose)
def transform(
self, func: Union[Callable, List[Callable]], axis: Axis = 0, *args: Any, **kwargs: Any
) -> Union["Series", DataFrame]:
"""
Call ``func`` producing the same type as `self` with transformed values
and that has the same axis length as input.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> np.int32:
... return x ** 2
pandas-on-Spark uses return type hint and does not try to infer the type.
Parameters
----------
func : function or list
A function or a list of functions to use for transforming the data.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
        An instance of the same type as `self` that must have the same length as the input.
See Also
--------
Series.aggregate : Only perform aggregating type operations.
Series.apply : Invoke function on Series.
DataFrame.transform : The equivalent function for DataFrame.
Examples
--------
>>> s = ps.Series(range(3))
>>> s
0 0
1 1
2 2
dtype: int64
>>> def sqrt(x) -> float:
... return np.sqrt(x)
>>> s.transform(sqrt)
0 0.000000
1 1.000000
2 1.414214
dtype: float64
Even though the resulting instance must have the same length as the
input, it is possible to provide several input functions:
>>> def exp(x) -> float:
... return np.exp(x)
>>> s.transform([sqrt, exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
You can omit the type hint and let pandas-on-Spark infer its type.
>>> s.transform([np.sqrt, np.exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
if isinstance(func, list):
applied = []
for f in func:
applied.append(self.apply(f, args=args, **kwargs).rename(f.__name__))
internal = self._internal.with_new_columns(applied)
return DataFrame(internal)
else:
return self.apply(func, args=args, **kwargs)
def round(self, decimals: int = 0) -> "Series":
"""
Round each value in a Series to the given number of decimals.
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Series object
See Also
--------
DataFrame.round
Examples
--------
>>> df = ps.Series([0.028208, 0.038683, 0.877076], name='x')
>>> df
0 0.028208
1 0.038683
2 0.877076
Name: x, dtype: float64
>>> df.round(2)
0 0.03
1 0.04
2 0.88
Name: x, dtype: float64
"""
if not isinstance(decimals, int):
raise TypeError("decimals must be an integer")
scol = F.round(self.spark.column, decimals)
return self._with_new_scol(scol) # TODO: dtype?
# TODO: add 'interpolation' parameter.
def quantile(
self, q: Union[float, Iterable[float]] = 0.5, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return value at the given quantile.
.. note:: Unlike pandas', the quantile in pandas-on-Spark is an approximated quantile
based upon approximate percentile computation because computing quantile across
a large dataset is extremely expensive.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
float or Series
If the current object is a Series and ``q`` is an array, a Series will be
returned where the index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4, 5])
>>> s.quantile(.5)
3.0
>>> (s + 1).quantile(.5)
4.0
>>> s.quantile([.25, .5, .75])
0.25 2.0
0.50 3.0
0.75 4.0
dtype: float64
>>> (s + 1).quantile([.25, .5, .75])
0.25 3.0
0.50 4.0
0.75 5.0
dtype: float64
"""
if isinstance(q, Iterable):
return first_series(
self.to_frame().quantile(q=q, axis=0, numeric_only=False, accuracy=accuracy)
).rename(self.name)
else:
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
if not isinstance(q, float):
raise TypeError(
"q must be a float or an array of floats; however, [%s] found." % type(q)
)
q_float = cast(float, q)
if q_float < 0.0 or q_float > 1.0:
raise ValueError("percentiles should all be in the interval [0, 1].")
def quantile(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), q_float, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(quantile, name="quantile")
# TODO: add axis, numeric_only, pct, na_option parameter
def rank(self, method: str = "average", ascending: bool = True) -> "Series":
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
        .. note:: the current implementation of rank uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
ranks : same type as caller
Examples
--------
>>> s = ps.Series([1, 2, 2, 3], name='A')
>>> s
0 1
1 2
2 2
3 3
Name: A, dtype: int64
>>> s.rank()
0 1.0
1 2.5
2 2.5
3 4.0
Name: A, dtype: float64
        If method is set to 'min', it uses the lowest rank in the group.
>>> s.rank(method='min')
0 1.0
1 2.0
2 2.0
3 4.0
Name: A, dtype: float64
        If method is set to 'max', it uses the highest rank in the group.
>>> s.rank(method='max')
0 1.0
1 3.0
2 3.0
3 4.0
Name: A, dtype: float64
        If method is set to 'first', ranks are assigned in the order the values appear, without grouping.
>>> s.rank(method='first')
0 1.0
1 2.0
2 3.0
3 4.0
Name: A, dtype: float64
        If method is set to 'dense', it leaves no gaps in the ranking.
>>> s.rank(method='dense')
0 1.0
1 2.0
2 2.0
3 3.0
Name: A, dtype: float64
"""
return self._rank(method, ascending).spark.analyzed
def _rank(
self,
method: str = "average",
ascending: bool = True,
*,
part_cols: Sequence["ColumnOrName"] = ()
) -> "Series":
if method not in ["average", "min", "max", "first", "dense"]:
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
raise ValueError(msg)
if self._internal.index_level > 1:
raise ValueError("rank do not support index now")
if ascending:
asc_func = lambda scol: scol.asc()
else:
asc_func = lambda scol: scol.desc()
if method == "first":
window = (
Window.orderBy(
asc_func(self.spark.column),
asc_func(F.col(NATURAL_ORDER_COLUMN_NAME)),
)
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
scol = F.row_number().over(window)
elif method == "dense":
window = (
Window.orderBy(asc_func(self.spark.column))
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
scol = F.dense_rank().over(window)
else:
if method == "average":
stat_func = F.mean
elif method == "min":
stat_func = F.min
elif method == "max":
stat_func = F.max
window1 = (
Window.orderBy(asc_func(self.spark.column))
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
window2 = Window.partitionBy([self.spark.column] + list(part_cols)).rowsBetween(
Window.unboundedPreceding, Window.unboundedFollowing
)
scol = stat_func(F.row_number().over(window1)).over(window2)
psser = self._with_new_scol(scol)
return psser.astype(np.float64)
def filter(
self,
items: Optional[Sequence[Any]] = None,
like: Optional[str] = None,
regex: Optional[str] = None,
axis: Optional[Axis] = None,
) -> "Series":
axis = validate_axis(axis)
if axis == 1:
raise ValueError("Series does not support columns axis.")
return first_series(
self.to_frame().filter(items=items, like=like, regex=regex, axis=axis)
).rename(self.name)
filter.__doc__ = DataFrame.filter.__doc__
def describe(self, percentiles: Optional[List[float]] = None) -> "Series":
return first_series(self.to_frame().describe(percentiles)).rename(self.name)
describe.__doc__ = DataFrame.describe.__doc__
def diff(self, periods: int = 1) -> "Series":
"""
First discrete difference of element.
        Calculates the difference of a Series element compared with another element in the
        Series (default is the element in the previous row).
        .. note:: the current implementation of diff uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
Returns
-------
diffed : Series
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.b.diff()
0 NaN
1 0.0
2 1.0
3 1.0
4 2.0
5 3.0
Name: b, dtype: float64
Difference with previous value
>>> df.c.diff(periods=3)
0 NaN
1 NaN
2 NaN
3 15.0
4 21.0
5 27.0
Name: c, dtype: float64
Difference with following value
>>> df.c.diff(periods=-1)
0 -3.0
1 -5.0
2 -7.0
3 -9.0
4 -11.0
5 NaN
Name: c, dtype: float64
"""
return self._diff(periods).spark.analyzed
def _diff(self, periods: int, *, part_cols: Sequence["ColumnOrName"] = ()) -> "Series":
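        # Difference against the row `periods` positions away, computed with F.lag
        # over the natural-order window; `part_cols` keeps grouped diffs per group.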
if not isinstance(periods, int):
raise TypeError("periods should be an int; however, got [%s]" % type(periods).__name__)
window = (
Window.partitionBy(*part_cols)
.orderBy(NATURAL_ORDER_COLUMN_NAME)
.rowsBetween(-periods, -periods)
)
scol = self.spark.column - F.lag(self.spark.column, periods).over(window)
return self._with_new_scol(scol, field=self._internal.data_fields[0].copy(nullable=True))
def idxmax(self, skipna: bool = True) -> Union[Tuple, Any]:
"""
Return the row label of the maximum value.
If multiple values equal the maximum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
Returns
-------
Index
Label of the maximum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
Series.idxmin : Return index *label* of the first occurrence
of minimum of values.
Examples
--------
>>> s = ps.Series(data=[1, None, 4, 3, 5],
... index=['A', 'B', 'C', 'D', 'E'])
>>> s
A 1.0
B NaN
C 4.0
D 3.0
E 5.0
dtype: float64
>>> s.idxmax()
'E'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmax(skipna=False)
nan
In case of multi-index, you get a tuple:
>>> index = pd.MultiIndex.from_arrays([
... ['a', 'a', 'b', 'b'], ['c', 'd', 'e', 'f']], names=('first', 'second'))
>>> s = ps.Series(data=[1, None, 4, 5], index=index)
>>> s
first second
a c 1.0
d NaN
b e 4.0
f 5.0
dtype: float64
>>> s.idxmax()
('b', 'f')
If multiple values equal the maximum, the first row label with that
value is returned.
>>> s = ps.Series([1, 100, 1, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
>>> s
10 1
3 100
5 1
2 100
1 1
8 100
dtype: int64
>>> s.idxmax()
3
"""
sdf = self._internal.spark_frame
scol = self.spark.column
index_scols = self._internal.index_spark_columns
# desc_nulls_(last|first) is used via Py4J directly because
# it's not supported in Spark 2.3.
if skipna:
sdf = sdf.orderBy(Column(scol._jc.desc_nulls_last()), NATURAL_ORDER_COLUMN_NAME)
else:
sdf = sdf.orderBy(Column(scol._jc.desc_nulls_first()), NATURAL_ORDER_COLUMN_NAME)
results = sdf.select([scol] + index_scols).take(1)
if len(results) == 0:
raise ValueError("attempt to get idxmin of an empty sequence")
if results[0][0] is None:
            # This will only happen when skipna is False because we will
# place nulls first.
return np.nan
values = list(results[0][1:])
if len(values) == 1:
return values[0]
else:
return tuple(values)
def idxmin(self, skipna: bool = True) -> Union[Tuple, Any]:
"""
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
Returns
-------
Index
Label of the minimum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
Examples
--------
>>> s = ps.Series(data=[1, None, 4, 0],
... index=['A', 'B', 'C', 'D'])
>>> s
A 1.0
B NaN
C 4.0
D 0.0
dtype: float64
>>> s.idxmin()
'D'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan
In case of multi-index, you get a tuple:
>>> index = pd.MultiIndex.from_arrays([
... ['a', 'a', 'b', 'b'], ['c', 'd', 'e', 'f']], names=('first', 'second'))
>>> s = ps.Series(data=[1, None, 4, 0], index=index)
>>> s
first second
a c 1.0
d NaN
b e 4.0
f 0.0
dtype: float64
>>> s.idxmin()
('b', 'f')
If multiple values equal the minimum, the first row label with that
value is returned.
>>> s = ps.Series([1, 100, 1, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
>>> s
10 1
3 100
5 1
2 100
1 1
8 100
dtype: int64
>>> s.idxmin()
10
"""
sdf = self._internal.spark_frame
scol = self.spark.column
index_scols = self._internal.index_spark_columns
# asc_nulls_(last|first)is used via Py4J directly because
# it's not supported in Spark 2.3.
if skipna:
sdf = sdf.orderBy(Column(scol._jc.asc_nulls_last()), NATURAL_ORDER_COLUMN_NAME)
else:
sdf = sdf.orderBy(Column(scol._jc.asc_nulls_first()), NATURAL_ORDER_COLUMN_NAME)
results = sdf.select([scol] + index_scols).take(1)
if len(results) == 0:
raise ValueError("attempt to get idxmin of an empty sequence")
if results[0][0] is None:
            # This will only happen when skipna is False because we will
# place nulls first.
return np.nan
values = list(results[0][1:])
if len(values) == 1:
return values[0]
else:
return tuple(values)
def pop(self, item: Name) -> Union["Series", Scalar]:
"""
Return item and drop from series.
Parameters
----------
item : label
Label of index to be popped.
Returns
-------
Value that is popped from series.
Examples
--------
>>> s = ps.Series(data=np.arange(3), index=['A', 'B', 'C'])
>>> s
A 0
B 1
C 2
dtype: int64
>>> s.pop('A')
0
>>> s
B 1
C 2
dtype: int64
>>> s = ps.Series(data=np.arange(3), index=['A', 'A', 'C'])
>>> s
A 0
A 1
C 2
dtype: int64
>>> s.pop('A')
A 0
A 1
dtype: int64
>>> s
C 2
dtype: int64
        Also supports MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.pop('lama')
speed 45.0
weight 200.0
length 1.2
dtype: float64
>>> s
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
        Also supports MultiIndex with several index levels.
>>> midx = pd.MultiIndex([['a', 'b', 'c'],
... ['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 0, 0, 0, 1, 1, 1],
... [0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 0, 2]]
... )
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
a lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
b falcon speed 320.0
speed 1.0
length 0.3
dtype: float64
>>> s.pop(('a', 'lama'))
speed 45.0
weight 200.0
length 1.2
dtype: float64
>>> s
a cow speed 30.0
weight 250.0
length 1.5
b falcon speed 320.0
speed 1.0
length 0.3
dtype: float64
>>> s.pop(('b', 'falcon', 'speed'))
(b, falcon, speed) 320.0
(b, falcon, speed) 1.0
dtype: float64
"""
if not is_name_like_value(item):
raise TypeError("'key' should be string or tuple that contains strings")
if not is_name_like_tuple(item):
item = (item,)
if self._internal.index_level < len(item):
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
len(item), self._internal.index_level
)
)
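        # Collect the rows matching `item`, keeping only the remaining index levels
        # and the data column, then drop those rows from this Series in place.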
internal = self._internal
scols = internal.index_spark_columns[len(item) :] + [self.spark.column]
rows = [internal.spark_columns[level] == index for level, index in enumerate(item)]
sdf = internal.spark_frame.filter(reduce(lambda x, y: x & y, rows)).select(scols)
psdf = self._drop(item)
self._update_anchor(psdf)
if self._internal.index_level == len(item):
            # if the key covers every index level, return the scalar when it matches exactly one row
pdf = sdf.limit(2).toPandas()
length = len(pdf)
if length == 1:
return pdf[internal.data_spark_column_names[0]].iloc[0]
item_string = name_like_string(item)
sdf = sdf.withColumn(SPARK_DEFAULT_INDEX_NAME, SF.lit(str(item_string)))
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],
column_labels=[self._column_label],
data_fields=[self._internal.data_fields[0]],
)
return first_series(DataFrame(internal))
else:
internal = internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in internal.index_spark_column_names[len(item) :]
],
index_fields=internal.index_fields[len(item) :],
index_names=self._internal.index_names[len(item) :],
data_spark_columns=[scol_for(sdf, internal.data_spark_column_names[0])],
)
return first_series(DataFrame(internal))
def copy(self, deep: bool = True) -> "Series":
"""
Make a copy of this object's indices and data.
Parameters
----------
deep : bool, default True
            This parameter is not supported; it is only a dummy parameter to match pandas.
Returns
-------
copy : Series
Examples
--------
>>> s = ps.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
"""
return self._psdf.copy(deep=deep)._psser_for(self._column_label)
def mode(self, dropna: bool = True) -> "Series":
"""
Return the mode(s) of the dataset.
Always returns Series even if only one value is returned.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
Returns
-------
Series
Modes of the Series.
Examples
--------
>>> s = ps.Series([0, 0, 1, 1, 1, np.nan, np.nan, np.nan])
>>> s
0 0.0
1 0.0
2 1.0
3 1.0
4 1.0
5 NaN
6 NaN
7 NaN
dtype: float64
>>> s.mode()
0 1.0
dtype: float64
If there are several same modes, all items are shown
>>> s = ps.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
... np.nan, np.nan, np.nan])
>>> s
0 0.0
1 0.0
2 1.0
3 1.0
4 1.0
5 2.0
6 2.0
7 2.0
8 3.0
9 3.0
10 3.0
11 NaN
12 NaN
13 NaN
dtype: float64
>>> s.mode().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
<BLANKLINE>
... 1.0
... 2.0
... 3.0
dtype: float64
With 'dropna' set to 'False', we can also see NaN in the result
>>> s.mode(False).sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
<BLANKLINE>
... 1.0
... 2.0
... 3.0
... NaN
dtype: float64
"""
ser_count = self.value_counts(dropna=dropna, sort=False)
sdf_count = ser_count._internal.spark_frame
most_value = ser_count.max()
sdf_most_value = sdf_count.filter("count == {}".format(most_value))
sdf = sdf_most_value.select(
F.col(SPARK_DEFAULT_INDEX_NAME).alias(SPARK_DEFAULT_SERIES_NAME)
)
internal = InternalFrame(spark_frame=sdf, index_spark_columns=None, column_labels=[None])
return first_series(DataFrame(internal))
def keys(self) -> "ps.Index":
"""
Return alias for index.
Returns
-------
Index
Index of the Series.
Examples
--------
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> psser = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
>>> psser.keys() # doctest: +SKIP
MultiIndex([( 'lama', 'speed'),
( 'lama', 'weight'),
( 'lama', 'length'),
( 'cow', 'speed'),
( 'cow', 'weight'),
( 'cow', 'length'),
('falcon', 'speed'),
('falcon', 'weight'),
('falcon', 'length')],
)
"""
return self.index
# TODO: 'regex', 'method' parameter
def replace(
self,
to_replace: Optional[Union[Any, List, Tuple, Dict]] = None,
value: Optional[Union[List, Tuple]] = None,
regex: bool = False,
) -> "Series":
"""
Replace values given in to_replace with value.
Values of the Series are replaced with other values dynamically.
Parameters
----------
to_replace : str, list, tuple, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str:
- numeric: numeric values equal to to_replace will be replaced with value
- str: string exactly matching to_replace will be replaced with value
* list of str or numeric:
- if to_replace and value are both lists or tuples, they must be the same length.
- str and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values for different
existing values.
For example, {'a': 'b', 'y': 'z'} replaces the value ‘a’ with ‘b’ and ‘y’
with ‘z’. To use a dict in this way the value parameter should be None.
- For a DataFrame a dict can specify that different values should be replaced
in different columns. For example, {'a': 1, 'b': 'z'} looks for the value 1
in column ‘a’ and the value ‘z’ in column ‘b’ and replaces these values with
whatever is specified in value.
The value parameter should not be None in this case.
You can treat this as a special case of passing two lists except that you are
specifying the column to search in.
See the examples section for examples of each of these.
value : scalar, dict, list, tuple, str default None
Value to replace any values matching to_replace with.
For a DataFrame a dict of values can be used to specify which value to use
for each column (columns not in the dict will not be filled).
Regular expressions, strings and lists or dicts of such objects are also allowed.
Returns
-------
Series
Object after replacement.
Examples
--------
Scalar `to_replace` and `value`
>>> s = ps.Series([0, 1, 2, 3, 4])
>>> s
0 0
1 1
2 2
3 3
4 4
dtype: int64
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
List-like `to_replace`
>>> s.replace([0, 4], 5000)
0 5000
1 1
2 2
3 3
4 5000
dtype: int64
>>> s.replace([1, 2, 3], [10, 20, 30])
0 0
1 10
2 20
3 30
4 4
dtype: int64
Dict-like `to_replace`
>>> s.replace({1: 1000, 2: 2000, 3: 3000, 4: 4000})
0 0
1 1000
2 2000
3 3000
4 4000
dtype: int64
        Also supports MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.replace(45, 450)
lama speed 450.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.replace([45, 30, 320], 500)
lama speed 500.0
weight 200.0
length 1.2
cow speed 500.0
weight 250.0
length 1.5
falcon speed 500.0
weight 1.0
length 0.3
dtype: float64
>>> s.replace({45: 450, 30: 300})
lama speed 450.0
weight 200.0
length 1.2
cow speed 300.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
"""
if to_replace is None:
return self.fillna(method="ffill")
if not isinstance(to_replace, (str, list, tuple, dict, int, float)):
raise TypeError("'to_replace' should be one of str, list, tuple, dict, int, float")
if regex:
raise NotImplementedError("replace currently not support for regex")
to_replace = list(to_replace) if isinstance(to_replace, tuple) else to_replace
value = list(value) if isinstance(value, tuple) else value
if isinstance(to_replace, list) and isinstance(value, list):
if not len(to_replace) == len(value):
raise ValueError(
"Replacement lists must match in length. Expecting {} got {}".format(
len(to_replace), len(value)
)
)
to_replace = {k: v for k, v in zip(to_replace, value)}
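        # A dict (or a pair of equal-length lists folded into one above) becomes a
        # single chained CASE WHEN expression; NaN/None keys are matched through
        # isnan()/isNull() because equality against null never holds in Spark SQL.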
if isinstance(to_replace, dict):
is_start = True
if len(to_replace) == 0:
current = self.spark.column
else:
for to_replace_, value in to_replace.items():
cond = (
(F.isnan(self.spark.column) | self.spark.column.isNull())
if pd.isna(to_replace_)
else (self.spark.column == SF.lit(to_replace_))
)
if is_start:
current = F.when(cond, value)
is_start = False
else:
current = current.when(cond, value)
current = current.otherwise(self.spark.column)
else:
cond = self.spark.column.isin(to_replace)
# to_replace may be a scalar
if np.array(pd.isna(to_replace)).any():
cond = cond | F.isnan(self.spark.column) | self.spark.column.isNull()
current = F.when(cond, value).otherwise(self.spark.column)
return self._with_new_scol(current) # TODO: dtype?
def update(self, other: "Series") -> None:
"""
Modify Series in place using non-NA values from passed Series. Aligns on index.
Parameters
----------
other : Series
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> s = ps.Series([1, 2, 3])
>>> s.update(ps.Series([4, 5, 6]))
>>> s.sort_index()
0 4
1 5
2 6
dtype: int64
>>> s = ps.Series(['a', 'b', 'c'])
>>> s.update(ps.Series(['d', 'e'], index=[0, 2]))
>>> s.sort_index()
0 d
1 b
2 e
dtype: object
>>> s = ps.Series([1, 2, 3])
>>> s.update(ps.Series([4, 5, 6, 7, 8]))
>>> s.sort_index()
0 4
1 5
2 6
dtype: int64
>>> s = ps.Series([1, 2, 3], index=[10, 11, 12])
>>> s
10 1
11 2
12 3
dtype: int64
>>> s.update(ps.Series([4, 5, 6]))
>>> s.sort_index()
10 1
11 2
12 3
dtype: int64
>>> s.update(ps.Series([4, 5, 6], index=[11, 12, 13]))
>>> s.sort_index()
10 1
11 4
12 5
dtype: int64
If ``other`` contains NaNs the corresponding values are not updated
in the original Series.
>>> s = ps.Series([1, 2, 3])
>>> s.update(ps.Series([4, np.nan, 6]))
>>> s.sort_index()
0 4.0
1 2.0
2 6.0
dtype: float64
>>> reset_option("compute.ops_on_diff_frames")
"""
if not isinstance(other, Series):
raise TypeError("'other' must be a Series")
combined = combine_frames(self._psdf, other._psdf, how="leftouter")
this_scol = combined["this"]._internal.spark_column_for(self._column_label)
that_scol = combined["that"]._internal.spark_column_for(other._column_label)
scol = (
F.when(that_scol.isNotNull(), that_scol)
.otherwise(this_scol)
.alias(self._psdf._internal.spark_column_name_for(self._column_label))
)
internal = combined["this"]._internal.with_new_spark_column(
self._column_label, scol # TODO: dtype?
)
self._psdf._update_internal_frame(internal.resolved_copy, requires_same_anchor=False)
def where(self, cond: "Series", other: Any = np.nan) -> "Series":
"""
Replace values where the condition is False.
Parameters
----------
cond : boolean Series
Where cond is True, keep the original value. Where False,
replace with corresponding value from other.
other : scalar, Series
Entries where cond is False are replaced with corresponding value from other.
Returns
-------
Series
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> s1 = ps.Series([0, 1, 2, 3, 4])
>>> s2 = ps.Series([100, 200, 300, 400, 500])
>>> s1.where(s1 > 0).sort_index()
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s1.where(s1 > 1, 10).sort_index()
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> s1.where(s1 > 1, s1 + 100).sort_index()
0 100
1 101
2 2
3 3
4 4
dtype: int64
>>> s1.where(s1 > 1, s2).sort_index()
0 100
1 200
2 2
3 3
4 4
dtype: int64
>>> reset_option("compute.ops_on_diff_frames")
"""
assert isinstance(cond, Series)
# We should check the DataFrame from both `cond` and `other`.
should_try_ops_on_diff_frame = not same_anchor(cond, self) or (
isinstance(other, Series) and not same_anchor(other, self)
)
if should_try_ops_on_diff_frame:
            # Try to perform it with the 'compute.ops_on_diff_frames' option.
psdf = self.to_frame()
tmp_cond_col = verify_temp_column_name(psdf, "__tmp_cond_col__")
tmp_other_col = verify_temp_column_name(psdf, "__tmp_other_col__")
psdf[tmp_cond_col] = cond
psdf[tmp_other_col] = other
            # The logic above makes the Spark DataFrame look like below:
# +-----------------+---+----------------+-----------------+
# |__index_level_0__| 0|__tmp_cond_col__|__tmp_other_col__|
# +-----------------+---+----------------+-----------------+
# | 0| 0| false| 100|
# | 1| 1| false| 200|
# | 3| 3| true| 400|
# | 2| 2| true| 300|
# | 4| 4| true| 500|
# +-----------------+---+----------------+-----------------+
condition = (
F.when(
psdf[tmp_cond_col].spark.column,
psdf._psser_for(psdf._internal.column_labels[0]).spark.column,
)
.otherwise(psdf[tmp_other_col].spark.column)
.alias(psdf._internal.data_spark_column_names[0])
)
internal = psdf._internal.with_new_columns(
[condition], column_labels=self._internal.column_labels
)
return first_series(DataFrame(internal))
else:
if isinstance(other, Series):
other = other.spark.column
condition = (
F.when(cond.spark.column, self.spark.column)
.otherwise(other)
.alias(self._internal.data_spark_column_names[0])
)
return self._with_new_scol(condition)
def mask(self, cond: "Series", other: Any = np.nan) -> "Series":
"""
Replace values where the condition is True.
Parameters
----------
cond : boolean Series
Where cond is False, keep the original value. Where True,
replace with corresponding value from other.
other : scalar, Series
Entries where cond is True are replaced with corresponding value from other.
Returns
-------
Series
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> s1 = ps.Series([0, 1, 2, 3, 4])
>>> s2 = ps.Series([100, 200, 300, 400, 500])
>>> s1.mask(s1 > 0).sort_index()
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s1.mask(s1 > 1, 10).sort_index()
0 0
1 1
2 10
3 10
4 10
dtype: int64
>>> s1.mask(s1 > 1, s1 + 100).sort_index()
0 0
1 1
2 102
3 103
4 104
dtype: int64
>>> s1.mask(s1 > 1, s2).sort_index()
0 0
1 1
2 300
3 400
4 500
dtype: int64
>>> reset_option("compute.ops_on_diff_frames")
"""
return self.where(cast(Series, ~cond), other)
def xs(self, key: Name, level: Optional[int] = None) -> "Series":
"""
Return cross-section from the Series.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
Returns
-------
Series
Cross-section from the original Series
corresponding to the selected index levels.
Examples
--------
>>> midx = pd.MultiIndex([['a', 'b', 'c'],
... ['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
a lama speed 45.0
weight 200.0
length 1.2
b cow speed 30.0
weight 250.0
length 1.5
c falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
Get values at specified index
>>> s.xs('a')
lama speed 45.0
weight 200.0
length 1.2
dtype: float64
Get values at several indexes
>>> s.xs(('a', 'lama'))
speed 45.0
weight 200.0
length 1.2
dtype: float64
Get values at specified index and level
>>> s.xs('lama', level=1)
a speed 45.0
weight 200.0
length 1.2
dtype: float64
"""
if not isinstance(key, tuple):
key = (key,)
if level is None:
level = 0
internal = self._internal
scols = (
internal.index_spark_columns[:level]
+ internal.index_spark_columns[level + len(key) :]
+ [self.spark.column]
)
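        # Build one equality filter per element of `key` against its index level
        # (levels `level`, `level` + 1, ...); e.g. key=('a', 'lama') with level=0
        # requires level 0 == 'a' and level 1 == 'lama'.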
rows = [internal.spark_columns[lvl] == index for lvl, index in enumerate(key, level)]
sdf = internal.spark_frame.filter(reduce(lambda x, y: x & y, rows)).select(scols)
if internal.index_level == len(key):
            # The key consumes all index levels; if exactly one row matches,
            # return the scalar value rather than a length-1 Series.
pdf = sdf.limit(2).toPandas()
length = len(pdf)
if length == 1:
return pdf[self._internal.data_spark_column_names[0]].iloc[0]
index_spark_column_names = (
internal.index_spark_column_names[:level]
+ internal.index_spark_column_names[level + len(key) :]
)
index_names = internal.index_names[:level] + internal.index_names[level + len(key) :]
index_fields = internal.index_fields[:level] + internal.index_fields[level + len(key) :]
internal = internal.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_spark_column_names],
index_names=index_names,
index_fields=index_fields,
data_spark_columns=[scol_for(sdf, internal.data_spark_column_names[0])],
)
return first_series(DataFrame(internal))
def pct_change(self, periods: int = 1) -> "Series":
"""
Percentage change between the current and a prior element.
        .. note:: the current implementation of this API uses Spark's Window without
            specifying a partition specification. This moves all data into a single
            partition on a single machine and could cause serious performance
            degradation. Avoid this method on very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
Returns
-------
Series
Examples
--------
>>> psser = ps.Series([90, 91, 85], index=[2, 4, 1])
>>> psser
2 90
4 91
1 85
dtype: int64
>>> psser.pct_change()
2 NaN
4 0.011111
1 -0.065934
dtype: float64
>>> psser.sort_index().pct_change()
1 NaN
2 0.058824
4 0.011111
dtype: float64
>>> psser.pct_change(periods=2)
2 NaN
4 NaN
1 -0.055556
dtype: float64
"""
scol = self.spark.column
window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods)
prev_row = F.lag(scol, periods).over(window)
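        # pct_change is (current - prior) / prior; the lag over the natural-order
        # window fetches the value `periods` rows back, e.g. for periods=1:
        #   [90, 91, 85] -> [NaN, (91 - 90) / 90, (85 - 91) / 91]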
return self._with_new_scol((scol - prev_row) / prev_row).spark.analyzed
def combine_first(self, other: "Series") -> "Series":
"""
Combine Series values, choosing the calling Series's values first.
Parameters
----------
other : Series
The value(s) to be combined with the `Series`.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine : Perform elementwise operation on two Series
using a given function.
Notes
-----
Result index will be the union of the two indexes.
Examples
--------
>>> s1 = ps.Series([1, np.nan])
>>> s2 = ps.Series([3, 4])
>>> with ps.option_context("compute.ops_on_diff_frames", True):
... s1.combine_first(s2)
0 1.0
1 4.0
dtype: float64
"""
if not isinstance(other, ps.Series):
raise TypeError("`combine_first` only allows `Series` for parameter `other`")
if same_anchor(self, other):
this = self.spark.column
that = other.spark.column
combined = self._psdf
else:
combined = combine_frames(self._psdf, other._psdf)
this = combined["this"]._internal.spark_column_for(self._column_label)
that = combined["that"]._internal.spark_column_for(other._column_label)
# If `self` has missing value, use value of `other`
cond = F.when(this.isNull(), that).otherwise(this)
# If `self` and `other` come from same frame, the anchor should be kept
if same_anchor(self, other):
return self._with_new_scol(cond) # TODO: dtype?
index_scols = combined._internal.index_spark_columns
sdf = combined._internal.spark_frame.select(
*index_scols, cond.alias(self._internal.data_spark_column_names[0])
).distinct()
internal = self._internal.with_new_sdf(
sdf, index_fields=combined._internal.index_fields, data_fields=[None] # TODO: dtype?
)
return first_series(DataFrame(internal))
def dot(self, other: Union["Series", DataFrame]) -> Union[Scalar, "Series"]:
"""
Compute the dot product between the Series and the columns of other.
This method computes the dot product between the Series and another
one, or the Series and each columns of a DataFrame.
It can also be called using `self @ other` in Python >= 3.5.
        .. note:: This API is slightly different from pandas when the indexes of the two
            Series are not aligned. Matching pandas' behavior would require reading the
            whole data, for example to count the rows; pandas raises an exception, whereas
            pandas-on-Spark just proceeds, permissively treating mismatches as NaN.
>>> pdf1 = pd.Series([1, 2, 3], index=[0, 1, 2])
>>> pdf2 = pd.Series([1, 2, 3], index=[0, 1, 3])
>>> pdf1.dot(pdf2) # doctest: +SKIP
...
ValueError: matrices are not aligned
>>> psdf1 = ps.Series([1, 2, 3], index=[0, 1, 2])
>>> psdf2 = ps.Series([1, 2, 3], index=[0, 1, 3])
>>> psdf1.dot(psdf2) # doctest: +SKIP
5
Parameters
----------
other : Series, DataFrame.
The other object to compute the dot product with its columns.
Returns
-------
scalar, Series
            Return the dot product of the Series and other if other is a
            Series, or the Series of the dot products of the Series with each
            column of other if other is a DataFrame.
Notes
-----
        The Series and other have to share the same index if other is a Series
or a DataFrame.
Examples
--------
>>> s = ps.Series([0, 1, 2, 3])
>>> s.dot(s)
14
>>> s @ s
14
>>> psdf = ps.DataFrame({'x': [0, 1, 2, 3], 'y': [0, -1, -2, -3]})
>>> psdf
x y
0 0 0
1 1 -1
2 2 -2
3 3 -3
>>> with ps.option_context("compute.ops_on_diff_frames", True):
... s.dot(psdf)
...
x 14
y -14
dtype: int64
"""
if isinstance(other, DataFrame):
if not same_anchor(self, other):
if not self.index.sort_values().equals(other.index.sort_values()):
raise ValueError("matrices are not aligned")
other_copy = other.copy() # type: DataFrame
column_labels = other_copy._internal.column_labels
self_column_label = verify_temp_column_name(other_copy, "__self_column__")
other_copy[self_column_label] = self
self_psser = other_copy._psser_for(self_column_label)
product_pssers = [
cast(Series, other_copy._psser_for(label) * self_psser) for label in column_labels
]
dot_product_psser = DataFrame(
other_copy._internal.with_new_columns(product_pssers, column_labels=column_labels)
).sum()
return cast(Series, dot_product_psser).rename(self.name)
else:
assert isinstance(other, Series)
if not same_anchor(self, other):
if len(self.index) != len(other.index):
raise ValueError("matrices are not aligned")
return (self * other).sum()
def __matmul__(self, other: Union["Series", DataFrame]) -> Union[Scalar, "Series"]:
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def repeat(self, repeats: Union[int, "Series"]) -> "Series":
"""
Repeat elements of a Series.
Returns a new Series where each element of the current Series
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or Series
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
Series.
Returns
-------
Series
Newly created Series with repeated elements.
See Also
--------
Index.repeat : Equivalent function for Index.
Examples
--------
>>> s = ps.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
>>> s.repeat(2)
0 a
1 b
2 c
0 a
1 b
2 c
dtype: object
>>> ps.Series([1, 2, 3]).repeat(0)
Series([], dtype: int64)
"""
if not isinstance(repeats, (int, Series)):
raise TypeError(
"`repeats` argument must be integer or Series, but got {}".format(type(repeats))
)
if isinstance(repeats, Series):
if not same_anchor(self, repeats):
psdf = self.to_frame()
temp_repeats = verify_temp_column_name(psdf, "__temp_repeats__")
psdf[temp_repeats] = repeats
return (
psdf._psser_for(psdf._internal.column_labels[0])
.repeat(psdf[temp_repeats])
.rename(self.name)
)
else:
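                # array_repeat builds an array with `repeats` copies of each value and
                # explode flattens it back into rows; `repeats` is cast to int32 since
                # the count column must be an integer.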
scol = F.explode(
F.array_repeat(self.spark.column, repeats.astype("int32").spark.column)
).alias(name_like_string(self.name))
sdf = self._internal.spark_frame.select(self._internal.index_spark_columns + [scol])
internal = self._internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
data_spark_columns=[scol_for(sdf, name_like_string(self.name))],
)
return first_series(DataFrame(internal))
else:
if repeats < 0:
raise ValueError("negative dimensions are not allowed")
psdf = self._psdf[[self.name]]
if repeats == 0:
return first_series(DataFrame(psdf._internal.with_filter(SF.lit(False))))
else:
return first_series(ps.concat([psdf] * repeats))
def asof(self, where: Union[Any, List]) -> Union[Scalar, "Series"]:
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
If there is no good value, NaN is returned.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
where : index or array-like of indices
Returns
-------
scalar or Series
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like
Return scalar or Series
Notes
-----
Indices are assumed to be sorted. Raises if this is not the case.
Examples
--------
>>> s = ps.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
A scalar `where`.
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20]).sort_index()
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
"""
should_return_series = True
if isinstance(self.index, ps.MultiIndex):
raise ValueError("asof is not supported for a MultiIndex")
if isinstance(where, (ps.Index, ps.Series, DataFrame)):
raise ValueError("where cannot be an Index, Series or a DataFrame")
if not self.index.is_monotonic_increasing:
raise ValueError("asof requires a sorted index")
if not is_list_like(where):
should_return_series = False
where = [where]
index_scol = self._internal.index_spark_columns[0]
index_type = self._internal.spark_type_for(index_scol)
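        # For each element of `where`, aggregate the values whose index is at or
        # before it; rows with a larger index contribute null to F.when and are
        # therefore ignored by F.max.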
cond = [
F.max(F.when(index_scol <= SF.lit(index).cast(index_type), self.spark.column))
for index in where
]
sdf = self._internal.spark_frame.select(cond)
if not should_return_series:
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
result = cast(pd.DataFrame, sdf.limit(1).toPandas()).iloc[0, 0]
return result if result is not None else np.nan
# The data is expected to be small so it's fine to transpose/use default index.
with ps.option_context("compute.default_index_type", "distributed", "compute.max_rows", 1):
psdf = ps.DataFrame(sdf) # type: DataFrame
psdf.columns = pd.Index(where)
return first_series(psdf.transpose()).rename(self.name)
def mad(self) -> float:
"""
Return the mean absolute deviation of values.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.mad()
1.0
"""
sdf = self._internal.spark_frame
spark_column = self.spark.column
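        # Mean absolute deviation: mean(|x - mean(x)|). For [1, 2, 3, 4] the mean is
        # 2.5, the absolute deviations are [1.5, 0.5, 0.5, 1.5], so mad is 1.0.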
avg = unpack_scalar(sdf.select(F.avg(spark_column)))
mad = unpack_scalar(sdf.select(F.avg(F.abs(spark_column - avg))))
return mad
def unstack(self, level: int = -1) -> DataFrame:
"""
Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.
The level involved will automatically get sorted.
Notes
-----
        Unlike pandas, pandas-on-Spark doesn't check whether the index is duplicated,
        because checking for duplicates requires scanning the whole data, which
        can be quite expensive.
Parameters
----------
level : int, str, or list of these, default last level
Level(s) to unstack, can pass level name.
Returns
-------
DataFrame
Unstacked Series.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4],
... index=pd.MultiIndex.from_product([['one', 'two'],
... ['a', 'b']]))
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1).sort_index()
a b
one 1 2
two 3 4
>>> s.unstack(level=0).sort_index()
one two
a 1 3
b 2 4
"""
if not isinstance(self.index, ps.MultiIndex):
raise ValueError("Series.unstack only support for a MultiIndex")
index_nlevels = self.index.nlevels
if level > 0 and (level > index_nlevels - 1):
raise IndexError(
"Too many levels: Index has only {} levels, not {}".format(index_nlevels, level + 1)
)
elif level < 0 and (level < -index_nlevels):
raise IndexError(
"Too many levels: Index has only {} levels, {} is not a valid level number".format(
index_nlevels, level
)
)
internal = self._internal.resolved_copy
index_map = list(zip(internal.index_spark_column_names, internal.index_names))
pivot_col, column_label_names = index_map.pop(level)
index_scol_names, index_names = zip(*index_map)
col = internal.data_spark_column_names[0]
sdf = internal.spark_frame
sdf = sdf.groupby(list(index_scol_names)).pivot(pivot_col).agg(F.first(scol_for(sdf, col)))
internal = InternalFrame( # TODO: dtypes?
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_scol_names],
index_names=list(index_names),
column_label_names=[column_label_names],
)
return DataFrame(internal)
def item(self) -> Scalar:
"""
Return the first element of the underlying data as a Python scalar.
Returns
-------
scalar
The first element of Series.
Raises
------
ValueError
If the data is not length-1.
Examples
--------
>>> psser = ps.Series([10])
>>> psser.item()
10
"""
return self.head(2)._to_internal_pandas().item()
def iteritems(self) -> Iterable[Tuple[Name, Any]]:
"""
Lazily iterate over (index, value) tuples.
This method returns an iterable tuple (index, value). This is
convenient if you want to create a lazy iterator.
        .. note:: Unlike pandas', iteritems in pandas-on-Spark returns a generator
            rather than a zip object.
Returns
-------
iterable
Iterable of tuples containing the (index, value) pairs from a
Series.
See Also
--------
DataFrame.items : Iterate over (column name, Series) pairs.
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.
Examples
--------
>>> s = ps.Series(['A', 'B', 'C'])
>>> for index, value in s.items():
... print("Index : {}, Value : {}".format(index, value))
Index : 0, Value : A
Index : 1, Value : B
Index : 2, Value : C
"""
internal_index_columns = self._internal.index_spark_column_names
internal_data_column = self._internal.data_spark_column_names[0]
def extract_kv_from_spark_row(row: Row) -> Tuple[Name, Any]:
k = (
row[internal_index_columns[0]]
if len(internal_index_columns) == 1
else tuple(row[c] for c in internal_index_columns)
)
v = row[internal_data_column]
return k, v
for k, v in map(
extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()
):
yield k, v
def items(self) -> Iterable[Tuple[Name, Any]]:
"""This is an alias of ``iteritems``."""
return self.iteritems()
def droplevel(self, level: Union[int, Name, List[Union[int, Name]]]) -> "Series":
"""
Return Series with requested index level(s) removed.
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
Returns
-------
Series
Series with requested index level(s) removed.
Examples
--------
>>> psser = ps.Series(
... [1, 2, 3],
... index=pd.MultiIndex.from_tuples(
... [("x", "a"), ("x", "b"), ("y", "c")], names=["level_1", "level_2"]
... ),
... )
>>> psser
level_1 level_2
x a 1
b 2
y c 3
dtype: int64
Removing specific index level by level
>>> psser.droplevel(0)
level_2
a 1
b 2
c 3
dtype: int64
Removing specific index level by name
>>> psser.droplevel("level_2")
level_1
x 1
x 2
y 3
dtype: int64
"""
return first_series(self.to_frame().droplevel(level=level, axis=0)).rename(self.name)
def tail(self, n: int = 5) -> "Series":
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first `n` rows, equivalent to ``df[n:]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> psser = ps.Series([1, 2, 3, 4, 5])
>>> psser
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> psser.tail(3) # doctest: +SKIP
2 3
3 4
4 5
dtype: int64
"""
return first_series(self.to_frame().tail(n=n)).rename(self.name)
def explode(self) -> "Series":
"""
Transform each element of a list-like to a row.
Returns
-------
Series
Exploded lists to rows; index will be duplicated for these rows.
See Also
--------
Series.str.split : Split string values on specified separator.
Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
to produce DataFrame.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
DataFrame.explode : Explode a DataFrame from list-like
columns to long format.
Examples
--------
>>> psser = ps.Series([[1, 2, 3], [], [3, 4]])
>>> psser
0 [1, 2, 3]
1 []
2 [3, 4]
dtype: object
>>> psser.explode() # doctest: +SKIP
0 1.0
0 2.0
0 3.0
1 NaN
2 3.0
2 4.0
dtype: float64
"""
if not isinstance(self.spark.data_type, ArrayType):
return self.copy()
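        # explode_outer (unlike explode) emits a null row for an empty or null array,
        # which is why the empty list in the example above becomes a NaN row.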
scol = F.explode_outer(self.spark.column).alias(name_like_string(self._column_label))
internal = self._internal.with_new_columns([scol], keep_order=False)
return first_series(DataFrame(internal))
def argsort(self) -> "Series":
"""
Return the integer indices that would sort the Series values.
Unlike pandas, the index order is not preserved in the result.
Returns
-------
Series
Positions of values within the sort order with -1 indicating
nan values.
Examples
--------
>>> psser = ps.Series([3, 3, 4, 1, 6, 2, 3, 7, 8, 7, 10])
>>> psser
0 3
1 3
2 4
3 1
4 6
5 2
6 3
7 7
8 8
9 7
10 10
dtype: int64
>>> psser.argsort().sort_index()
0 3
1 5
2 0
3 1
4 6
5 2
6 4
7 7
8 9
9 8
10 10
dtype: int64
"""
notnull = self.loc[self.notnull()]
sdf_for_index = notnull._internal.spark_frame.select(notnull._internal.index_spark_columns)
tmp_join_key = verify_temp_column_name(sdf_for_index, "__tmp_join_key__")
sdf_for_index, _ = InternalFrame.attach_distributed_sequence_column(
sdf_for_index, tmp_join_key
)
# sdf_for_index:
# +----------------+-----------------+
# |__tmp_join_key__|__index_level_0__|
# +----------------+-----------------+
# | 0| 0|
# | 1| 1|
# | 2| 2|
# | 3| 3|
# | 4| 4|
# +----------------+-----------------+
sdf_for_data = notnull._internal.spark_frame.select(
notnull.spark.column.alias("values"), NATURAL_ORDER_COLUMN_NAME
)
sdf_for_data, _ = InternalFrame.attach_distributed_sequence_column(
sdf_for_data, SPARK_DEFAULT_SERIES_NAME
)
# sdf_for_data:
# +---+------+-----------------+
# | 0|values|__natural_order__|
# +---+------+-----------------+
# | 0| 3| 25769803776|
# | 1| 3| 51539607552|
# | 2| 4| 77309411328|
# | 3| 1| 103079215104|
# | 4| 2| 128849018880|
# +---+------+-----------------+
sdf_for_data = sdf_for_data.sort(
scol_for(sdf_for_data, "values"), NATURAL_ORDER_COLUMN_NAME
).drop("values", NATURAL_ORDER_COLUMN_NAME)
tmp_join_key = verify_temp_column_name(sdf_for_data, "__tmp_join_key__")
sdf_for_data, _ = InternalFrame.attach_distributed_sequence_column(
sdf_for_data, tmp_join_key
)
# sdf_for_index: sdf_for_data:
# +----------------+-----------------+ +----------------+---+
# |__tmp_join_key__|__index_level_0__| |__tmp_join_key__| 0|
# +----------------+-----------------+ +----------------+---+
# | 0| 0| | 0| 3|
# | 1| 1| | 1| 4|
# | 2| 2| | 2| 0|
# | 3| 3| | 3| 1|
# | 4| 4| | 4| 2|
# +----------------+-----------------+ +----------------+---+
sdf = sdf_for_index.join(sdf_for_data, on=tmp_join_key).drop(tmp_join_key)
internal = self._internal.with_new_sdf(
spark_frame=sdf,
data_columns=[SPARK_DEFAULT_SERIES_NAME],
index_fields=[
InternalField(dtype=field.dtype) for field in self._internal.index_fields
],
data_fields=[None],
)
psser = first_series(DataFrame(internal))
return cast(
Series,
ps.concat([psser, self.loc[self.isnull()].spark.transform(lambda _: SF.lit(-1))]),
)
def argmax(self) -> int:
"""
Return int position of the largest value in the Series.
If the maximum is achieved in multiple locations,
the first row position is returned.
Returns
-------
int
Row position of the maximum value.
Examples
--------
Consider dataset containing cereal calories
>>> s = ps.Series({'Corn Flakes': 100.0, 'Almond Delight': 110.0,
... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0})
>>> s # doctest: +SKIP
Corn Flakes 100.0
Almond Delight 110.0
Cinnamon Toast Crunch 120.0
Cocoa Puff 110.0
dtype: float64
>>> s.argmax() # doctest: +SKIP
2
"""
sdf = self._internal.spark_frame.select(self.spark.column, NATURAL_ORDER_COLUMN_NAME)
max_value = sdf.select(
F.max(scol_for(sdf, self._internal.data_spark_column_names[0])),
F.first(NATURAL_ORDER_COLUMN_NAME),
).head()
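        # max_value[0] is the column maximum; max_value[1] (the first natural-order id)
        # only distinguishes an empty frame (both entries are None) from a frame whose
        # values are all null (only max_value[0] is None).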
if max_value[1] is None:
raise ValueError("attempt to get argmax of an empty sequence")
elif max_value[0] is None:
return -1
        # Attach a 0-based distributed sequence column to use as the row position.
seq_col_name = verify_temp_column_name(sdf, "__distributed_sequence_column__")
sdf, _ = InternalFrame.attach_distributed_sequence_column(
sdf.drop(NATURAL_ORDER_COLUMN_NAME), seq_col_name
)
# If the maximum is achieved in multiple locations, the first row position is returned.
return sdf.filter(
scol_for(sdf, self._internal.data_spark_column_names[0]) == max_value[0]
).head()[0]
def argmin(self) -> int:
"""
Return int position of the smallest value in the Series.
If the minimum is achieved in multiple locations,
the first row position is returned.
Returns
-------
int
Row position of the minimum value.
Examples
--------
Consider dataset containing cereal calories
>>> s = ps.Series({'Corn Flakes': 100.0, 'Almond Delight': 110.0,
... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0})
>>> s # doctest: +SKIP
Corn Flakes 100.0
Almond Delight 110.0
Cinnamon Toast Crunch 120.0
Cocoa Puff 110.0
dtype: float64
>>> s.argmin() # doctest: +SKIP
0
"""
sdf = self._internal.spark_frame.select(self.spark.column, NATURAL_ORDER_COLUMN_NAME)
min_value = sdf.select(
F.min(scol_for(sdf, self._internal.data_spark_column_names[0])),
F.first(NATURAL_ORDER_COLUMN_NAME),
).head()
if min_value[1] is None:
raise ValueError("attempt to get argmin of an empty sequence")
elif min_value[0] is None:
return -1
        # Attach a 0-based distributed sequence column to use as the row position.
seq_col_name = verify_temp_column_name(sdf, "__distributed_sequence_column__")
sdf, _ = InternalFrame.attach_distributed_sequence_column(
sdf.drop(NATURAL_ORDER_COLUMN_NAME), seq_col_name
)
# If the minimum is achieved in multiple locations, the first row position is returned.
return sdf.filter(
scol_for(sdf, self._internal.data_spark_column_names[0]) == min_value[0]
).head()[0]
def compare(
self, other: "Series", keep_shape: bool = False, keep_equal: bool = False
) -> DataFrame:
"""
Compare to another Series and show the differences.
Parameters
----------
other : Series
Object to compare with.
keep_shape : bool, default False
If true, all rows and columns are kept.
Otherwise, only the ones with different values are kept.
keep_equal : bool, default False
If true, the result keeps values that are equal.
Otherwise, equal values are shown as NaNs.
Returns
-------
DataFrame
Notes
-----
Matching NaNs will not appear as a difference.
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> s1 = ps.Series(["a", "b", "c", "d", "e"])
>>> s2 = ps.Series(["a", "a", "c", "b", "e"])
Align the differences on columns
>>> s1.compare(s2).sort_index()
self other
1 b a
3 d b
Keep all original rows
>>> s1.compare(s2, keep_shape=True).sort_index()
self other
0 None None
1 b a
2 None None
3 d b
4 None None
Keep all original rows and also all original values
>>> s1.compare(s2, keep_shape=True, keep_equal=True).sort_index()
self other
0 a a
1 b a
2 c c
3 d b
4 e e
>>> reset_option("compute.ops_on_diff_frames")
"""
if same_anchor(self, other):
self_column_label = verify_temp_column_name(other.to_frame(), "__self_column__")
other_column_label = verify_temp_column_name(self.to_frame(), "__other_column__")
combined = DataFrame(
self._internal.with_new_columns(
[self.rename(self_column_label), other.rename(other_column_label)]
)
) # type: DataFrame
else:
if not self.index.equals(other.index):
raise ValueError("Can only compare identically-labeled Series objects")
combined = combine_frames(self.to_frame(), other.to_frame())
this_column_label = "self"
that_column_label = "other"
if keep_equal and keep_shape:
combined.columns = pd.Index([this_column_label, that_column_label])
return combined
this_data_scol = combined._internal.data_spark_columns[0]
that_data_scol = combined._internal.data_spark_columns[1]
index_scols = combined._internal.index_spark_columns
sdf = combined._internal.spark_frame
if keep_shape:
this_scol = (
F.when(this_data_scol == that_data_scol, None)
.otherwise(this_data_scol)
.alias(this_column_label)
)
this_field = combined._internal.data_fields[0].copy(
name=this_column_label, nullable=True
)
that_scol = (
F.when(this_data_scol == that_data_scol, None)
.otherwise(that_data_scol)
.alias(that_column_label)
)
that_field = combined._internal.data_fields[1].copy(
name=that_column_label, nullable=True
)
else:
sdf = sdf.filter(~this_data_scol.eqNullSafe(that_data_scol))
this_scol = this_data_scol.alias(this_column_label)
this_field = combined._internal.data_fields[0].copy(name=this_column_label)
that_scol = that_data_scol.alias(that_column_label)
that_field = combined._internal.data_fields[1].copy(name=that_column_label)
sdf = sdf.select(*index_scols, this_scol, that_scol, NATURAL_ORDER_COLUMN_NAME)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_fields=combined._internal.index_fields,
column_labels=[(this_column_label,), (that_column_label,)],
data_spark_columns=[scol_for(sdf, this_column_label), scol_for(sdf, that_column_label)],
data_fields=[this_field, that_field],
column_label_names=[None],
)
return DataFrame(internal)
def align(
self,
other: Union[DataFrame, "Series"],
join: str = "outer",
axis: Optional[Axis] = None,
copy: bool = True,
) -> Tuple["Series", Union[DataFrame, "Series"]]:
"""
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
Returns
-------
(left, right) : (Series, type of other)
Aligned objects.
Examples
--------
>>> ps.set_option("compute.ops_on_diff_frames", True)
>>> s1 = ps.Series([7, 8, 9], index=[10, 11, 12])
>>> s2 = ps.Series(["g", "h", "i"], index=[10, 20, 30])
>>> aligned_l, aligned_r = s1.align(s2)
>>> aligned_l.sort_index()
10 7.0
11 8.0
12 9.0
20 NaN
30 NaN
dtype: float64
>>> aligned_r.sort_index()
10 g
11 None
12 None
20 h
30 i
dtype: object
Align with the join type "inner":
>>> aligned_l, aligned_r = s1.align(s2, join="inner")
>>> aligned_l.sort_index()
10 7
dtype: int64
>>> aligned_r.sort_index()
10 g
dtype: object
Align with a DataFrame:
>>> df = ps.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
>>> aligned_l, aligned_r = s1.align(df)
>>> aligned_l.sort_index()
10 7.0
11 8.0
12 9.0
20 NaN
30 NaN
dtype: float64
>>> aligned_r.sort_index()
a b
10 1.0 a
11 NaN None
12 NaN None
20 2.0 b
30 3.0 c
>>> ps.reset_option("compute.ops_on_diff_frames")
"""
axis = validate_axis(axis)
if axis == 1:
raise ValueError("Series does not support columns axis.")
self_df = self.to_frame()
left, right = self_df.align(other, join=join, axis=axis, copy=False)
if left is self_df:
left_ser = self
else:
left_ser = first_series(left).rename(self.name)
return (left_ser.copy(), right.copy()) if copy else (left_ser, right)
def between_time(
self,
start_time: Union[datetime.time, str],
end_time: Union[datetime.time, str],
include_start: bool = True,
include_end: bool = True,
axis: Axis = 0,
) -> "Series":
"""
Select values between particular times of the day (example: 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
include_start : bool, default True
Whether the start time needs to be included in the result.
include_end : bool, default True
Whether the end time needs to be included in the result.
axis : {0 or 'index', 1 or 'columns'}, default 0
            Determine the time range over the index (0) or over column values (1).
Returns
-------
Series
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> psser = ps.Series([1, 2, 3, 4], index=idx)
>>> psser
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
dtype: int64
>>> psser.between_time('0:15', '0:45')
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
dtype: int64
"""
return first_series(
self.to_frame().between_time(start_time, end_time, include_start, include_end, axis)
).rename(self.name)
def at_time(
self, time: Union[datetime.time, str], asof: bool = False, axis: Axis = 0
) -> "Series":
"""
Select values at particular time of day (example: 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
Returns
-------
Series
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> idx = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> psser = ps.Series([1, 2, 3, 4], index=idx)
>>> psser
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
dtype: int64
>>> psser.at_time('12:00')
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
dtype: int64
"""
return first_series(self.to_frame().at_time(time, asof, axis)).rename(self.name)
def _cum(
self,
func: Callable[[Column], Column],
skipna: bool,
part_cols: Sequence["ColumnOrName"] = (),
ascending: bool = True,
) -> "Series":
# This is used to cummin, cummax, cumsum, etc.
if ascending:
window = (
Window.orderBy(F.asc(NATURAL_ORDER_COLUMN_NAME))
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
else:
window = (
Window.orderBy(F.desc(NATURAL_ORDER_COLUMN_NAME))
.partitionBy(*part_cols)
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
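        # Both windows span from the start of the (optional) partition up to the
        # current row; `ascending` only flips the natural-order direction.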
if skipna:
# There is a behavior difference between pandas and PySpark. In case of cummax,
#
# Input:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 1.0 0.0
# 3 2.0 4.0
# 4 4.0 9.0
#
# pandas:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
#
# PySpark:
# A B
# 0 2.0 1.0
# 1 5.0 1.0
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
scol = F.when(
                # Where the value itself is null, keep the cumulative result null.
self.spark.column.isNull(),
SF.lit(None),
).otherwise(func(self.spark.column).over(window))
else:
# Here, we use two Windows.
# One for real data.
# The other one for setting nulls after the first null it meets.
#
# There is a behavior difference between pandas and PySpark. In case of cummax,
#
# Input:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 1.0 0.0
# 3 2.0 4.0
# 4 4.0 9.0
#
# pandas:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 5.0 NaN
# 3 5.0 NaN
# 4 5.0 NaN
#
# PySpark:
# A B
# 0 2.0 1.0
# 1 5.0 1.0
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
scol = F.when(
                # A running max over the null flags becomes True at the first null
                # and stays True for all following rows.
F.max(self.spark.column.isNull()).over(window),
                # Once a null has been seen, set the result to null from then on.
SF.lit(None),
).otherwise(func(self.spark.column).over(window))
return self._with_new_scol(scol)
def _cumsum(self, skipna: bool, part_cols: Sequence["ColumnOrName"] = ()) -> "Series":
psser = self
if isinstance(psser.spark.data_type, BooleanType):
psser = psser.spark.transform(lambda scol: scol.cast(LongType()))
elif not isinstance(psser.spark.data_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return psser._cum(F.sum, skipna, part_cols)
def _cumprod(self, skipna: bool, part_cols: Sequence["ColumnOrName"] = ()) -> "Series":
if isinstance(self.spark.data_type, BooleanType):
scol = self._cum(
lambda scol: F.min(F.coalesce(scol, SF.lit(True))), skipna, part_cols
).spark.column.cast(LongType())
elif isinstance(self.spark.data_type, NumericType):
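            # cumprod is computed as sign * exp(cumsum(log(abs(x)))), tracking zeros and
            # negative counts separately to restore zeros and the sign; e.g. [2, -3, 4]:
            # abs cumprod [2, 6, 24], negatives seen [0, 1, 1] -> signs [+, -, -]
            # -> [2, -6, -24].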
num_zeros = self._cum(
lambda scol: F.sum(F.when(scol == 0, 1).otherwise(0)), skipna, part_cols
).spark.column
num_negatives = self._cum(
lambda scol: F.sum(F.when(scol < 0, 1).otherwise(0)), skipna, part_cols
).spark.column
sign = F.when(num_negatives % 2 == 0, 1).otherwise(-1)
abs_prod = F.exp(
self._cum(lambda scol: F.sum(F.log(F.abs(scol))), skipna, part_cols).spark.column
)
scol = F.when(num_zeros > 0, 0).otherwise(sign * abs_prod)
if isinstance(self.spark.data_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(self.spark.data_type),
self.spark.data_type.simpleString(),
)
)
return self._with_new_scol(scol)
# ----------------------------------------------------------------------
# Accessor Methods
# ----------------------------------------------------------------------
dt = CachedAccessor("dt", DatetimeMethods)
str = CachedAccessor("str", StringMethods)
cat = CachedAccessor("cat", CategoricalAccessor)
plot = CachedAccessor("plot", PandasOnSparkPlotAccessor)
# ----------------------------------------------------------------------
def _apply_series_op(
self, op: Callable[["Series"], Union["Series", Column]], should_resolve: bool = False
) -> "Series":
psser_or_scol = op(self)
if isinstance(psser_or_scol, Series):
psser = psser_or_scol
else:
psser = self._with_new_scol(cast(Column, psser_or_scol))
if should_resolve:
internal = psser._internal.resolved_copy
return first_series(DataFrame(internal))
else:
return psser
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str_type,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Scalar:
"""
Applies sfun to the column and returns a scalar
Parameters
----------
sfun : the stats function to be used for aggregation
name : original pandas API name.
        axis : used only for sanity check because Series only supports the index axis.
numeric_only : not used by this implementation, but passed down by stats functions
"""
from inspect import signature
axis = validate_axis(axis)
if axis == 1:
raise ValueError("Series does not support columns axis.")
num_args = len(signature(sfun).parameters)
spark_column = self.spark.column
spark_type = self.spark.data_type
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
scol = cast(Callable[[Column], Column], sfun)(spark_column)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
scol = cast(Callable[[Column, DataType], Column], sfun)(spark_column, spark_type)
min_count = kwargs.get("min_count", 0)
if min_count > 0:
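            # If fewer than `min_count` non-null values are present, F.when yields null
            # (there is no otherwise branch), which is returned as np.nan below.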
scol = F.when(Frame._count_expr(spark_column, spark_type) >= min_count, scol)
result = unpack_scalar(self._internal.spark_frame.select(scol))
return result if result is not None else np.nan
# Override the `groupby` to specify the actual return type annotation.
def groupby(
self,
by: Union[Name, "Series", List[Union[Name, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "SeriesGroupBy":
return cast(
"SeriesGroupBy", super().groupby(by=by, axis=axis, as_index=as_index, dropna=dropna)
)
groupby.__doc__ = Frame.groupby.__doc__
def _build_groupby(
self, by: List[Union["Series", Label]], as_index: bool, dropna: bool
) -> "SeriesGroupBy":
from pyspark.pandas.groupby import SeriesGroupBy
return SeriesGroupBy._build(self, by, as_index=as_index, dropna=dropna)
def __getitem__(self, key: Any) -> Any:
try:
if (isinstance(key, slice) and any(type(n) == int for n in [key.start, key.stop])) or (
type(key) == int
and not isinstance(self.index.spark.data_type, (IntegerType, LongType))
):
                # pandas Series treats integer slices as positional, and an integer
                # key as positional too unless the index itself is an integer type,
                # in which case the key is looked up by index value.
return self.iloc[key]
return self.loc[key]
except SparkPandasIndexingError:
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
len(key), self._internal.index_level
)
)
def __getattr__(self, item: str_type) -> Any:
if item.startswith("__"):
raise AttributeError(item)
if hasattr(MissingPandasLikeSeries, item):
property_or_func = getattr(MissingPandasLikeSeries, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
raise AttributeError("'Series' object has no attribute '{}'".format(item))
def _to_internal_pandas(self) -> pd.Series:
"""
Return a pandas Series directly from _internal to avoid overhead of copy.
This method is for internal use only.
"""
return self._psdf._internal.to_pandas_frame[self.name]
def __repr__(self) -> str_type:
max_display_count = get_option("display.max_rows")
if max_display_count is None:
return self._to_internal_pandas().to_string(name=self.name, dtype=self.dtype)
pser = self._psdf._get_or_create_repr_pandas_cache(max_display_count)[self.name]
pser_length = len(pser)
pser = pser.iloc[:max_display_count]
if pser_length > max_display_count:
repr_string = pser.to_string(length=True)
rest, prev_footer = repr_string.rsplit("\n", 1)
match = REPR_PATTERN.search(prev_footer)
if match is not None:
length = match.group("length")
dtype_name = str(self.dtype.name)
if self.name is None:
footer = "\ndtype: {dtype}\nShowing only the first {length}".format(
length=length, dtype=pprint_thing(dtype_name)
)
else:
footer = (
"\nName: {name}, dtype: {dtype}"
"\nShowing only the first {length}".format(
length=length, name=self.name, dtype=pprint_thing(dtype_name)
)
)
return rest + footer
return pser.to_string(name=self.name, dtype=self.dtype)
def __dir__(self) -> Iterable[str_type]:
if not isinstance(self.spark.data_type, StructType):
fields = []
else:
fields = [f for f in self.spark.data_type.fieldNames() if " " not in f]
return list(super().__dir__()) + fields
def __iter__(self) -> None:
return MissingPandasLikeSeries.__iter__(self)
if sys.version_info >= (3, 7):
# In order to support the type hints such as Series[...]. See DataFrame.__class_getitem__.
def __class_getitem__(cls, params: Any) -> Type[SeriesType]:
return _create_type_for_series_type(params)
elif (3, 5) <= sys.version_info < (3, 7):
# The implementation is in its metaclass so this flag is needed to distinguish
# pandas-on-Spark Series.
is_series = None
def unpack_scalar(sdf: SparkDataFrame) -> Any:
"""
Takes a dataframe that is supposed to contain a single row with a single scalar value,
and returns this value.
"""
    pdf = cast(pd.DataFrame, sdf.limit(2).toPandas())
    assert len(pdf) == 1, (sdf, pdf)
    row = pdf.iloc[0]
    values = list(row)
    assert len(values) == 1, (row, values)
    return values[0]
@overload
def first_series(df: DataFrame) -> Series:
...
@overload
def first_series(df: pd.DataFrame) -> pd.Series:
...
def first_series(df: Union[DataFrame, pd.DataFrame]) -> Union[Series, pd.Series]:
"""
Takes a DataFrame and returns the first column of the DataFrame as a Series
"""
assert isinstance(df, (DataFrame, pd.DataFrame)), type(df)
if isinstance(df, DataFrame):
return df._psser_for(df._internal.column_labels[0])
else:
return df[df.columns[0]]
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.series
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.series.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.series tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.series,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| 0.001711 |
# -*- coding: utf-8 -*-
"""
flask_script._compat
~~~~~~~~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, 'pypy_translation_info')
_identity = lambda x: x
if not PY2:
unichr = chr
range_type = range
text_type = str
string_types = (str, )
integer_types = (int, )
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
import pickle
from io import BytesIO, StringIO
NativeStringIO = StringIO
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
ifilter = filter
imap = map
izip = zip
intern = sys.intern
implements_iterator = _identity
implements_to_string = _identity
encode_filename = _identity
get_next = lambda x: x.__next__
input = input
from string import ascii_lowercase
else:
unichr = unichr
text_type = unicode
range_type = xrange
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
import cPickle as pickle
from cStringIO import StringIO as BytesIO, StringIO
NativeStringIO = BytesIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
from itertools import imap, izip, ifilter
intern = intern
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
get_next = lambda x: x.next
def encode_filename(filename):
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
input = raw_input
from string import lower as ascii_lowercase
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
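# Illustrative usage (Meta and Base are hypothetical placeholder names): this
# creates Foo with metaclass Meta on both Python 2 and 3 without leaving the
# temporary class in Foo's MRO:
#
#     class Foo(with_metaclass(Meta, Base)):
#         pass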
try:
from urllib.parse import quote_from_bytes as url_quote
except ImportError:
from urllib import quote as url_quote
| 0.003088 |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import unittest
import mock
import webapp2
import webtest
# pylint: disable=unused-import
from dashboard import mock_oauth2_decorator
# pylint: enable=unused-import
from dashboard import bug_details
from dashboard.common import testing_common
from dashboard.services import issue_tracker_service
GET_ISSUE_DATA = {
'owner': {'name': '[email protected]'},
'state': 'open',
'status': 'Untriaged',
'summary': 'Regression in sunspider',
'published': '2017-02-17T23:08:44',
}
GET_COMMENTS_DATA = [
{
'author': '[email protected]',
'content': 'This is the first comment',
'published': '2017-02-17T09:59:55',
}, {
'author': '[email protected]',
'content': 'This is the second comment',
'published': '2017-02-17T10:00:0',
}, {
'author': '[email protected]',
'content': 'The following revision refers to this bug:\n'
' https://chromium.googlesource.com/chromium/src.git/+/'
'9ac6e6466cc0df7e1a3ad4488c5c8bdc2db4da36\n\n'
'Review-Url: https://codereview.chromium.org/2707483002\n\n',
'published': '2017-02-17T23:08:44',
}
]
class BugDetailsHandlerTest(testing_common.TestCase):
def setUp(self):
super(BugDetailsHandlerTest, self).setUp()
app = webapp2.WSGIApplication([(
'/bug_details', bug_details.BugDetailsHandler)])
self.testapp = webtest.TestApp(app)
# Mocks fetching bugs from issue tracker.
@unittest.skipIf(sys.platform.startswith('linux'), 'oauth2 mock error')
@mock.patch('services.issue_tracker_service.discovery.build',
mock.MagicMock())
@mock.patch.object(
issue_tracker_service.IssueTrackerService, 'GetIssue',
mock.MagicMock(return_value=GET_ISSUE_DATA))
@mock.patch.object(
issue_tracker_service.IssueTrackerService, 'GetIssueComments',
mock.MagicMock(return_value=GET_COMMENTS_DATA))
def testPost(self):
response = self.testapp.post('/bug_details', {'bug_id': '12345'})
self.assertEqual(
'Regression in sunspider',
self.GetJsonValue(response, 'summary'))
self.assertEqual(
'[email protected]',
self.GetJsonValue(response, 'owner'))
self.assertEqual(
'2017-02-17T23:08:44',
self.GetJsonValue(response, 'published'))
self.assertEqual(
'open',
self.GetJsonValue(response, 'state'))
self.assertEqual(
'Untriaged',
self.GetJsonValue(response, 'status'))
comments = self.GetJsonValue(response, 'comments')
self.assertEqual(3, len(comments))
self.assertEqual('This is the second comment', comments[1]['content'])
self.assertItemsEqual(
['https://codereview.chromium.org/2707483002'],
self.GetJsonValue(response, 'review_urls'))
if __name__ == '__main__':
unittest.main()
| 0.002854 |
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A drop-in replacement for the standard-library `logging` module that
allows {}-style log formatting on Python 2 and 3.
Provides everything the "logging" module does. The only difference is
that when getLogger(name) instantiates a logger that logger uses
{}-style formatting.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from copy import copy
from logging import * # noqa
import subprocess
import threading
def logsafe(val):
"""Coerce a potentially "problematic" value so it can be formatted
in a Unicode log string.
This works around a number of pitfalls when logging objects in
Python 2:
- Logging path names, which must be byte strings, requires
conversion for output.
- Some objects, including some exceptions, will crash when you call
`unicode(v)` while `str(v)` works fine. CalledProcessError is an
example.
"""
# Already Unicode.
if isinstance(val, unicode):
return val
# Bytestring: needs decoding.
elif isinstance(val, bytes):
# Blindly convert with UTF-8. Eventually, it would be nice to
# (a) only do this for paths, if they can be given a distinct
# type, and (b) warn the developer if they do this for other
# bytestrings.
return val.decode('utf8', 'replace')
# A "problem" object: needs a workaround.
elif isinstance(val, subprocess.CalledProcessError):
try:
return unicode(val)
except UnicodeDecodeError:
# An object with a broken __unicode__ formatter. Use __str__
# instead.
return str(val).decode('utf8', 'replace')
# Other objects are used as-is so field access, etc., still works in
# the format string.
else:
return val
class StrFormatLogger(Logger):
"""A version of `Logger` that uses `str.format`-style formatting
instead of %-style formatting.
"""
class _LogMessage(object):
def __init__(self, msg, args, kwargs):
self.msg = msg
self.args = args
self.kwargs = kwargs
def __str__(self):
args = [logsafe(a) for a in self.args]
kwargs = dict((k, logsafe(v)) for (k, v) in self.kwargs.items())
return self.msg.format(*args, **kwargs)
def _log(self, level, msg, args, exc_info=None, extra=None, **kwargs):
"""Log msg.format(*args, **kwargs)"""
m = self._LogMessage(msg, args, kwargs)
return super(StrFormatLogger, self)._log(level, m, (), exc_info, extra)
class ThreadLocalLevelLogger(Logger):
"""A version of `Logger` whose level is thread-local instead of shared.
"""
def __init__(self, name, level=NOTSET):
self._thread_level = threading.local()
self.default_level = NOTSET
super(ThreadLocalLevelLogger, self).__init__(name, level)
@property
def level(self):
try:
return self._thread_level.level
except AttributeError:
self._thread_level.level = self.default_level
return self.level
@level.setter
def level(self, value):
self._thread_level.level = value
def set_global_level(self, level):
"""Set the level on the current thread + the default value for all
threads.
"""
self.default_level = level
self.setLevel(level)
class BeetsLogger(ThreadLocalLevelLogger, StrFormatLogger):
pass
my_manager = copy(Logger.manager)
my_manager.loggerClass = BeetsLogger
def getLogger(name=None):
if name:
return my_manager.getLogger(name)
else:
return Logger.root
| 0 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A helper class for inferring Distribution shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class _DistributionShape(object):
"""Manage and manipulate `Distribution` shape.
#### Terminology
Recall that a `Tensor` has:
- `shape`: size of `Tensor` dimensions,
- `ndims`: size of `shape`; number of `Tensor` dimensions,
- `dims`: indexes into `shape`; useful for transpose, reduce.
`Tensor`s sampled from a `Distribution` can be partitioned by `sample_dims`,
`batch_dims`, and `event_dims`. To understand the semantics of these
dimensions, consider when two of the three are fixed and the remaining
is varied:
- `sample_dims`: indexes independent draws from identical
parameterizations of the `Distribution`.
- `batch_dims`: indexes independent draws from non-identical
parameterizations of the `Distribution`.
- `event_dims`: indexes event coordinates from one sample.
The `sample`, `batch`, and `event` dimensions constitute the entirety of a
`Distribution` `Tensor`'s shape.
The dimensions are always in `sample`, `batch`, `event` order.
#### Purpose
This class partitions `Tensor` notions of `shape`, `ndims`, and `dims` into
`Distribution` notions of `sample,` `batch,` and `event` dimensions. That
is, it computes any of:
```
sample_shape batch_shape event_shape
sample_dims batch_dims event_dims
sample_ndims batch_ndims event_ndims
```
for a given `Tensor`, e.g., the result of
`Distribution.sample(sample_shape=...)`.
For a given `Tensor`, this class computes the above table using minimal
information: `batch_ndims` and `event_ndims`.
#### Examples
We show examples of distribution shape semantics.
- Sample dimensions:
Computing summary statistics, i.e., the average is a reduction over sample
dimensions.
```python
sample_dims = [0]
tf.reduce_mean(Normal(loc=1.3, scale=1.).sample_n(1000),
axis=sample_dims) # ~= 1.3
```
- Batch dimensions:
Monte Carlo estimation of a marginal probability:
Average over batch dimensions where batch dimensions are associated with
random draws from a prior.
E.g., suppose we want to find the Monte Carlo estimate of the marginal
distribution of a `Normal` with a random `Laplace` location:
```
P(X=x) = integral P(X=x|y) P(Y=y) dy
~= 1/n sum_{i=1}^n P(X=x|y_i), y_i ~iid Laplace(0,1)
= tf.reduce_mean(Normal(loc=Laplace(0., 1.).sample_n(n=1000),
scale=tf.ones(1000)).prob(x),
axis=batch_dims)
```
The `Laplace` distribution generates a `Tensor` of shape `[1000]`. When
fed to a `Normal`, this is interpreted as 1000 different locations, i.e.,
1000 non-identical Normals. Therefore a single call to `prob(x)` yields
1000 probabilities, one for every location. The average over this batch
yields the marginal.
- Event dimensions:
Computing the determinant of the Jacobian of a function of a random
variable involves a reduction over event dimensions.
E.g., Jacobian of the transform `Y = g(X) = exp(X)`:
```python
tf.div(1., tf.reduce_prod(x, event_dims))
```
We show examples using this class.
Write `S, B, E` for `sample_shape`, `batch_shape`, and `event_shape`.
```python
# 150 iid samples from one multivariate Normal with two degrees of freedom.
mu = [0., 0]
sigma = [[1., 0],
[0, 1]]
mvn = MultivariateNormal(mu, sigma)
rand_mvn = mvn.sample(sample_shape=[3, 50])
shaper = DistributionShape(batch_ndims=0, event_ndims=1)
S, B, E = shaper.get_shape(rand_mvn)
# S = [3, 50]
# B = []
# E = [2]
# 12 iid samples from one Wishart with 2x2 events.
sigma = [[1., 0],
[2, 1]]
wishart = Wishart(df=5, scale=sigma)
rand_wishart = wishart.sample(sample_shape=[3, 4])
shaper = DistributionShape(batch_ndims=0, event_ndims=2)
S, B, E = shaper.get_shape(rand_wishart)
# S = [3, 4]
# B = []
# E = [2, 2]
  # 100 iid samples from two non-identical trivariate Normal distributions.
  mu = ... # shape(2, 3)
  sigma = ... # shape(2, 3, 3)
  X = MultivariateNormal(mu, sigma).sample(sample_shape=[4, 25])
  shaper = DistributionShape(batch_ndims=1, event_ndims=1)
  S, B, E = shaper.get_shape(X)
  # S = [4, 25]
  # B = [2]
  # E = [3]
```
#### Argument Validation
  When `validate_args=True`, checks that cannot be done during
  graph construction are performed at graph execution. This may result in a
  performance degradation because data must be switched from GPU to CPU.
  For example, when `validate_args=True` and `event_ndims` is a
  non-constant `Tensor`, it is checked to be a non-negative integer at graph
  execution. (Same for `batch_ndims`). Constant `Tensor`s and non-`Tensor`
arguments are always checked for correctness since this can be done for
"free," i.e., during graph construction.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
batch_ndims=None,
event_ndims=None,
validate_args=False,
name="DistributionShape"):
"""Construct `DistributionShape` with fixed `batch_ndims`, `event_ndims`.
`batch_ndims` and `event_ndims` are fixed throughout the lifetime of a
`Distribution`. They may only be known at graph execution.
If both `batch_ndims` and `event_ndims` are python scalars (rather than
either being a `Tensor`), functions in this class automatically perform
sanity checks during graph construction.
Args:
batch_ndims: `Tensor`. Number of `dims` (`rank`) of the batch portion of
      indexes of a `Tensor`. A "batch" is a set of non-identical distributions, e.g.,
      Normals with different parameters.
event_ndims: `Tensor`. Number of `dims` (`rank`) of the event portion of
indexes of a `Tensor`. An "event" is what is sampled from a
distribution, i.e., a trivariate Normal has an event shape of [3] and a
4 dimensional Wishart has an event shape of [4, 4].
validate_args: Python `bool`, default `False`. When `True`,
non-`tf.constant` `Tensor` arguments are checked for correctness.
(`tf.constant` arguments are always checked.)
name: Python `str`. The name prepended to Ops created by this class.
Raises:
ValueError: if either `batch_ndims` or `event_ndims` are: `None`,
negative, not `int32`.
"""
if batch_ndims is None: raise ValueError("batch_ndims cannot be None")
if event_ndims is None: raise ValueError("event_ndims cannot be None")
self._batch_ndims = batch_ndims
self._event_ndims = event_ndims
self._validate_args = validate_args
with ops.name_scope(name):
self._name = name
with ops.name_scope("init"):
self._batch_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
batch_ndims, name="batch_ndims"))
self._batch_ndims_static, self._batch_ndims_is_0 = (
self._introspect_ndims(self._batch_ndims))
self._event_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
event_ndims, name="event_ndims"))
self._event_ndims_static, self._event_ndims_is_0 = (
self._introspect_ndims(self._event_ndims))
@property
def name(self):
"""Name given to ops created by this class."""
return self._name
@property
def batch_ndims(self):
"""Returns number of dimensions corresponding to non-identical draws."""
return self._batch_ndims
@property
def event_ndims(self):
"""Returns number of dimensions needed to index a sample's coordinates."""
return self._event_ndims
@property
def validate_args(self):
"""Returns True if graph-runtime `Tensor` checks are enabled."""
return self._validate_args
def get_ndims(self, x, name="get_ndims"):
"""Get `Tensor` number of dimensions (rank).
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
ndims: Scalar number of dimensions associated with a `Tensor`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
ndims = x.get_shape().ndims
if ndims is None:
return array_ops.rank(x, name="ndims")
return ops.convert_to_tensor(ndims, dtype=dtypes.int32, name="ndims")
def get_sample_ndims(self, x, name="get_sample_ndims"):
"""Returns number of dimensions corresponding to iid draws ("sample").
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_ndims: `Tensor` (0D, `int32`).
Raises:
ValueError: if `sample_ndims` is calculated to be negative.
"""
with self._name_scope(name, values=[x]):
ndims = self.get_ndims(x, name=name)
if self._is_all_constant_helper(ndims, self.batch_ndims,
self.event_ndims):
ndims = tensor_util.constant_value(ndims)
sample_ndims = (ndims - self._batch_ndims_static -
self._event_ndims_static)
if sample_ndims < 0:
raise ValueError(
"expected batch_ndims(%d) + event_ndims(%d) <= ndims(%d)" %
(self._batch_ndims_static, self._event_ndims_static, ndims))
return ops.convert_to_tensor(sample_ndims, name="sample_ndims")
else:
with ops.name_scope(name="sample_ndims"):
sample_ndims = ndims - self.batch_ndims - self.event_ndims
if self.validate_args:
sample_ndims = control_flow_ops.with_dependencies(
[check_ops.assert_non_negative(sample_ndims)], sample_ndims)
return sample_ndims
def get_dims(self, x, name="get_dims"):
"""Returns dimensions indexing `sample_shape`, `batch_shape`, `event_shape`.
Example:
```python
x = ... # Tensor with shape [4, 3, 2, 1]
sample_dims, batch_dims, event_dims = _DistributionShape(
batch_ndims=2, event_ndims=1).get_dims(x)
# sample_dims == [0]
# batch_dims == [1, 2]
# event_dims == [3]
# Note that these are not the shape parts, but rather indexes into shape.
```
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_dims: `Tensor` (1D, `int32`).
batch_dims: `Tensor` (1D, `int32`).
event_dims: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
def make_dims(start_sum, size, name):
"""Closure to make dims range."""
start_sum = start_sum if start_sum else [
array_ops.zeros([], dtype=dtypes.int32, name="zero")]
if self._is_all_constant_helper(size, *start_sum):
start = sum(tensor_util.constant_value(s) for s in start_sum)
stop = start + tensor_util.constant_value(size)
return ops.convert_to_tensor(
list(range(start, stop)), dtype=dtypes.int32, name=name)
else:
start = sum(start_sum)
return math_ops.range(start, start + size)
sample_ndims = self.get_sample_ndims(x, name=name)
return (make_dims([], sample_ndims, name="sample_dims"),
make_dims([sample_ndims], self.batch_ndims, name="batch_dims"),
make_dims([sample_ndims, self.batch_ndims],
self.event_ndims, name="event_dims"))
def get_shape(self, x, name="get_shape"):
"""Returns `Tensor`'s shape partitioned into `sample`, `batch`, `event`.
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_shape: `Tensor` (1D, `int32`).
batch_shape: `Tensor` (1D, `int32`).
event_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
def slice_shape(start_sum, size, name):
"""Closure to slice out shape."""
start_sum = start_sum if start_sum else [
array_ops.zeros([], dtype=dtypes.int32, name="zero")]
if (x.get_shape().ndims is not None and
self._is_all_constant_helper(size, *start_sum)):
start = sum(tensor_util.constant_value(s) for s in start_sum)
stop = start + tensor_util.constant_value(size)
slice_ = x.get_shape()[start:stop].as_list()
if all(s is not None for s in slice_):
return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name)
return array_ops.slice(array_ops.shape(x), [sum(start_sum)], [size])
sample_ndims = self.get_sample_ndims(x, name=name)
return (slice_shape([], sample_ndims,
name="sample_shape"),
slice_shape([sample_ndims], self.batch_ndims,
name="batch_shape"),
slice_shape([sample_ndims, self.batch_ndims], self.event_ndims,
name="event_shape"))
  # TODO(jvdillon): Remove expand_batch_dim and make expand_batch_dim=False
  # the default behavior.
def make_batch_of_event_sample_matrices(
self, x, expand_batch_dim=True,
name="make_batch_of_event_sample_matrices"):
"""Reshapes/transposes `Distribution` `Tensor` from S+B+E to B_+E_+S_.
Where:
- `B_ = B if B or not expand_batch_dim else [1]`,
- `E_ = E if E else [1]`,
- `S_ = [tf.reduce_prod(S)]`.
Args:
x: `Tensor`.
expand_batch_dim: Python `bool`. If `True` the batch dims will be expanded
such that `batch_ndims >= 1`.
name: Python `str`. The name to give this op.
Returns:
x: `Tensor`. Input transposed/reshaped to `B_+E_+S_`.
sample_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# x.shape: S+B+E
sample_shape, batch_shape, event_shape = self.get_shape(x)
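      # When event_ndims (or batch_ndims, if expanding) is 0, pick_vector substitutes a
      # size-1 dimension so the reshape below always yields a batch of matrices.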
event_shape = distribution_util.pick_vector(
self._event_ndims_is_0, [1], event_shape)
if expand_batch_dim:
batch_shape = distribution_util.pick_vector(
self._batch_ndims_is_0, [1], batch_shape)
new_shape = array_ops.concat([[-1], batch_shape, event_shape], 0)
x = array_ops.reshape(x, shape=new_shape)
# x.shape: [prod(S)]+B_+E_
x = distribution_util.rotate_transpose(x, shift=-1)
# x.shape: B_+E_+[prod(S)]
return x, sample_shape
  # TODO(jvdillon): Remove expand_batch_dim and make expand_batch_dim=False
  # the default behavior.
def undo_make_batch_of_event_sample_matrices(
self, x, sample_shape, expand_batch_dim=True,
name="undo_make_batch_of_event_sample_matrices"):
"""Reshapes/transposes `Distribution` `Tensor` from B_+E_+S_ to S+B+E.
Where:
- `B_ = B if B or not expand_batch_dim else [1]`,
- `E_ = E if E else [1]`,
- `S_ = [tf.reduce_prod(S)]`.
This function "reverses" `make_batch_of_event_sample_matrices`.
Args:
x: `Tensor` of shape `B_+E_+S_`.
sample_shape: `Tensor` (1D, `int32`).
expand_batch_dim: Python `bool`. If `True` the batch dims will be expanded
such that `batch_ndims>=1`.
name: Python `str`. The name to give this op.
Returns:
x: `Tensor`. Input transposed/reshaped to `S+B+E`.
"""
with self._name_scope(name, values=[x, sample_shape]):
x = ops.convert_to_tensor(x, name="x")
      # x.shape: B_+E_+[prod(S)]
      sample_shape = ops.convert_to_tensor(sample_shape, name="sample_shape")
      x = distribution_util.rotate_transpose(x, shift=1)
      # x.shape: [prod(S)]+B_+E_
if self._is_all_constant_helper(self.batch_ndims, self.event_ndims):
if self._batch_ndims_is_0 or self._event_ndims_is_0:
squeeze_dims = []
if self._event_ndims_is_0:
squeeze_dims += [-1]
if self._batch_ndims_is_0 and expand_batch_dim:
squeeze_dims += [1]
if squeeze_dims:
x = array_ops.squeeze(x, axis=squeeze_dims)
# x.shape: [prod(S)]+B+E
_, batch_shape, event_shape = self.get_shape(x)
else:
s = (x.get_shape().as_list() if x.get_shape().is_fully_defined()
else array_ops.shape(x))
batch_shape = s[1:1+self.batch_ndims]
# Since sample_dims=1 and is left-most, we add 1 to the number of
# batch_ndims to get the event start dim.
event_start = array_ops.where(
math_ops.logical_and(expand_batch_dim, self._batch_ndims_is_0),
2, 1 + self.batch_ndims)
event_shape = s[event_start:event_start+self.event_ndims]
new_shape = array_ops.concat([sample_shape, batch_shape, event_shape], 0)
x = array_ops.reshape(x, shape=new_shape)
# x.shape: S+B+E
return x
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
(values or []) + [self.batch_ndims, self.event_ndims])) as scope:
yield scope
def _is_all_constant_helper(self, *args):
"""Helper which returns True if all inputs are constant_value."""
return all(tensor_util.constant_value(x) is not None for x in args)
def _assert_non_negative_int32_scalar(self, x):
"""Helper which ensures that input is a non-negative, int32, scalar."""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype != dtypes.int32.base_dtype:
raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32))
x_value_static = tensor_util.constant_value(x)
if x.get_shape().ndims is not None and x_value_static is not None:
if x.get_shape().ndims != 0:
raise ValueError("%s.ndims=%d is not 0 (scalar)" %
(x.name, x.get_shape().ndims))
if x_value_static < 0:
raise ValueError("%s.value=%d cannot be negative" %
(x.name, x_value_static))
return x
if self.validate_args:
x = control_flow_ops.with_dependencies([
check_ops.assert_rank(x, 0),
check_ops.assert_non_negative(x)], x)
return x
def _introspect_ndims(self, ndims):
"""Helper to establish some properties of input ndims args."""
if self._is_all_constant_helper(ndims):
return (tensor_util.constant_value(ndims),
tensor_util.constant_value(ndims) == 0)
return None, math_ops.equal(ndims, 0)
| 0.005043 |
# -*- encoding: utf-8 -*-
import argparse
import sys
import traceback
import csv
import time
import requests
from hashlib import md5
from yamlns import namespace as ns
import mailchimp_marketing as MailchimpMarketing
from mailchimp_marketing.api_client import ApiClientError
from consolemsg import step, error, success
from erppeek import Client
import dbconfig
ERP_CLIENT = Client(**dbconfig.erppeek)
MAILCHIMP_CLIENT = MailchimpMarketing.Client(
dict(api_key=dbconfig.MAILCHIMP_APIKEY, server=dbconfig.MAILCHIMP_SERVER_PREFIX)
)
doit = False
def get_mailchimp_list_id(list_name):
all_lists = MAILCHIMP_CLIENT.lists.get_all_lists(
fields=['lists.id,lists.name'],
count=100
)['lists']
for l in all_lists:
if l['name'] == list_name:
return l['id']
raise Exception("List: <{}> not found".format(list_name))
def read_data_from_csv(csv_file):
with open(csv_file, 'rb') as f:
reader = csv.reader(f, delimiter=';')
header = reader.next()
# check if file is utf8 + BOM
if '\xef\xbb\xbf' in header[0]:
raise IOError
if len(header) == 1:
reader = csv.reader(f, delimiter=',')
header = header[0].split(',')
csv_content = [ns(dict(zip(header, row))) for row in reader if row[0]]
return csv_content
def get_subscriber_hash(email):
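    # Mailchimp identifies a list member by the MD5 hash of the lowercased email address.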
subscriber_hash = md5(email.lower()).hexdigest()
return subscriber_hash
def archive_clients_from_list(list_name, email_list):
if not doit:
return ""
list_id = get_mailchimp_list_id(list_name)
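    # Build one DELETE operation per member and submit them in a single batch request;
    # the loop below polls until Mailchimp reports the batch as 'finished'.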
operations = []
for email in email_list:
operation = {
"method": "DELETE",
"path": "/lists/{list_id}/members/{subscriber_hash}".format(
list_id=list_id,
subscriber_hash=get_subscriber_hash(email)
),
"operation_id": email,
}
operations.append(operation)
payload = {
"operations": operations
}
try:
response = MAILCHIMP_CLIENT.batches.start(payload)
    except ApiClientError as e:
        msg = "An error occurred archiving the batch request, reason: {}"
        error(msg.format(e.text))
else:
batch_id = response['id']
while response['status'] != 'finished':
time.sleep(2)
response = MAILCHIMP_CLIENT.batches.status(batch_id)
step("Archived operation finished!!")
step("Total operations: {}, finished operations: {}, errored operations: {}".format(
response['total_operations'],
response['finished_operations'],
response['errored_operations']
))
result_summary = requests.get(response['response_body_url'])
result_summary.raise_for_status()
return result_summary.content
def is_titular_partner_mail(email):
email_ids = ERP_CLIENT.ResPartnerAddress.search([('email', '=', email)])
if not email_ids:
return False
partners_ids = [
item['partner_id'][0]
for item in ERP_CLIENT.ResPartnerAddress.read(email_ids, ['partner_id'])
if item and 'partner_id' in item and item['partner_id']
]
polisses_ids = ERP_CLIENT.GiscedataPolissa.search([('titular','in',partners_ids)])
if not polisses_ids:
return False
return True
def get_not_active(emails):
to_archive = []
total = len(emails)
for counter, email in enumerate(emails):
if not is_titular_partner_mail(email):
to_archive.append(email)
step("{}/{} - {} --> no titular", counter+1, total, email)
else:
step("{}/{} - {} --> titular", counter+1, total, email)
return to_archive
def main(list_name, mailchimp_export_file, output):
csv_data = read_data_from_csv(mailchimp_export_file)
step("{} lines read from input csv", len(csv_data))
mails = [item['Email Address'] for item in csv_data]
step("{} emails extracted from input csv", len(mails))
to_archive = get_not_active(mails)
step("{} emails to archive found", len(to_archive))
result = ''
if doit:
step("archiving...")
result = archive_clients_from_list(list_name.strip(), to_archive)
step("storing result {}", len(result))
    with open(output, 'w') as f:
f.write(result)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=''
)
parser.add_argument(
'--list',
dest='list_name',
required=True,
help="nom de la llista de mailchimp"
)
parser.add_argument(
'--mailchimp_export_file',
dest='mailchimp_export_file',
required=True,
help="Fitxer amb export del mailchimp"
)
parser.add_argument(
'--output',
dest='output',
required=True,
help="Fitxer de sortida amb els resultats"
)
parser.add_argument(
'--doit',
type=bool,
default=False,
const=True,
nargs='?',
help='realitza les accions'
)
args = parser.parse_args()
global doit
doit = args.doit
if doit:
success("Es faran canvis a les polisses (--doit)")
else:
success("No es faran canvis a les polisses (sense opció --doit)")
try:
main(args.list_name, args.mailchimp_export_file, args.output)
except Exception as e:
traceback.print_exc(file=sys.stdout)
error("El proceso no ha finalizado correctamente: {}", str(e))
else:
success("Script finalizado")
| 0.002179 |
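# Builds nested lists and dicts (up to four levels deep) out of a 50-character string,
# a 50-digit integer and instances of a class with a long repr, then prints 'done';
# presumably a fixture for exercising repr/pretty-printing of deeply nested values.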
s = "01234567890123456789012345678901234567890123456789"
sa1 = [s]
sa2 = [sa1, sa1]
sa3 = [sa2, sa2, sa2]
sa4 = [sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3, sa3]
da1 = {s : s}
da2 = {s : da1, '1' : da1}
da3 = {s : da2, '1' : da2, '2' : da2}
da4 = {s : da3, '01': da3, '02': da3, '03': da3, '04': da3, '05': da3, '06': da3, '07': da3, '08': da3, '09': da3, '10': da3, '11': da3, '12': da3, '13': da3, '14': da3, '15': da3, '16': da3, '17': da3, '18': da3, '19': da3, '20': da3}
n = 12345678901234567890123456789012345678901234567890
na1 = [n]
na2 = [na1, na1]
na3 = [na2, na2, na2]
na4 = [na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3, na3]
class my_class:
def __repr__(self):
return "my_class: 0123456789012345678901234567890123456789"
c = my_class()
ca1 = [c]
ca2 = [ca1, ca1]
ca3 = [ca2, ca2, ca2]
ca4 = [ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3, ca3]
print('done')
| 0.017774 |
#!/usr/bin/env python
# vim:set et ts=4 sw=4:
""" Handles packages from policy queues
@contact: Debian FTP Master <[email protected]>
@copyright: 2001, 2002, 2003, 2004, 2005, 2006 James Troup <[email protected]>
@copyright: 2009 Joerg Jaspert <[email protected]>
@copyright: 2009 Frank Lichtenheld <[email protected]>
@copyright: 2009 Mark Hymers <[email protected]>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
# <mhy> So how do we handle that at the moment?
# <stew> Probably incorrectly.
################################################################################
import os
import datetime
import re
import sys
import traceback
import apt_pkg
from daklib.dbconn import *
from daklib import daklog
from daklib import utils
from daklib.dak_exceptions import CantOpenError, AlreadyLockedError, CantGetLockError
from daklib.config import Config
from daklib.archive import ArchiveTransaction
from daklib.urgencylog import UrgencyLog
import daklib.announce
# Globals
Options = None
Logger = None
################################################################################
def do_comments(dir, srcqueue, opref, npref, line, fn, transaction):
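    # Process comment files named <opref><changes-prefix> in `dir`: a file is only
    # acted on if its first line equals `line` (e.g. "OK"/"NOTOK"); the remaining
    # lines form the reason passed to `fn` for every matching upload.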
session = transaction.session
actions = []
for comm in [ x for x in os.listdir(dir) if x.startswith(opref) ]:
lines = open(os.path.join(dir, comm)).readlines()
if len(lines) == 0 or lines[0] != line + "\n": continue
# If the ACCEPT includes a _<arch> we only accept that .changes.
# Otherwise we accept all .changes that start with the given prefix
changes_prefix = comm[len(opref):]
if changes_prefix.count('_') < 2:
changes_prefix = changes_prefix + '_'
else:
changes_prefix = changes_prefix + '.changes'
        # We need to escape "_" as we use it with the LIKE operator (via the
        # SQLAlchemy startswith) later.
changes_prefix = changes_prefix.replace("_", r"\_")
uploads = session.query(PolicyQueueUpload).filter_by(policy_queue=srcqueue) \
.join(PolicyQueueUpload.changes).filter(DBChange.changesname.startswith(changes_prefix)) \
.order_by(PolicyQueueUpload.source_id)
reason = "".join(lines[1:])
actions.extend((u, reason) for u in uploads)
if opref != npref:
newcomm = npref + comm[len(opref):]
newcomm = utils.find_next_free(os.path.join(dir, newcomm))
transaction.fs.move(os.path.join(dir, comm), newcomm)
actions.sort()
for u, reason in actions:
print("Processing changes file: {0}".format(u.changes.changesname))
fn(u, srcqueue, reason, transaction)
################################################################################
def try_or_reject(function):
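    # Decorator for policy actions: on failure the transaction is rolled back and the
    # upload is rejected with the traceback appended to the comments; if even that
    # fails, the rejection is retried without sending a notification.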
def wrapper(upload, srcqueue, comments, transaction):
try:
function(upload, srcqueue, comments, transaction)
except Exception as e:
comments = 'An exception was raised while processing the package:\n{0}\nOriginal comments:\n{1}'.format(traceback.format_exc(), comments)
try:
transaction.rollback()
real_comment_reject(upload, srcqueue, comments, transaction)
except Exception as e:
comments = 'In addition an exception was raised while trying to reject the upload:\n{0}\nOriginal rejection:\n{1}'.format(traceback.format_exc(), comments)
transaction.rollback()
real_comment_reject(upload, srcqueue, comments, transaction, notify=False)
if not Options['No-Action']:
transaction.commit()
return wrapper
################################################################################
@try_or_reject
def comment_accept(upload, srcqueue, comments, transaction):
for byhand in upload.byhand:
path = os.path.join(srcqueue.path, byhand.filename)
if os.path.exists(path):
raise Exception('E: cannot ACCEPT upload with unprocessed byhand file {0}'.format(byhand.filename))
cnf = Config()
fs = transaction.fs
session = transaction.session
changesname = upload.changes.changesname
allow_tainted = srcqueue.suite.archive.tainted
# We need overrides to get the target component
overridesuite = upload.target_suite
if overridesuite.overridesuite is not None:
overridesuite = session.query(Suite).filter_by(suite_name=overridesuite.overridesuite).one()
def binary_component_func(db_binary):
override = session.query(Override).filter_by(suite=overridesuite, package=db_binary.package) \
.join(OverrideType).filter(OverrideType.overridetype == db_binary.binarytype) \
.join(Component).one()
return override.component
def source_component_func(db_source):
override = session.query(Override).filter_by(suite=overridesuite, package=db_source.source) \
.join(OverrideType).filter(OverrideType.overridetype == 'dsc') \
.join(Component).one()
return override.component
all_target_suites = [upload.target_suite]
all_target_suites.extend([q.suite for q in upload.target_suite.copy_queues])
for suite in all_target_suites:
if upload.source is not None:
transaction.copy_source(upload.source, suite, source_component_func(upload.source), allow_tainted=allow_tainted)
for db_binary in upload.binaries:
# build queues may miss the source package if this is a binary-only upload
if suite != upload.target_suite:
transaction.copy_source(db_binary.source, suite, source_component_func(db_binary.source), allow_tainted=allow_tainted)
transaction.copy_binary(db_binary, suite, binary_component_func(db_binary), allow_tainted=allow_tainted, extra_archives=[upload.target_suite.archive])
# Copy .changes if needed
if upload.target_suite.copychanges:
src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
dst = os.path.join(upload.target_suite.path, upload.changes.changesname)
fs.copy(src, dst, mode=upload.target_suite.archive.mode)
# Copy upload to Process-Policy::CopyDir
# Used on security.d.o to sync accepted packages to ftp-master, but this
# should eventually be replaced by something else.
copydir = cnf.get('Process-Policy::CopyDir') or None
if copydir is not None:
mode = upload.target_suite.archive.mode
if upload.source is not None:
for f in [ df.poolfile for df in upload.source.srcfiles ]:
dst = os.path.join(copydir, f.basename)
if not os.path.exists(dst):
fs.copy(f.fullpath, dst, mode=mode)
for db_binary in upload.binaries:
f = db_binary.poolfile
dst = os.path.join(copydir, f.basename)
if not os.path.exists(dst):
fs.copy(f.fullpath, dst, mode=mode)
src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
dst = os.path.join(copydir, upload.changes.changesname)
if not os.path.exists(dst):
fs.copy(src, dst, mode=mode)
if upload.source is not None and not Options['No-Action']:
urgency = upload.changes.urgency
if urgency not in cnf.value_list('Urgency::Valid'):
urgency = cnf['Urgency::Default']
UrgencyLog().log(upload.source.source, upload.source.version, urgency)
print " ACCEPT"
if not Options['No-Action']:
Logger.log(["Policy Queue ACCEPT", srcqueue.queue_name, changesname])
pu = get_processed_upload(upload)
daklib.announce.announce_accept(pu)
# TODO: code duplication. Similar code is in process-upload.
# Move .changes to done
src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
now = datetime.datetime.now()
donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
dst = os.path.join(donedir, upload.changes.changesname)
dst = utils.find_next_free(dst)
fs.copy(src, dst, mode=0o644)
remove_upload(upload, transaction)
################################################################################
@try_or_reject
def comment_reject(*args):
real_comment_reject(*args, manual=True)
def real_comment_reject(upload, srcqueue, comments, transaction, notify=True, manual=False):
cnf = Config()
fs = transaction.fs
session = transaction.session
changesname = upload.changes.changesname
queuedir = upload.policy_queue.path
rejectdir = cnf['Dir::Reject']
### Copy files to reject/
poolfiles = [b.poolfile for b in upload.binaries]
if upload.source is not None:
poolfiles.extend([df.poolfile for df in upload.source.srcfiles])
# Not beautiful...
files = [ af.path for af in session.query(ArchiveFile) \
.filter_by(archive=upload.policy_queue.suite.archive) \
.join(ArchiveFile.file) \
.filter(PoolFile.file_id.in_([ f.file_id for f in poolfiles ])) ]
for byhand in upload.byhand:
path = os.path.join(queuedir, byhand.filename)
if os.path.exists(path):
files.append(path)
files.append(os.path.join(queuedir, changesname))
for fn in files:
dst = utils.find_next_free(os.path.join(rejectdir, os.path.basename(fn)))
fs.copy(fn, dst, link=True)
### Write reason
dst = utils.find_next_free(os.path.join(rejectdir, '{0}.reason'.format(changesname)))
fh = fs.create(dst)
fh.write(comments)
fh.close()
### Send mail notification
if notify:
rejected_by = None
reason = comments
# Try to use From: from comment file if there is one.
# This is not very elegant...
match = re.match(r"\AFrom: ([^\n]+)\n\n", comments)
if match:
rejected_by = match.group(1)
reason = '\n'.join(comments.splitlines()[2:])
pu = get_processed_upload(upload)
daklib.announce.announce_reject(pu, reason, rejected_by)
print " REJECT"
if not Options["No-Action"]:
Logger.log(["Policy Queue REJECT", srcqueue.queue_name, upload.changes.changesname])
changes = upload.changes
remove_upload(upload, transaction)
session.delete(changes)
################################################################################
def remove_upload(upload, transaction):
fs = transaction.fs
session = transaction.session
changes = upload.changes
# Remove byhand and changes files. Binary and source packages will be
# removed from {bin,src}_associations and eventually removed by clean-suites automatically.
queuedir = upload.policy_queue.path
for byhand in upload.byhand:
path = os.path.join(queuedir, byhand.filename)
if os.path.exists(path):
fs.unlink(path)
session.delete(byhand)
fs.unlink(os.path.join(queuedir, upload.changes.changesname))
session.delete(upload)
session.flush()
################################################################################
def get_processed_upload(upload):
pu = daklib.announce.ProcessedUpload()
pu.maintainer = upload.changes.maintainer
pu.changed_by = upload.changes.changedby
pu.fingerprint = upload.changes.fingerprint
pu.suites = [ upload.target_suite ]
pu.from_policy_suites = [ upload.target_suite ]
changes_path = os.path.join(upload.policy_queue.path, upload.changes.changesname)
pu.changes = open(changes_path, 'r').read()
pu.changes_filename = upload.changes.changesname
pu.sourceful = upload.source is not None
pu.source = upload.changes.source
pu.version = upload.changes.version
pu.architecture = upload.changes.architecture
pu.bugs = upload.changes.closes
pu.program = "process-policy"
return pu
################################################################################
def remove_unreferenced_binaries(policy_queue, transaction):
"""Remove binaries that are no longer referenced by an upload
@type policy_queue: L{daklib.dbconn.PolicyQueue}
@type transaction: L{daklib.archive.ArchiveTransaction}
"""
session = transaction.session
suite = policy_queue.suite
query = """
SELECT b.*
FROM binaries b
JOIN bin_associations ba ON b.id = ba.bin
WHERE ba.suite = :suite_id
AND NOT EXISTS (SELECT 1 FROM policy_queue_upload_binaries_map pqubm
JOIN policy_queue_upload pqu ON pqubm.policy_queue_upload_id = pqu.id
WHERE pqu.policy_queue_id = :policy_queue_id
AND pqubm.binary_id = b.id)"""
binaries = session.query(DBBinary).from_statement(query) \
.params({'suite_id': policy_queue.suite_id, 'policy_queue_id': policy_queue.policy_queue_id})
for binary in binaries:
Logger.log(["removed binary from policy queue", policy_queue.queue_name, binary.package, binary.version])
transaction.remove_binary(binary, suite)
def remove_unreferenced_sources(policy_queue, transaction):
"""Remove sources that are no longer referenced by an upload or a binary
@type policy_queue: L{daklib.dbconn.PolicyQueue}
@type transaction: L{daklib.archive.ArchiveTransaction}
"""
session = transaction.session
suite = policy_queue.suite
query = """
SELECT s.*
FROM source s
JOIN src_associations sa ON s.id = sa.source
WHERE sa.suite = :suite_id
AND NOT EXISTS (SELECT 1 FROM policy_queue_upload pqu
WHERE pqu.policy_queue_id = :policy_queue_id
AND pqu.source_id = s.id)
AND NOT EXISTS (SELECT 1 FROM binaries b
JOIN bin_associations ba ON b.id = ba.bin
WHERE b.source = s.id
AND ba.suite = :suite_id)"""
sources = session.query(DBSource).from_statement(query) \
.params({'suite_id': policy_queue.suite_id, 'policy_queue_id': policy_queue.policy_queue_id})
for source in sources:
Logger.log(["removed source from policy queue", policy_queue.queue_name, source.source, source.version])
transaction.remove_source(source, suite)
################################################################################
def main():
global Options, Logger
cnf = Config()
session = DBConn().session()
Arguments = [('h',"help","Process-Policy::Options::Help"),
('n',"no-action","Process-Policy::Options::No-Action")]
for i in ["help", "no-action"]:
if not cnf.has_key("Process-Policy::Options::%s" % (i)):
cnf["Process-Policy::Options::%s" % (i)] = ""
queue_name = apt_pkg.parse_commandline(cnf.Cnf,Arguments,sys.argv)
if len(queue_name) != 1:
print "E: Specify exactly one policy queue"
sys.exit(1)
queue_name = queue_name[0]
Options = cnf.subtree("Process-Policy::Options")
if Options["Help"]:
usage()
Logger = daklog.Logger("process-policy")
if not Options["No-Action"]:
urgencylog = UrgencyLog()
with ArchiveTransaction() as transaction:
session = transaction.session
try:
pq = session.query(PolicyQueue).filter_by(queue_name=queue_name).one()
except NoResultFound:
print "E: Cannot find policy queue %s" % queue_name
sys.exit(1)
commentsdir = os.path.join(pq.path, 'COMMENTS')
# The comments stuff relies on being in the right directory
os.chdir(pq.path)
do_comments(commentsdir, pq, "REJECT.", "REJECTED.", "NOTOK", comment_reject, transaction)
do_comments(commentsdir, pq, "ACCEPT.", "ACCEPTED.", "OK", comment_accept, transaction)
do_comments(commentsdir, pq, "ACCEPTED.", "ACCEPTED.", "OK", comment_accept, transaction)
remove_unreferenced_binaries(pq, transaction)
remove_unreferenced_sources(pq, transaction)
if not Options['No-Action']:
urgencylog.close()
################################################################################
if __name__ == '__main__':
main()
| 0.004916 |
# Copyright (c) 2014 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module is intended to standardize workflows when working with various databases
such as Impala, Postgresql, etc. Even with pep-249 (DB API 2), workflows differ
slightly. For example Postgresql does not allow changing databases from within a
connection, instead a new connection must be made. However Impala does not allow
specifying a database upon connection, instead a cursor must be created and a USE
command must be issued.
'''
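# A minimal usage sketch; the host, port and database names below are placeholders,
# not values defined by this module:
#
#   connector = DbConnector(IMPALA, host_name='impala-host', port=21050)
#   with connector.open_connection(db_name='tpch') as conn:
#     print(conn.list_table_names())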
from contextlib import contextmanager
try:
from impala.dbapi import connect as impala_connect
except:
print('Error importing impyla. Please make sure it is installed. '
'See the README for details.')
raise
from itertools import izip
from logging import getLogger
from socket import getfqdn
from tests.comparison.model import Column, Table, TYPES, String
LOG = getLogger(__name__)
IMPALA = 'IMPALA'
POSTGRESQL = 'POSTGRESQL'
MYSQL = 'MYSQL'
DATABASES = [IMPALA, POSTGRESQL, MYSQL]
mysql_connect = None
postgresql_connect = None
class DbConnector(object):
'''Wraps a DB API 2 implementation to provide a standard way of obtaining a
connection and selecting a database.
Any database that supports transactions will have auto-commit enabled.
'''
def __init__(self, db_type, user_name=None, password=None, host_name=None, port=None):
self.db_type = db_type.upper()
if self.db_type not in DATABASES:
raise Exception('Unsupported database: %s' % db_type)
self.user_name = user_name
self.password = password
self.host_name = host_name or getfqdn()
self.port = port
def create_connection(self, db_name=None):
if self.db_type == IMPALA:
connection_class = ImpalaDbConnection
connection = impala_connect(host=self.host_name, port=self.port or 21050)
elif self.db_type == POSTGRESQL:
connection_class = PostgresqlDbConnection
connection_args = {'user': self.user_name or 'postgres'}
if self.password:
connection_args['password'] = self.password
if db_name:
connection_args['database'] = db_name
if self.host_name:
connection_args['host'] = self.host_name
if self.port:
connection_args['port'] = self.port
global postgresql_connect
if not postgresql_connect:
try:
from psycopg2 import connect as postgresql_connect
except:
print('Error importing psycopg2. Please make sure it is installed. '
'See the README for details.')
raise
connection = postgresql_connect(**connection_args)
connection.autocommit = True
elif self.db_type == MYSQL:
connection_class = MySQLDbConnection
connection_args = {'user': self.user_name or 'root'}
if self.password:
connection_args['passwd'] = self.password
if db_name:
connection_args['db'] = db_name
if self.host_name:
connection_args['host'] = self.host_name
if self.port:
connection_args['port'] = self.port
global mysql_connect
if not mysql_connect:
try:
from MySQLdb import connect as mysql_connect
except:
print('Error importing MySQLdb. Please make sure it is installed. '
'See the README for details.')
raise
connection = mysql_connect(**connection_args)
else:
raise Exception('Unexpected database type: %s' % self.db_type)
return connection_class(self, connection, db_name=db_name)
@contextmanager
def open_connection(self, db_name=None):
connection = None
try:
connection = self.create_connection(db_name=db_name)
yield connection
finally:
if connection:
try:
connection.close()
except Exception as e:
LOG.debug('Error closing connection: %s', e, exc_info=True)
class DbConnection(object):
'''Wraps a DB API 2 connection. Instances should only be obtained through the
DbConnector.create_connection(...) method.
'''
@staticmethod
def describe_common_tables(db_connections, filter_col_types=[]):
'''Find and return a list of Table objects that the given connections have in
common.
@param filter_col_types: Ignore any cols if they are of a data type contained
in this collection.
'''
common_table_names = None
for db_connection in db_connections:
table_names = set(db_connection.list_table_names())
if common_table_names is None:
common_table_names = table_names
else:
common_table_names &= table_names
common_table_names = sorted(common_table_names)
tables = list()
for table_name in common_table_names:
common_table = None
mismatch = False
for db_connection in db_connections:
table = db_connection.describe_table(table_name)
table.cols = [col for col in table.cols if col.type not in filter_col_types]
if common_table is None:
common_table = table
continue
if len(common_table.cols) != len(table.cols):
LOG.debug('Ignoring table %s.'
' It has a different number of columns across databases.', table_name)
mismatch = True
break
for left, right in izip(common_table.cols, table.cols):
          if left.name != right.name or left.type != right.type:
LOG.debug('Ignoring table %s. It has different columns %s vs %s.' %
(table_name, left, right))
mismatch = True
break
if mismatch:
break
if not mismatch:
tables.append(common_table)
return tables
def __init__(self, connector, connection, db_name=None):
self.connector = connector
self.connection = connection
self.db_name = db_name
@property
def db_type(self):
return self.connector.db_type
def create_cursor(self):
return DatabaseCursor(self.connection.cursor(), self)
@contextmanager
def open_cursor(self):
'''Returns a new cursor for use in a "with" statement. When the "with" statement ends,
the cursor will be closed.
'''
cursor = None
try:
cursor = self.create_cursor()
yield cursor
finally:
self.close_cursor_quietly(cursor)
def close_cursor_quietly(self, cursor):
if cursor:
try:
cursor.close()
except Exception as e:
LOG.debug('Error closing cursor: %s', e, exc_info=True)
def list_db_names(self):
'''Return a list of database names always in lowercase.'''
rows = self.execute_and_fetchall(self.make_list_db_names_sql())
return [row[0].lower() for row in rows]
def make_list_db_names_sql(self):
return 'SHOW DATABASES'
def list_table_names(self):
'''Return a list of table names always in lowercase.'''
rows = self.execute_and_fetchall(self.make_list_table_names_sql())
return [row[0].lower() for row in rows]
def make_list_table_names_sql(self):
return 'SHOW TABLES'
def describe_table(self, table_name):
'''Return a Table with table and col names always in lowercase.'''
rows = self.execute_and_fetchall(self.make_describe_table_sql(table_name))
table = Table(table_name.lower())
for row in rows:
col_name, data_type = row[:2]
table.cols.append(Column(table, col_name.lower(), self.parse_data_type(data_type)))
return table
def make_describe_table_sql(self, table_name):
return 'DESCRIBE ' + table_name
def parse_data_type(self, sql):
sql = sql.upper()
# Types may have declared a database specific alias
for type_ in TYPES:
if sql in getattr(type_, self.db_type, []):
return type_
for type_ in TYPES:
if type_.__name__.upper() == sql:
return type_
if 'CHAR' in sql:
return String
raise Exception('Unknown data type: ' + sql)
def create_database(self, db_name):
db_name = db_name.lower()
with self.open_cursor() as cursor:
cursor.execute('CREATE DATABASE ' + db_name)
def drop_db_if_exists(self, db_name):
'''This should not be called from a connection to the database being dropped.'''
db_name = db_name.lower()
if db_name not in self.list_db_names():
return
if self.db_name and self.db_name.lower() == db_name:
raise Exception('Cannot drop database while still connected to it')
self.drop_database(db_name)
def drop_database(self, db_name):
db_name = db_name.lower()
self.execute('DROP DATABASE ' + db_name)
@property
def supports_index_creation(self):
return True
def index_table(self, table_name):
table = self.describe_table(table_name)
with self.open_cursor() as cursor:
for col in table.cols:
index_name = '%s_%s' % (table_name, col.name)
if self.db_name:
index_name = '%s_%s' % (self.db_name, index_name)
cursor.execute('CREATE INDEX %s ON %s(%s)' % (index_name, table_name, col.name))
@property
def supports_kill_connection(self):
return False
def kill_connection(self):
    '''Kill the current connection and any currently running queries associated with the
connection.
'''
raise Exception('Killing connection is not supported')
def materialize_query(self, query_as_text, table_name):
self.execute('CREATE TABLE %s AS %s' % (table_name.lower(), query_as_text))
def drop_table(self, table_name):
self.execute('DROP TABLE ' + table_name.lower())
def execute(self, sql):
with self.open_cursor() as cursor:
cursor.execute(sql)
def execute_and_fetchall(self, sql):
with self.open_cursor() as cursor:
cursor.execute(sql)
return cursor.fetchall()
def close(self):
'''Close the underlying connection.'''
self.connection.close()
def reconnect(self):
self.close()
other = self.connector.create_connection(db_name=self.db_name)
self.connection = other.connection
class DatabaseCursor(object):
'''Wraps a DB API 2 cursor to provide access to the related connection. This class
implements the DB API 2 interface by delegation.
'''
def __init__(self, cursor, connection):
self.cursor = cursor
self.connection = connection
def __getattr__(self, attr):
return getattr(self.cursor, attr)
class ImpalaDbConnection(DbConnection):
def create_cursor(self):
cursor = DbConnection.create_cursor(self)
if self.db_name:
cursor.execute('USE %s' % self.db_name)
return cursor
def drop_database(self, db_name):
'''This should not be called from a connection to the database being dropped.'''
db_name = db_name.lower()
with self.connector.open_connection(db_name) as list_tables_connection:
with list_tables_connection.open_cursor() as drop_table_cursor:
for table_name in list_tables_connection.list_table_names():
drop_table_cursor.execute('DROP TABLE ' + table_name)
self.execute('DROP DATABASE ' + db_name)
@property
def supports_index_creation(self):
return False
class PostgresqlDbConnection(DbConnection):
def make_list_db_names_sql(self):
return 'SELECT datname FROM pg_database'
def make_list_table_names_sql(self):
return '''
SELECT table_name
FROM information_schema.tables
WHERE table_schema = 'public' '''
def make_describe_table_sql(self, table_name):
return '''
SELECT column_name, data_type
FROM information_schema.columns
WHERE table_name = '%s'
ORDER BY ordinal_position''' % table_name
class MySQLDbConnection(DbConnection):
def __init__(self, connector, connection, db_name=None):
DbConnection.__init__(self, connector, connection, db_name=db_name)
self.session_id = self.execute_and_fetchall('SELECT connection_id()')[0][0]
def describe_table(self, table_name):
'''Return a Table with table and col names always in lowercase.'''
rows = self.execute_and_fetchall(self.make_describe_table_sql(table_name))
table = Table(table_name.lower())
for row in rows:
col_name, data_type = row[:2]
if data_type == 'tinyint(1)':
# Just assume this is a boolean...
data_type = 'boolean'
if '(' in data_type:
# Strip the size of the data type
data_type = data_type[:data_type.index('(')]
table.cols.append(Column(table, col_name.lower(), self.parse_data_type(data_type)))
return table
@property
def supports_kill_connection(self):
return True
def kill_connection(self):
with self.connector.open_connection(db_name=self.db_name) as connection:
connection.execute('KILL %s' % (self.session_id))
def index_table(self, table_name):
table = self.describe_table(table_name)
with self.open_cursor() as cursor:
for col in table.cols:
try:
cursor.execute('ALTER TABLE %s ADD INDEX (%s)' % (table_name, col.name))
except Exception as e:
if 'Incorrect index name' not in str(e):
raise
# Some sort of MySQL bug...
LOG.warn('Could not create index on %s.%s: %s' % (table_name, col.name, e))
| 0.011992 |
"""Provide interface for RPC to cluster nodes."""
# Copyright (c) 2014 - I.T. Dev Ltd
#
# This file is part of MCVirt.
#
# MCVirt is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# MCVirt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MCVirt. If not, see <http://www.gnu.org/licenses/>
from mcvirt.client.rpc import Connection
class Node(Connection):
"""A class to perform remote commands on MCVirt nodes."""
def __init__(self, name, node_config, **kwargs):
"""Set member variables."""
self.name = name
self.ip_address = node_config['ip_address'] if 'ip_address' in node_config else None
super(Node, self).__init__(username=node_config['username'],
password=node_config['password'],
host=self.name,
**kwargs)
| 0.000763 |
from googleanalytics.exception import GoogleAnalyticsClientError
from googleanalytics.data import DataSet
import urllib
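# Dimension/metric filter operators accepted by the Google Analytics Data Export API:
# ==, != (match / do not match), >, <, >=, <= (numeric comparisons),
# =~, !~ (regex match / no match), =@, !@ (substring contains / does not contain).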
filter_operators = ['==', '!=', '>', '<', '>=', '<=', '=~', '!~', '=@', '!@']
class Account:
def __init__(self, connection=None, title=None, id=None,
account_id=None, account_name=None, profile_id=None,
currency=None, time_zone=None, web_property_id=None,
table_id=None, updated=None):
self.connection = connection
self.title = title
self.id = id
self.account_id = account_id
self.account_name = account_name
self.profile_id = profile_id
self.currency = currency
self.time_zone = time_zone
self.updated = updated
self.web_property_id = web_property_id
self.table_id = table_id
def __repr__(self):
return '<Account: %s>' % self.title
def get_data(self, start_date, end_date, metrics, dimensions=[], sort=[], filters=[], start_index=0, max_results=0):
"""
Pulls data in from an account and returns a processed data structure for
easy post processing. This method requires the following inputs:
** Required Arguments **
``start_date``
A ``datetime`` object for the lower bound of your query
``end_date``
A ``datetime`` object for the upper bound of your query
``metrics``
A list of metrics, for example: ['pageviews', 'uniquePageviews']
See: http://code.google.com/apis/analytics/docs/gdata/gdataReferenceDimensionsMetrics.html
See: http://code.google.com/apis/analytics/docs/gdata/gdataReference.html#dimensionsAndMetrics
** Optional Arguments **
``dimensions``
A list of dimensions, for example: ['country','browser']
See: http://code.google.com/apis/analytics/docs/gdata/gdataReferenceDimensionsMetrics.html
See: http://code.google.com/apis/analytics/docs/gdata/gdataReference.html#dimensionsAndMetrics
``sort``
A list of dimensions or metrics to sort the output by, should probably
be one of the items you specified in ``dimensions`` or ``metrics``.
For example: ['browser', 'pageviews']
See: http://code.google.com/apis/analytics/docs/gdata/gdataReference.html#sorting
``filters``
A list of filters. A filter expression has three parts:
name - The name of the dimension or metric to filter on.
For example: ga:pageviews will filter on the pageviews metric.
operator - Defines the type of filter match to use. Operators are
specific to either dimensions or metrics.
expression - States the values included or excluded from the results.
Expressions use regular expression syntax.
Learn more about valid operators and expressions here:
http://code.google.com/apis/analytics/docs/gdata/gdataReference.html#filtering
The ``filters`` input accepts this data as a list of lists like so. Please
note that order matters, especially when using boolean operators (see
below).
[
['browser', '=~', 'Firefox', 'AND'], # Regular expression match on 'Firefox'
['browser', '=~', 'Internet (Explorer|Exploder)', 'OR'],
['city', '=@', 'York', 'OR'], # All cities with York as a substring
['state', '!=', 'California', 'AND'], # Everything but California
['timeOnPage', '<', '10'], # Reject results where timeonpage < 10sec
]
Filters can be combined with AND boolean logic as well as with OR
boolean logic. When using both operators, the OR operator has higher
        precedence. When you are using more than one filter, please specify
a fourth item in your list 'AND' or 'OR' to explicitly spell out the
filters' relationships:
For example, this filter selects data from the United States from the
browser Firefox.
[
['country', '==', 'United States', 'OR'],
['browser', '=@', 'FireFox'],
]
This filter selects data from either the United States or Canada.
[
['country', '==', 'United States', 'AND'],
['country', '==', 'Canada'],
]
The first filter limits results to cities starting with 'L' and ending
with 'S'. The second limits results to browsers starting with 'Fire'
and the cities starting with 'L':
[
['city', '=~', '^L.*S$']
]
[
['city', '=~', '^L', 'AND'],
['browser', '=~', '^Fire']
]
``start_index``
The first row to return, starts at 1. This is useful for paging in combination with
max_results, and also to get results past row 1000 (Google Data does not return
more than 1000 results at once)
``max_results``
Number of results to return.
"""
path = '/analytics/feeds/data'
if start_date > end_date:
raise GoogleAnalyticsClientError('Date orders are reversed')
data = {
'ids': self.table_id,
'start-date': start_date.strftime('%Y-%m-%d'),
'end-date': end_date.strftime('%Y-%m-%d'),
}
if start_index > 0:
data['start-index'] = str(start_index)
if max_results > 0:
data['max-results'] = str(max_results)
if dimensions:
data['dimensions'] = ",".join(['ga:' + d for d in dimensions])
data['metrics'] = ",".join(['ga:' + m for m in metrics])
if sort:
_sort = []
for s in sort:
pre = 'ga:'
if s[0] == '-':
pre = '-ga:'
s = s[1:]
_sort.append(pre + s)
data['sort'] = ",".join(_sort)
if filters:
filter_string = self.process_filters(filters)
data['filters'] = filter_string
data = urllib.urlencode(data)
response = self.connection.make_request('GET', path=path, data=data)
raw_xml = response.read()
processed_data = DataSet(raw_xml)
return processed_data
def process_filters(self, filters):
processed_filters = []
multiple_filters = False
if len(filters) > 1:
multiple_filters = True
for filt in filters:
if len(filt) < 3:
continue
if len(filt) == 3:
name, operator, expression = filt
if multiple_filters:
comb = 'AND'
else:
comb = ''
elif len(filt) == 4:
name, operator, expression, comb = filt
                if comb != 'AND' and comb != 'OR':
                    comb = 'AND'
# Reject any filters with invalid operators
if operator not in filter_operators:
continue
name = 'ga:' + name
# Mapping to GA's boolean operators
if comb == 'AND': comb = ';'
if comb == 'OR': comb = ','
# These three characters are special and must be escaped
if '\\' in expression:
expression = expression.replace('\\', '\\\\')
            if ',' in expression:
                expression = expression.replace(',', '\\,')
            if ';' in expression:
                expression = expression.replace(';', '\\;')
processed_filters.append("".join([name, operator, expression, comb]))
filter_string = "".join(processed_filters)
# Strip any trailing boolean symbols
if filter_string:
if filter_string[-1] == ';' or filter_string[-1] == ',':
filter_string = filter_string[:-1]
return filter_string
| 0.007274 |
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm
def write_syn_dataset(csvPathname, rowCount, colCount, SEED):
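    # Write a CSV of rowCount rows and colCount+1 columns of uniformly random 0/1
    # values; the extra trailing column is meant to serve as the GLM response.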
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for i in range(rowCount):
rowData = []
for j in range(colCount):
ri = r1.randint(0,1)
rowData.append(ri)
ri = r1.randint(0,1)
rowData.append(ri)
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1,java_heap_GB=10)
@classmethod
def tearDownClass(cls):
### time.sleep(3600)
h2o.tear_down_cloud()
def test_GLM2_many_cols(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
# (2, 100, 'cA', 300),
# (4, 200, 'cA', 300),
(10000, 1000, 'cB', 300),
(10000, 3000, 'cC', 500),
]
### h2b.browseTheCloud()
lenNodes = len(h2o.nodes)
for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
# csvFilename = 'syn_' + str(SEEDPERFILE) + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE)
parseResult = h2i.import_parse(path=csvPathname, hex_key=hex_key, schema='put', timeoutSecs=90)
print "Parse result['destination_key']:", parseResult['destination_key']
# We should be able to see the parse result?
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
y = colCount
            # normally we don't create x and rely on the default
# create the big concat'ed x like the browser, to see what happens
# x = ','.join(map(str, range(colCount)))
kwargs = {
'response': 'C' + str(y),
'max_iter': 10,
'n_folds': 1,
'alpha': 0.0,
'lambda': 0.0,
}
start = time.time()
x = h2o_glm.goodXFromColumnInfo(y, key=parseResult['destination_key'], timeoutSecs=300, returnStringX=False)
# all-zero/all-na cols are dropped. figure out expected # of coefficients
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
expectedCoeffNum = len(x)
# check that the number of entries in coefficients is right (intercept is in there)
actualCoeffNum = len(glm['glm_model']['submodels'][0]['beta']) - 1
if actualCoeffNum!=expectedCoeffNum:
raise Exception("Should be %s expected coefficients in result." % expectedCoeffNum)
if __name__ == '__main__':
h2o.unit_main()
| 0.009249 |
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.DeclAST import DeclAST
from slicc.ast.TypeAST import TypeAST
from slicc.symbols import Func, Type, Var
class InPortDeclAST(DeclAST):
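    # Highest 'rank' pair value seen across all in_port declarations; recorded on
    # every generated port as "max_port_rank" for use by StateMachine.py.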
max_port_rank = 0
def __init__(self, slicc, ident, msg_type, var_expr, pairs, statements):
super(InPortDeclAST, self).__init__(slicc, pairs)
self.ident = ident
self.msg_type = msg_type
self.var_expr = var_expr
self.statements = statements
self.queue_type = TypeAST(slicc, "InPort")
        if "rank" in self.pairs:
InPortDeclAST.max_port_rank = max(self.pairs["rank"],
InPortDeclAST.max_port_rank)
def __repr__(self):
return "[InPortDecl: %s]" % self.ident
def generate(self):
symtab = self.symtab
void_type = symtab.find("void", Type)
machine = symtab.state_machine
if machine is None:
self.error("InPort declaration not part of a machine.")
code = self.slicc.codeFormatter()
queue_type = self.var_expr.generate(code)
if not queue_type.isInPort:
self.error("The inport queue's type must have the 'inport' " + \
"attribute. Type '%s' does not have this attribute.",
queue_type)
type = self.queue_type.type
in_port = Var(self.symtab, self.ident, self.location, type, str(code),
self.pairs)
symtab.newSymbol(in_port)
symtab.pushFrame()
param_types = []
# Check for Event
type = symtab.find("Event", Type)
if type is None:
self.error("in_port decls require 'Event' enumeration defined")
param_types.append(type)
# Check for Address
type = symtab.find("Address", Type)
if type is None:
self.error("in_port decls require 'Address' type to be defined")
param_types.append(type)
if machine.EntryType != None:
param_types.append(machine.EntryType)
if machine.TBEType != None:
param_types.append(machine.TBEType)
# Add the trigger method - FIXME, this is a bit dirty
pairs = { "external" : "yes" }
func = Func(self.symtab, "trigger", self.location, void_type,
param_types, [], "", pairs, None)
symtab.newSymbol(func)
param_types = []
# Check for Event2
type = symtab.find("Event", Type)
if type is None:
self.error("in_port decls require 'Event' enumeration")
param_types.append(type)
# Check for Address2
type = symtab.find("Address", Type)
if type is None:
self.error("in_port decls require 'Address' type to be defined")
param_types.append(type)
        # Add the doubleTrigger method - this hack supports triggering
        # two simultaneous events
        #
        # The key is that the second transition cannot fail because
        # the first event cannot be undone, therefore you must do some
        # checks before calling doubleTrigger to ensure that won't
        # happen
func = Func(self.symtab, "doubleTrigger", self.location, void_type,
param_types, [], "", pairs, None)
symtab.newSymbol(func)
# Add the continueProcessing method - this hack supports
# messages that don't trigger events
func = Func(self.symtab, "continueProcessing", self.location,
void_type, [], [], "", pairs, None)
symtab.newSymbol(func)
if self.statements is not None:
rcode = self.slicc.codeFormatter()
rcode.indent()
rcode.indent()
self.statements.generate(rcode, None)
in_port["c_code_in_port"] = str(rcode)
symtab.popFrame()
# Add port to state machine
machine.addInPort(in_port)
# Include max_rank to be used by StateMachine.py
in_port["max_port_rank"] = InPortDeclAST.max_port_rank
| 0.001595 |
#! /usr/bin/env python
#
# Perceptual image hash calculation tool based on algorithm described in
# Block Mean Value Based Image Perceptual Hashing by Bian Yang, Fan Gu and Xiamu Niu
#
# Copyright 2014 Commons Machinery http://commonsmachinery.se/
# Distributed under an MIT license, please see LICENSE in the top dir.
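#
# Example usage (the script and image names below are illustrative):
#
#   python blockhash.py --bits 16 --size 256x256 photo1.jpg photo2.png
#
# which prints one "<filename> <hex hash>" line per input image.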
import math
import argparse
import PIL.Image as Image
def median(data):
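    # e.g. median([1, 2, 3, 4]) == 2.5 and median([5, 1, 3]) == 3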
data = sorted(data)
length = len(data)
if length % 2 == 0:
        return (data[length // 2 - 1] + data[length // 2]) / 2.0
return data[length // 2]
def total_value_rgba(im, data, x, y):
r, g, b, a = data[y * im.size[0] + x]
if a == 0:
return 765
else:
return r + g + b
def total_value_rgb(im, data, x, y):
r, g, b = data[y * im.size[0] + x]
return r + g + b
def bits_to_hexhash(bits):
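    # e.g. bits_to_hexhash([0, 0, 0, 1] * 4) == '1111' (16 bits map to 4 hex digits)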
return '{0:0={width}x}'.format(int(''.join([str(x) for x in bits]), 2), width = len(bits) // 4)
def blockhash_even(im, bits):
if im.mode == 'RGBA':
total_value = total_value_rgba
elif im.mode == 'RGB':
total_value = total_value_rgb
else:
raise RuntimeError('Unsupported image mode: {}'.format(im.mode))
data = im.getdata()
width, height = im.size
blocksize_x = width // bits
blocksize_y = height // bits
result = []
for y in range(bits):
for x in range(bits):
value = 0
for iy in range(blocksize_y):
for ix in range(blocksize_x):
cx = x * blocksize_x + ix
cy = y * blocksize_y + iy
value += total_value(im, data, cx, cy)
result.append(value)
m = []
for i in range(4):
m.append(median(result[i*bits*bits//4:i*bits*bits//4+bits*bits//4]))
for i in range(bits * bits):
if (((result[i] < m[0]) and (i < bits*bits/4)) or
((result[i] < m[1]) and (i >= bits*bits/4) and (i < bits*bits/2)) or
((result[i] < m[2]) and (i >= bits*bits/2) and (i < bits*bits/4+bits*bits/2)) or
((result[i] < m[3]) and (i >= bits*bits/2+bits*bits/4))):
result[i] = 0
else:
result[i] = 1
return bits_to_hexhash(result)
def blockhash(im, bits):
if im.mode == 'RGBA':
total_value = total_value_rgba
elif im.mode == 'RGB':
total_value = total_value_rgb
else:
raise RuntimeError('Unsupported image mode: {}'.format(im.mode))
data = im.getdata()
width, height = im.size
even_x = width % bits == 0
even_y = height % bits == 0
if even_x and even_y:
return blockhash_even(im, bits)
blocks = [[0 for col in range(bits)] for row in range(bits)]
block_width = float(width) / bits
block_height = float(height) / bits
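    # When the image size does not divide evenly by bits, a pixel can straddle a
    # block boundary; its value is then split across up to four neighboring blocks
    # using the fractional weights computed below.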
for y in range(height):
if even_y:
# don't bother dividing y, if the size evenly divides by bits
block_top = block_bottom = int(y // block_height)
weight_top, weight_bottom = 1, 0
else:
y_frac, y_int = math.modf((y + 1) % block_height)
weight_top = (1 - y_frac)
weight_bottom = (y_frac)
# y_int will be 0 on bottom/right borders and on block boundaries
if y_int > 0 or (y + 1) == height:
block_top = block_bottom = int(y // block_height)
else:
block_top = int(y // block_height)
block_bottom = int(-(-y // block_height)) # int(math.ceil(float(y) / block_height))
for x in range(width):
value = total_value(im, data, x, y)
if even_x:
# don't bother dividing x, if the size evenly divides by bits
block_left = block_right = int(x // block_width)
weight_left, weight_right = 1, 0
else:
x_frac, x_int = math.modf((x + 1) % block_width)
weight_left = (1 - x_frac)
weight_right = (x_frac)
# x_int will be 0 on bottom/right borders and on block boundaries
if x_int > 0 or (x + 1) == width:
block_left = block_right = int(x // block_width)
else:
block_left = int(x // block_width)
block_right = int(-(-x // block_width)) # int(math.ceil(float(x) / block_width))
# add weighted pixel value to relevant blocks
blocks[block_top][block_left] += value * weight_top * weight_left
blocks[block_top][block_right] += value * weight_top * weight_right
blocks[block_bottom][block_left] += value * weight_bottom * weight_left
blocks[block_bottom][block_right] += value * weight_bottom * weight_right
result = [blocks[row][col] for row in range(bits) for col in range(bits)]
m = []
for i in range(4):
m.append(median(result[i*bits*bits//4:i*bits*bits//4+bits*bits//4]))
for i in range(bits * bits):
if (((result[i] < m[0]) and (i < bits*bits/4)) or
((result[i] < m[1]) and (i >= bits*bits/4) and (i < bits*bits/2)) or
((result[i] < m[2]) and (i >= bits*bits/2) and (i < bits*bits/4+bits*bits/2)) or
((result[i] < m[3]) and (i >= bits*bits/2+bits*bits/4))):
result[i] = 0
else:
result[i] = 1
return bits_to_hexhash(result)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--quick', type=bool, default=False,
help='Use quick hashing method. Default: False')
parser.add_argument('--bits', type=int, default=16,
help='Create hash of size N^2 bits. Default: 16')
parser.add_argument('--size',
help='Resize image to specified size before hashing, e.g. 256x256')
parser.add_argument('--interpolation', type=int, default=1, choices=[1, 2, 3, 4],
                        help='Interpolation method: 1 - nearest neighbor, 2 - bilinear, 3 - bicubic, 4 - antialias. Default: 1')
parser.add_argument('--debug', action='store_true',
help='Print hashes as 2D maps (for debugging)')
parser.add_argument('filenames', nargs='+')
args = parser.parse_args()
if args.interpolation == 1:
interpolation = Image.NEAREST
elif args.interpolation == 2:
interpolation = Image.BILINEAR
elif args.interpolation == 3:
interpolation = Image.BICUBIC
elif args.interpolation == 4:
interpolation = Image.ANTIALIAS
if args.quick:
method = blockhash_even
else:
method = blockhash
for fn in args.filenames:
im = Image.open(fn)
# convert indexed/grayscale images to RGB
if im.mode == '1' or im.mode == 'L' or im.mode == 'P':
im = im.convert('RGB')
elif im.mode == 'LA':
im = im.convert('RGBA')
if args.size:
size = args.size.split('x')
size = (int(size[0]), int(size[1]))
im = im.resize(size, interpolation)
hash = method(im, args.bits)
print('{} {}'.format(fn, hash))
if args.debug:
bin_hash = bin(int(hash, 16))[2:]
map = [bin_hash[i:i+args.bits] for i in range(0, len(bin_hash), args.bits)]
print("")
print("\n".join(map))
print("")
| 0.004238 |
# -*- coding: utf-8 -*-
#
from rest_framework.compat import coreapi, coreschema
from rest_framework import filters
from django.db.models import Q
from .models import Label
from assets.utils import is_query_node_all_assets, get_node
class AssetByNodeFilterBackend(filters.BaseFilterBackend):
fields = ['node', 'all']
def get_schema_fields(self, view):
return [
coreapi.Field(
name=field, location='query', required=False,
type='string', example='', description='', schema=None,
)
for field in self.fields
]
def filter_node_related_all(self, queryset, node):
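        # An asset matches if it is bound to this node itself or to any descendant
        # node (descendant keys share the '<node.key>:' prefix).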
return queryset.filter(
Q(nodes__key__istartswith=f'{node.key}:') |
Q(nodes__key=node.key)
).distinct()
def filter_node_related_direct(self, queryset, node):
return queryset.filter(nodes__key=node.key).distinct()
def filter_queryset(self, request, queryset, view):
node = get_node(request)
if node is None:
return queryset
query_all = is_query_node_all_assets(request)
if query_all:
return self.filter_node_related_all(queryset, node)
else:
return self.filter_node_related_direct(queryset, node)
class FilterAssetByNodeFilterBackend(filters.BaseFilterBackend):
"""
    Must be used together with `assets.api.mixin.FilterAssetByNodeMixin`.
"""
fields = ['node', 'all']
def get_schema_fields(self, view):
return [
coreapi.Field(
name=field, location='query', required=False,
type='string', example='', description='', schema=None,
)
for field in self.fields
]
def filter_queryset(self, request, queryset, view):
node = view.node
if node is None:
return queryset
query_all = view.is_query_node_all_assets
if query_all:
return queryset.filter(
Q(nodes__key__istartswith=f'{node.key}:') |
Q(nodes__key=node.key)
).distinct()
else:
return queryset.filter(nodes__key=node.key).distinct()
class LabelFilterBackend(filters.BaseFilterBackend):
sep = ':'
query_arg = 'label'
def get_schema_fields(self, view):
example = self.sep.join(['os', 'linux'])
return [
coreapi.Field(
name=self.query_arg, location='query', required=False,
type='string', example=example, description=''
)
]
def get_query_labels(self, request):
labels_query = request.query_params.getlist(self.query_arg)
if not labels_query:
return None
q = None
for kv in labels_query:
if '#' in kv:
self.sep = '#'
if self.sep not in kv:
continue
key, value = kv.strip().split(self.sep)[:2]
if not all([key, value]):
continue
if q:
q |= Q(name=key, value=value)
else:
q = Q(name=key, value=value)
if not q:
return []
labels = Label.objects.filter(q, is_active=True)\
.values_list('id', flat=True)
return labels
def filter_queryset(self, request, queryset, view):
labels = self.get_query_labels(request)
if labels is None:
return queryset
if len(labels) == 0:
return queryset.none()
for label in labels:
queryset = queryset.filter(labels=label)
return queryset
class AssetRelatedByNodeFilterBackend(AssetByNodeFilterBackend):
def filter_node_related_all(self, queryset, node):
return queryset.filter(
Q(asset__nodes__key__istartswith=f'{node.key}:') |
Q(asset__nodes__key=node.key)
).distinct()
def filter_node_related_direct(self, queryset, node):
return queryset.filter(asset__nodes__key=node.key).distinct()
class IpInFilterBackend(filters.BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
ips = request.query_params.get('ips')
if not ips:
return queryset
ip_list = [i.strip() for i in ips.split(',')]
queryset = queryset.filter(ip__in=ip_list)
return queryset
def get_schema_fields(self, view):
return [
coreapi.Field(
name='ips', location='query', required=False, type='string',
schema=coreschema.String(
title='ips',
description='ip in filter'
)
)
]
| 0 |
from __future__ import unicode_literals
import re
import json
import random
import string
from .common import InfoExtractor
from ..utils import find_xpath_attr
class HowStuffWorksIE(InfoExtractor):
_VALID_URL = r'https?://[\da-z-]+\.howstuffworks\.com/(?:[^/]+/)*\d+-(?P<id>.+?)-video\.htm'
_TESTS = [
{
'url': 'http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm',
'info_dict': {
'id': '450221',
'display_id': 'cool-jobs-iditarod-musher',
'ext': 'flv',
'title': 'Cool Jobs - Iditarod Musher',
'description': 'md5:82bb58438a88027b8186a1fccb365f90',
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
# md5 is not consistent
'skip_download': True
}
},
{
'url': 'http://adventure.howstuffworks.com/7199-survival-zone-food-and-water-in-the-savanna-video.htm',
'info_dict': {
'id': '453464',
'display_id': 'survival-zone-food-and-water-in-the-savanna',
'ext': 'mp4',
'title': 'Survival Zone: Food and Water In the Savanna',
'description': 'md5:7e1c89f6411434970c15fa094170c371',
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
# md5 is not consistent
'skip_download': True
}
},
{
'url': 'http://entertainment.howstuffworks.com/arts/2706-sword-swallowing-1-by-dan-meyer-video.htm',
'info_dict': {
'id': '440011',
'display_id': 'sword-swallowing-1-by-dan-meyer',
'ext': 'flv',
'title': 'Sword Swallowing #1 by Dan Meyer',
'description': 'md5:b2409e88172913e2e7d3d1159b0ef735',
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
# md5 is not consistent
'skip_download': True
}
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
webpage = self._download_webpage(url, display_id)
content_id = self._search_regex(r'var siteSectionId="(\d+)";', webpage, 'content id')
mp4 = self._search_regex(
r'''(?xs)var\s+clip\s*=\s*{\s*
.+?\s*
content_id\s*:\s*%s\s*,\s*
.+?\s*
mp4\s*:\s*\[(.*?),?\]\s*
};\s*
videoData\.push\(clip\);''' % content_id,
webpage, 'mp4', fatal=False, default=None)
smil = self._download_xml(
'http://services.media.howstuffworks.com/videos/%s/smil-service.smil' % content_id,
content_id, 'Downloading video SMIL')
http_base = find_xpath_attr(
smil,
'./{0}head/{0}meta'.format('{http://www.w3.org/2001/SMIL20/Language}'),
'name',
'httpBase').get('content')
def random_string(str_len=0):
return ''.join([random.choice(string.ascii_uppercase) for _ in range(str_len)])
URL_SUFFIX = '?v=2.11.3&fp=LNX 11,2,202,356&r=%s&g=%s' % (random_string(5), random_string(12))
formats = []
if mp4:
for video in json.loads('[%s]' % mp4):
bitrate = video['bitrate']
fmt = {
'url': video['src'].replace('http://pmd.video.howstuffworks.com', http_base) + URL_SUFFIX,
'format_id': bitrate,
}
m = re.search(r'(?P<vbr>\d+)[Kk]', bitrate)
if m:
fmt['vbr'] = int(m.group('vbr'))
formats.append(fmt)
else:
for video in smil.findall(
'.//{0}body/{0}switch/{0}video'.format('{http://www.w3.org/2001/SMIL20/Language}')):
vbr = int(video.attrib['system-bitrate']) / 1000
formats.append({
'url': '%s/%s%s' % (http_base, video.attrib['src'], URL_SUFFIX),
'format_id': '%dk' % vbr,
'vbr': vbr,
})
self._sort_formats(formats)
title = self._og_search_title(webpage)
TITLE_SUFFIX = ' : HowStuffWorks'
if title.endswith(TITLE_SUFFIX):
title = title[:-len(TITLE_SUFFIX)]
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
return {
'id': content_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'formats': formats,
}
| 0.003083 |
from bss_client.exception import BSSError
from bss_client.request import Request
class BSSClient(object):
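    # Illustrative usage (the endpoint, key and secret are placeholders):
    #   client = BSSClient('https://bss.example.com/api', 'my-key', 'my-secret')
    #   services = client.list_services()  # optionally pass category=...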
def __init__(self, endpoint, key, secret, requests_params=None):
self.endpoint = endpoint
self.key = key
self.secret = secret
if requests_params is None:
self.requests_params = {}
else:
self.requests_params = requests_params
def create_request(self):
return Request(self.endpoint, self.key, self.secret,
**self.requests_params)
def _handle_json_response(self, rsp):
if 200 <= rsp.status_code < 300:
if rsp.text:
return rsp.json()
return ''
raise BSSError(rsp.status_code, rsp.text)
def list_services(self, category=None):
req = self.create_request()
if category:
req.add_param('category', category)
path = '/account/services'
rsp = req.request('GET', path)
return self._handle_json_response(rsp)
def list_catalog(self, expand='productBundleRevisions'):
req = self.create_request()
req.add_param('expand', expand)
rsp = req.request('GET', '/account/catalog')
return self._handle_json_response(rsp)
def list_subscriptions(self):
req = self.create_request()
rsp = req.request('GET', '/subscriptions')
return self._handle_json_response(rsp)
def get_user(self, user_id):
req = self.create_request()
rsp = req.request('GET', '/users/{0}'.format(user_id))
return self._handle_json_response(rsp)
def get_account(self):
req = self.create_request()
rsp = req.request('GET', '/account')
return self._handle_json_response(rsp)
def get_subscription(self, subscription_uuid, expand=None):
req = self.create_request()
if expand:
req.add_param('expand', expand)
rsp = req.request('GET', '/subscriptions/{0}'.format(subscription_uuid))
return self._handle_json_response(rsp)
def create_subscription(self, configurationdata, context, productbundleid,
resourcetype, serviceinstanceuuid, tenantparam=None):
req = self.create_request()
req.add_param('provision', 'true')
req.add_param('configurationdata', configurationdata)
req.add_param('context', context)
req.add_param('productbundleid', productbundleid)
req.add_param('resourcetype', resourcetype)
req.add_param('serviceinstanceuuid', serviceinstanceuuid)
if not tenantparam:
tenantparam = self.get_account()['account']['uuid']
req.add_param('tenantparam', tenantparam)
rsp = req.request('POST', '/subscriptions')
return self._handle_json_response(rsp)
def delete_subscription(self, subscription_uuid):
req = self.create_request()
rsp = req.request('DELETE', '/subscriptions/{0}'.format(subscription_uuid))
return self._handle_json_response(rsp)
| 0.000985 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from setuptools import setup
PACKAGE_VERSION = '0.7'
# dependencies
deps = ['mozfile >= 0.12']
setup(name='mozinfo',
version=PACKAGE_VERSION,
description="Library to get system information for use in Mozilla testing",
long_description="see http://mozbase.readthedocs.org",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='mozilla',
author='Mozilla Automation and Testing Team',
author_email='[email protected]',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL',
packages=['mozinfo'],
include_package_data=True,
zip_safe=False,
install_requires=deps,
entry_points="""
# -*- Entry points: -*-
[console_scripts]
mozinfo = mozinfo:main
""",
)
| 0.00289 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016 Michael Gruener <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import base64
import binascii
import copy
import datetime
import hashlib
import json
import textwrap
import time
DOCUMENTATION = '''
---
module: letsencrypt
author: "Michael Gruener (@mgruener)"
version_added: "2.2"
short_description: Create SSL certificates with Let's Encrypt
description:
- "Create and renew SSL certificates with Let's Encrypt. Let’s Encrypt is a
free, automated, and open certificate authority (CA), run for the
public’s benefit. For details see U(https://letsencrypt.org). The current
implementation supports the http-01, tls-sni-02 and dns-01 challenges."
- "To use this module, it has to be executed at least twice. Either as two
different tasks in the same run or during multiple runs."
- "Between these two tasks you have to fulfill the required steps for the
    chosen challenge by whatever means necessary. For http-01 that means
creating the necessary challenge file on the destination webserver. For
dns-01 the necessary dns record has to be created. tls-sni-02 requires
you to create a SSL certificate with the appropriate subjectAlternativeNames.
It is I(not) the responsibility of this module to perform these steps."
- "For details on how to fulfill these challenges, you might have to read through
U(https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7)"
  - "Although the defaults are chosen so that the module can be used with
the Let's Encrypt CA, the module can be used with any service using the ACME
protocol."
requirements:
- "python >= 2.6"
- openssl
options:
account_key:
description:
      - "File containing the Let's Encrypt account RSA key."
      - "Can be created with C(openssl genrsa ...)."
required: true
account_email:
description:
- "The email address associated with this account."
- "It will be used for certificate expiration warnings."
required: false
default: null
acme_directory:
description:
- "The ACME directory to use. This is the entry point URL to access
CA server API."
- "For safety reasons the default is set to the Let's Encrypt staging server.
This will create technically correct, but untrusted certificates."
required: false
default: https://acme-staging.api.letsencrypt.org/directory
agreement:
description:
- "URI to a terms of service document you agree to when using the
ACME service at C(acme_directory)."
required: false
default: 'https://letsencrypt.org/documents/LE-SA-v1.1.1-August-1-2016.pdf'
challenge:
description: The challenge to be performed.
required: false
choices: [ 'http-01', 'dns-01', 'tls-sni-02']
default: 'http-01'
csr:
description:
- "File containing the CSR for the new certificate."
      - "Can be created with C(openssl req ...)."
- "The CSR may contain multiple Subject Alternate Names, but each one
will lead to an individual challenge that must be fulfilled for the
CSR to be signed."
required: true
    aliases: ['src']
data:
description:
- "The data to validate ongoing challenges."
- "The value that must be used here will be provided by a previous use
of this module."
required: false
default: null
dest:
description: The destination file for the certificate.
required: true
    aliases: ['cert']
remaining_days:
description:
- "The number of days the certificate must have left being valid before it
will be renewed."
required: false
default: 10
'''
EXAMPLES = '''
- letsencrypt:
account_key: /etc/pki/cert/private/account.key
csr: /etc/pki/cert/csr/sample.com.csr
dest: /etc/httpd/ssl/sample.com.crt
register: sample_com_challenge
# perform the necessary steps to fulfill the challenge
# for example:
#
# - copy:
# dest: /var/www/html/{{ sample_com_http_challenge['challenge_data']['sample.com']['http-01']['resource'] }}
# content: "{{ sample_com_http_challenge['challenge_data']['sample.com']['http-01']['resource_value'] }}"
# when: sample_com_challenge|changed
- letsencrypt:
account_key: /etc/pki/cert/private/account.key
csr: /etc/pki/cert/csr/sample.com.csr
dest: /etc/httpd/ssl/sample.com.crt
data: "{{ sample_com_challenge }}"
'''
RETURN = '''
cert_days:
description: the number of days the certificate remains valid.
returned: success
challenge_data:
description: per domain / challenge type challenge data
returned: changed
type: dictionary
contains:
resource:
description: the challenge resource that must be created for validation
returned: changed
type: string
sample: .well-known/acme-challenge/evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA
resource_value:
description: the value the resource has to produce for the validation
returned: changed
type: string
sample: IlirfxKKXA...17Dt3juxGJ-PCt92wr-oA
authorizations:
description: ACME authorization data.
returned: changed
type: list
contains:
authorization:
description: ACME authorization object. See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.1.2
returned: success
type: dict
'''
def nopad_b64(data):
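    # e.g. nopad_b64(b'\x00') == 'AA' -- standard base64 would give 'AA==', the padding is stripped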
return base64.urlsafe_b64encode(data).decode('utf8').replace("=", "")
def simple_get(module,url):
resp, info = fetch_url(module, url, method='GET')
result = None
try:
content = resp.read()
except AttributeError:
if info['body']:
content = info['body']
if content:
if info['content-type'].startswith('application/json'):
try:
result = module.from_json(content.decode('utf8'))
except ValueError:
module.fail_json(msg="Failed to parse the ACME response: {0} {1}".format(url,content))
else:
result = content
if info['status'] >= 400:
module.fail_json(msg="ACME request failed: CODE: {0} RESULT:{1}".format(info['status'],result))
return result
def get_cert_days(module,cert_file):
'''
Return the days the certificate in cert_file remains valid and -1
if the file was not found.
'''
if not os.path.exists(cert_file):
return -1
openssl_bin = module.get_bin_path('openssl', True)
openssl_cert_cmd = [openssl_bin, "x509", "-in", cert_file, "-noout", "-text"]
_, out, _ = module.run_command(openssl_cert_cmd,check_rc=True)
try:
not_after_str = re.search(r"\s+Not After\s*:\s+(.*)",out.decode('utf8')).group(1)
not_after = datetime.datetime.fromtimestamp(time.mktime(time.strptime(not_after_str,'%b %d %H:%M:%S %Y %Z')))
except AttributeError:
module.fail_json(msg="No 'Not after' date found in {0}".format(cert_file))
except ValueError:
        module.fail_json(msg="Failed to parse 'Not after' date of {0}".format(cert_file))
now = datetime.datetime.utcnow()
return (not_after - now).days
# function source: network/basics/uri.py
def write_file(module, dest, content):
'''
Write content to destination file dest, only if the content
has changed.
'''
changed = False
# create a tempfile with some test content
_, tmpsrc = tempfile.mkstemp()
f = open(tmpsrc, 'wb')
try:
f.write(content)
except Exception as err:
os.remove(tmpsrc)
module.fail_json(msg="failed to create temporary content file: %s" % str(err))
f.close()
checksum_src = None
checksum_dest = None
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
os.remove(tmpsrc)
module.fail_json(msg="Source %s does not exist" % (tmpsrc))
if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc)
module.fail_json( msg="Source %s not readable" % (tmpsrc))
checksum_src = module.sha1(tmpsrc)
# check if there is no dest file
if os.path.exists(dest):
# raise an error if copy has no permission on dest
if not os.access(dest, os.W_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s not writable" % (dest))
if not os.access(dest, os.R_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s not readable" % (dest))
checksum_dest = module.sha1(dest)
else:
if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination dir %s not writable" % (os.path.dirname(dest)))
if checksum_src != checksum_dest:
try:
shutil.copyfile(tmpsrc, dest)
changed = True
except Exception as err:
os.remove(tmpsrc)
module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err)))
os.remove(tmpsrc)
return changed
class ACMEDirectory(object):
'''
The ACME server directory. Gives access to the available resources
and the Replay-Nonce for a given uri. This only works for
uris that permit GET requests (so normally not the ones that
require authentication).
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.2
'''
def __init__(self, module):
self.module = module
self.directory_root = module.params['acme_directory']
self.directory = simple_get(self.module,self.directory_root)
def __getitem__(self, key): return self.directory[key]
def get_nonce(self,resource=None):
url = self.directory_root
if resource is not None:
url = resource
_, info = fetch_url(self.module, url, method='HEAD')
if info['status'] != 200:
self.module.fail_json(msg="Failed to get replay-nonce, got status {0}".format(info['status']))
return info['replay-nonce']
class ACMEAccount(object):
'''
ACME account object. Handles the authorized communication with the
    ACME server. Provides access to account-bound information like
the currently active authorizations and valid certificates
'''
def __init__(self,module):
self.module = module
self.agreement = module.params['agreement']
self.key = module.params['account_key']
self.email = module.params['account_email']
self.data = module.params['data']
self.directory = ACMEDirectory(module)
self.uri = None
self.changed = False
self._authz_list_uri = None
self._certs_list_uri = None
if not os.path.exists(self.key):
module.fail_json(msg="Account key %s not found" % (self.key))
self._openssl_bin = module.get_bin_path('openssl', True)
pub_hex, pub_exp = self._parse_account_key(self.key)
self.jws_header = {
"alg": "RS256",
"jwk": {
"e": nopad_b64(binascii.unhexlify(pub_exp.encode("utf-8"))),
"kty": "RSA",
"n": nopad_b64(binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))),
},
}
self.init_account()
def get_keyauthorization(self,token):
'''
Returns the key authorization for the given token
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.1
'''
accountkey_json = json.dumps(self.jws_header['jwk'], sort_keys=True, separators=(',', ':'))
thumbprint = nopad_b64(hashlib.sha256(accountkey_json.encode('utf8')).digest())
return "{0}.{1}".format(token, thumbprint)
def _parse_account_key(self,key):
'''
Parses an RSA key file in PEM format and returns the modulus
and public exponent of the key
'''
openssl_keydump_cmd = [self._openssl_bin, "rsa", "-in", key, "-noout", "-text"]
_, out, _ = self.module.run_command(openssl_keydump_cmd,check_rc=True)
pub_hex, pub_exp = re.search(
r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
out.decode('utf8'), re.MULTILINE|re.DOTALL).groups()
pub_exp = "{0:x}".format(int(pub_exp))
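        # e.g. a publicExponent of 65537 is formatted as '10001' here and zero-padded to '010001' below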
if len(pub_exp) % 2:
pub_exp = "0{0}".format(pub_exp)
return pub_hex, pub_exp
def send_signed_request(self, url, payload):
'''
Sends a JWS signed HTTP POST request to the ACME server and returns
the response as dictionary
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.2
'''
protected = copy.deepcopy(self.jws_header)
protected["nonce"] = self.directory.get_nonce()
try:
payload64 = nopad_b64(self.module.jsonify(payload).encode('utf8'))
protected64 = nopad_b64(self.module.jsonify(protected).encode('utf8'))
except Exception as e:
self.module.fail_json(msg="Failed to encode payload / headers as JSON: {0}".format(e))
openssl_sign_cmd = [self._openssl_bin, "dgst", "-sha256", "-sign", self.key]
sign_payload = "{0}.{1}".format(protected64, payload64).encode('utf8')
_, out, _ = self.module.run_command(openssl_sign_cmd,data=sign_payload,check_rc=True, binary_data=True)
data = self.module.jsonify({
"header": self.jws_header,
"protected": protected64,
"payload": payload64,
"signature": nopad_b64(out),
})
resp, info = fetch_url(self.module, url, data=data, method='POST')
result = None
try:
content = resp.read()
except AttributeError:
if info['body']:
content = info['body']
if content:
if info['content-type'].startswith('application/json'):
try:
result = self.module.from_json(content.decode('utf8'))
except ValueError:
self.module.fail_json(msg="Failed to parse the ACME response: {0} {1}".format(url,content))
else:
result = content
return result,info
def _new_reg(self,contact=[]):
'''
Registers a new ACME account. Returns True if the account was
        created and False if it already existed (i.e. it was not newly
created)
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.3
'''
if self.uri is not None:
return True
new_reg = {
'resource': 'new-reg',
'agreement': self.agreement,
'contact': contact
}
result, info = self.send_signed_request(self.directory['new-reg'], new_reg)
if 'location' in info:
self.uri = info['location']
if info['status'] in [200,201]:
# Account did not exist
self.changed = True
return True
elif info['status'] == 409:
# Account did exist
return False
else:
self.module.fail_json(msg="Error registering: {0} {1}".format(info['status'], result))
def init_account(self):
'''
Create or update an account on the ACME server. As the only way
(without knowing an account URI) to test if an account exists
is to try and create one with the provided account key, this
method will always result in an account being present (except
on error situations). If the account already exists, it will
update the contact information.
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.3
'''
contact = []
if self.email:
contact.append('mailto:' + self.email)
# if this is not a new registration (e.g. existing account)
if not self._new_reg(contact):
# pre-existing account, get account data...
result, _ = self.send_signed_request(self.uri, {'resource':'reg'})
# XXX: letsencrypt/boulder#1435
if 'authorizations' in result:
self._authz_list_uri = result['authorizations']
if 'certificates' in result:
self._certs_list_uri = result['certificates']
# ...and check if update is necessary
do_update = False
if 'contact' in result:
if cmp(contact,result['contact']) != 0:
do_update = True
elif len(contact) > 0:
do_update = True
if do_update:
upd_reg = result
upd_reg['contact'] = contact
result, _ = self.send_signed_request(self.uri, upd_reg)
self.changed = True
def get_authorizations(self):
'''
Return a list of currently active authorizations
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.4
'''
authz_list = {'authorizations': []}
if self._authz_list_uri is None:
# XXX: letsencrypt/boulder#1435
# Workaround, retrieve the known authorization urls
# from the data attribute
# It is also a way to limit the queried authorizations, which
# might become relevant at some point
if (self.data is not None) and ('authorizations' in self.data):
for auth in self.data['authorizations']:
authz_list['authorizations'].append(auth['uri'])
else:
return []
else:
# TODO: need to handle pagination
authz_list = simple_get(self.module, self._authz_list_uri)
authz = []
for auth_uri in authz_list['authorizations']:
auth = simple_get(self.module,auth_uri)
auth['uri'] = auth_uri
authz.append(auth)
return authz
class ACMEClient(object):
'''
ACME client class. Uses an ACME account object and a CSR to
start and validate ACME challenges and download the respective
certificates.
'''
def __init__(self,module):
self.module = module
self.challenge = module.params['challenge']
self.csr = module.params['csr']
self.dest = module.params['dest']
self.account = ACMEAccount(module)
self.directory = self.account.directory
self.authorizations = self.account.get_authorizations()
self.cert_days = -1
self.changed = self.account.changed
if not os.path.exists(self.csr):
module.fail_json(msg="CSR %s not found" % (self.csr))
self._openssl_bin = module.get_bin_path('openssl', True)
self.domains = self._get_csr_domains()
def _get_csr_domains(self):
'''
Parse the CSR and return the list of requested domains
'''
openssl_csr_cmd = [self._openssl_bin, "req", "-in", self.csr, "-noout", "-text"]
_, out, _ = self.module.run_command(openssl_csr_cmd,check_rc=True)
domains = set([])
common_name = re.search(r"Subject:.*? CN=([^\s,;/]+)", out.decode('utf8'))
if common_name is not None:
domains.add(common_name.group(1))
subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE|re.DOTALL)
if subject_alt_names is not None:
for san in subject_alt_names.group(1).split(", "):
if san.startswith("DNS:"):
domains.add(san[4:])
return domains
def _get_domain_auth(self,domain):
'''
Get the status string of the first authorization for the given domain.
Return None if no active authorization for the given domain was found.
'''
if self.authorizations is None:
return None
for auth in self.authorizations:
if (auth['identifier']['type'] == 'dns') and (auth['identifier']['value'] == domain):
return auth
return None
def _add_or_update_auth(self,auth):
'''
        Add or update the given authorization in the global authorizations list.
Return True if the auth was updated/added and False if no change was
necessary.
'''
for index,cur_auth in enumerate(self.authorizations):
if (cur_auth['uri'] == auth['uri']):
# does the auth parameter contain updated data?
if cmp(cur_auth,auth) != 0:
# yes, update our current authorization list
self.authorizations[index] = auth
return True
else:
return False
# this is a new authorization, add it to the list of current
# authorizations
self.authorizations.append(auth)
return True
def _new_authz(self,domain):
'''
Create a new authorization for the given domain.
Return the authorization object of the new authorization
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.4
'''
if self.account.uri is None:
return
new_authz = {
"resource": "new-authz",
"identifier": {"type": "dns", "value": domain},
}
result, info = self.account.send_signed_request(self.directory['new-authz'], new_authz)
if info['status'] not in [200,201]:
self.module.fail_json(msg="Error requesting challenges: CODE: {0} RESULT: {1}".format(info['status'], result))
else:
result['uri'] = info['location']
return result
def _get_challenge_data(self,auth):
'''
Returns a dict with the data for all proposed (and supported) challenges
of the given authorization.
'''
data = {}
# no need to choose a specific challenge here as this module
# is not responsible for fulfilling the challenges. Calculate
# and return the required information for each challenge.
for challenge in auth['challenges']:
type = challenge['type']
token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
keyauthorization = self.account.get_keyauthorization(token)
# NOTE: tls-sni-01 is not supported by choice
            # too complex to be useful and tls-sni-02 is an alternative
# as soon as it is implemented server side
if type == 'http-01':
# https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.2
resource = '.well-known/acme-challenge/' + token
value = keyauthorization
elif type == 'tls-sni-02':
# https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.3
token_digest = hashlib.sha256(token.encode('utf8')).hexdigest()
ka_digest = hashlib.sha256(keyauthorization.encode('utf8')).hexdigest()
len_token_digest = len(token_digest)
len_ka_digest = len(ka_digest)
resource = 'subjectAlternativeNames'
value = [
"{0}.{1}.token.acme.invalid".format(token_digest[:len_token_digest/2],token_digest[len_token_digest/2:]),
"{0}.{1}.ka.acme.invalid".format(ka_digest[:len_ka_digest/2],ka_digest[len_ka_digest/2:]),
]
elif type == 'dns-01':
# https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.4
resource = '_acme-challenge'
value = nopad_b64(hashlib.sha256(keyauthorization).digest()).encode('utf8')
else:
continue
data[type] = { 'resource': resource, 'resource_value': value }
return data
def _validate_challenges(self,auth):
'''
Validate the authorization provided in the auth dict. Returns True
        when the validation was successful and False when it was not.
'''
for challenge in auth['challenges']:
if self.challenge != challenge['type']:
continue
uri = challenge['uri']
token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
keyauthorization = self.account.get_keyauthorization(token)
challenge_response = {
"resource": "challenge",
"keyAuthorization": keyauthorization,
}
result, info = self.account.send_signed_request(uri, challenge_response)
if info['status'] not in [200,202]:
self.module.fail_json(msg="Error validating challenge: CODE: {0} RESULT: {1}".format(info['status'], result))
status = ''
while status not in ['valid','invalid','revoked']:
result = simple_get(self.module,auth['uri'])
result['uri'] = auth['uri']
if self._add_or_update_auth(result):
self.changed = True
# draft-ietf-acme-acme-02
# "status (required, string): ...
# If this field is missing, then the default value is "pending"."
if 'status' not in result:
status = 'pending'
else:
status = result['status']
time.sleep(2)
if status == 'invalid':
error_details = ''
# multiple challenges could have failed at this point, gather error
# details for all of them before failing
for challenge in result['challenges']:
if challenge['status'] == 'invalid':
error_details += ' CHALLENGE: {0}'.format(challenge['type'])
if 'error' in challenge:
error_details += ' DETAILS: {0};'.format(challenge['error']['detail'])
else:
error_details += ';'
self.module.fail_json(msg="Authorization for {0} returned invalid: {1}".format(result['identifier']['value'],error_details))
return status == 'valid'
def _new_cert(self):
'''
Create a new certificate based on the csr.
Return the certificate object as dict
https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.5
'''
openssl_csr_cmd = [self._openssl_bin, "req", "-in", self.csr, "-outform", "DER"]
_, out, _ = self.module.run_command(openssl_csr_cmd,check_rc=True)
new_cert = {
"resource": "new-cert",
"csr": nopad_b64(out),
}
result, info = self.account.send_signed_request(self.directory['new-cert'], new_cert)
if info['status'] not in [200,201]:
self.module.fail_json(msg="Error new cert: CODE: {0} RESULT: {1}".format(info['status'], result))
else:
return {'cert': result, 'uri': info['location']}
def _der_to_pem(self,der_cert):
'''
Convert the DER format certificate in der_cert to a PEM format
certificate and return it.
'''
return """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(
"\n".join(textwrap.wrap(base64.b64encode(der_cert).decode('utf8'), 64)))
def do_challenges(self):
'''
Create new authorizations for all domains of the CSR and return
        the challenge details for the chosen challenge type.
'''
data = {}
for domain in self.domains:
auth = self._get_domain_auth(domain)
if auth is None:
new_auth = self._new_authz(domain)
self._add_or_update_auth(new_auth)
data[domain] = self._get_challenge_data(new_auth)
self.changed = True
elif (auth['status'] == 'pending') or ('status' not in auth):
# draft-ietf-acme-acme-02
# "status (required, string): ...
# If this field is missing, then the default value is "pending"."
self._validate_challenges(auth)
                # _validate_challenges updates the global authorization dict,
# so get the current version of the authorization we are working
# on to retrieve the challenge data
data[domain] = self._get_challenge_data(self._get_domain_auth(domain))
return data
def get_certificate(self):
'''
Request a new certificate and write it to the destination file.
Only do this if a destination file was provided and if all authorizations
for the domains of the csr are valid. No Return value.
'''
if self.dest is None:
return
for domain in self.domains:
auth = self._get_domain_auth(domain)
if auth is None or ('status' not in auth) or (auth['status'] != 'valid'):
return
cert = self._new_cert()
if cert['cert'] is not None:
pem_cert = self._der_to_pem(cert['cert'])
if write_file(self.module,self.dest,pem_cert):
self.cert_days = get_cert_days(self.module,self.dest)
self.changed = True
def main():
module = AnsibleModule(
argument_spec = dict(
account_key = dict(required=True, type='path'),
account_email = dict(required=False, default=None, type='str'),
acme_directory = dict(required=False, default='https://acme-staging.api.letsencrypt.org/directory', type='str'),
agreement = dict(required=False, default='https://letsencrypt.org/documents/LE-SA-v1.1.1-August-1-2016.pdf', type='str'),
challenge = dict(required=False, default='http-01', choices=['http-01', 'dns-01', 'tls-sni-02'], type='str'),
csr = dict(required=True, aliases=['src'], type='path'),
data = dict(required=False, no_log=True, default=None, type='dict'),
dest = dict(required=True, aliases=['cert'], type='path'),
remaining_days = dict(required=False, default=10, type='int'),
),
supports_check_mode = True,
)
cert_days = get_cert_days(module,module.params['dest'])
if cert_days < module.params['remaining_days']:
# If checkmode is active, base the changed state solely on the status
# of the certificate file as all other actions (accessing an account, checking
# the authorization status...) would lead to potential changes of the current
# state
if module.check_mode:
module.exit_json(changed=True,authorizations={},
challenge_data={},cert_days=cert_days)
else:
client = ACMEClient(module)
client.cert_days = cert_days
data = client.do_challenges()
client.get_certificate()
module.exit_json(changed=client.changed,authorizations=client.authorizations,
challenge_data=data,cert_days=client.cert_days)
else:
module.exit_json(changed=False,cert_days=cert_days)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| 0.005918 |
################################################################################
#
# Copyright 2014-2016 Eric Lacombe <[email protected]>
#
################################################################################
#
# This file is part of fuddly.
#
# fuddly is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fuddly is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fuddly. If not, see <http://www.gnu.org/licenses/>
#
################################################################################
try:
import xtermcolor
from xtermcolor import colorize
xtermcolor.isatty = lambda x: True
except ImportError:
print("WARNING [FMK]: python-xtermcolor module is not installed, colors won't be available!")
def colorize(string, rgb=None, ansi=None, bg=None, ansi_bg=None, fd=1):
return string
class Color(object):
TITLE = 0x0099FF #0x1947D1
PROMPT = 0x6699FF
DATE = 0x00FF00
SELECTED = 0x0030FF
FMKHLIGHT = 0xFFFFFF
FMKINFOGROUP = 0x1975FF
FMKINFOSUBGROUP = 0x66CCFF
FMKINFO = 0x66FFFF
FMKSUBINFO = 0xD0D0C0
FMKINFO_HLIGHT = 0x00FF00
INFO = 0xFF9900
SUBINFO = 0xE6E68A
INFO_ALT = 0x0055FF
INFO_ALT_HLIGHT = 0x00FF00
SUBINFO_ALT = 0x66FFFF
SUBINFO_ALT_HLIGHT = 0x800080
WARNING = 0xFFA500
ERROR = 0xEF0000
COMPONENT_INFO = 0x339966
COMPONENT_START = 0x00FF00
COMPONENT_STOP = 0x4775A3
DATA_MODEL_LOADED = 0xB03BB0
FEEDBACK = 0x800080
FEEDBACK_ERR = 0xEF0000
FEEDBACK_HLIGHT = 0xFFFFFF
NEWLOGENTRY = 0x1975FF
DMAKERSTEP = 0x009D9D
LOGSECTION = 0x638C8C
DISABLED = 0x7D7D7D
DATAINFO = 0x8CAFCF
DATAINFO_ALT = 0xA0A0A0
COMMENTS = 0x00FF00
ND_NONTERM = 0xEF0000
ND_CONTENTS = 0x00FF00
ND_RAW = 0x7D7D7D
ND_RAW_HLIGHT = 0xE5E5E5
ND_NAME = 0x1975FF
ND_TYPE = 0x66FFFF
ND_DUPLICATED = 0x800080
ND_SEPARATOR = 0x008000
ND_ENCODED = 0xFFA500
ND_CUSTO = 0x800080
@staticmethod
def display():
for c in dir(Color):
if not c.startswith('__') and c != 'display':
print(colorize(c, rgb=object.__getattribute__(Color, c)))
class FontStyle:
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
graphviz_module = True
try:
import graphviz
except ImportError:
graphviz_module = False
print('WARNING [FMK]: python(3)-graphviz module is not installed, Scenario could not be visualized!')
sqlite3_module = True
try:
import sqlite3
except ImportError:
sqlite3_module = False
print('WARNING [FMK]: SQLite3 not installed, FmkDB will not be available!')
cups_module = True
try:
import cups
except ImportError:
cups_module = False
print('WARNING [FMK]: python(3)-cups module is not installed, Printer targets will not be available!')
crcmod_module = True
try:
import crcmod
except ImportError:
crcmod_module = False
print('WARNING [FMK]: python(3)-crcmod module is not installed, the MH.CRC()' \
' generator template will not be available!')
ssh_module = True
try:
import paramiko as ssh
except ImportError:
ssh_module = False
print('WARNING [FMK]: python(3)-paramiko module is not installed! '
'Should be installed for ssh-based monitoring.')
serial_module = True
try:
import serial
except ImportError:
serial_module = False
print('WARNING [FMK]: python(3)-serial module is not installed! '
'Should be installed for serial-based Target.')
| 0.003539 |
# coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Training of model-based RL agents.
Example invocation:
python -m tensor2tensor.rl.trainer_model_based \
--output_dir=$HOME/t2t/rl_v1 \
--loop_hparams_set=rlmb_base \
--loop_hparams='num_real_env_frames=10000,epochs=3'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import math
import os
import pprint
import random
import time
import six
from tensor2tensor.bin import t2t_trainer # pylint: disable=unused-import
from tensor2tensor.models.research import rl
from tensor2tensor.rl import rl_utils
from tensor2tensor.rl import trainer_model_based_params
from tensor2tensor.rl.dopamine_connector import DQNLearner # pylint: disable=unused-import
from tensor2tensor.rl.restarter import Restarter
from tensor2tensor.utils import trainer_lib
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
def real_env_step_increment(hparams):
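  # e.g. with the example flags from the module docstring (num_real_env_frames=10000,
  # epochs=3) this is ceil(10000 / 3) = 3334 frames per epoch.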
"""Real env step increment."""
return int(math.ceil(
hparams.num_real_env_frames / hparams.epochs
))
def world_model_step_increment(hparams, epoch):
if epoch in [0, 1, 4, 9, 14]:
multiplier = hparams.initial_epoch_train_steps_multiplier
else:
multiplier = 1
return multiplier * hparams.model_train_steps
def setup_directories(base_dir, subdirs):
"""Setup directories."""
base_dir = os.path.expanduser(base_dir)
tf.gfile.MakeDirs(base_dir)
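  # subdirs entries may be plain strings or tuples; e.g. ("world_model", "debug_videos")
  # becomes <base_dir>/world_model/debug_videos.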
all_dirs = {}
for subdir in subdirs:
if isinstance(subdir, six.string_types):
subdir_tuple = (subdir,)
else:
subdir_tuple = subdir
dir_name = os.path.join(base_dir, *subdir_tuple)
tf.gfile.MakeDirs(dir_name)
all_dirs[subdir] = dir_name
return all_dirs
def make_relative_timing_fn():
"""Make a function that logs the duration since it was made."""
start_time = time.time()
def format_relative_time():
time_delta = time.time() - start_time
return str(datetime.timedelta(seconds=time_delta))
def log_relative_time():
tf.logging.info("Timing: %s", format_relative_time())
return log_relative_time
def make_log_fn(epoch, log_relative_time_fn):
def log(msg, *args):
msg %= args
tf.logging.info("%s Epoch %d: %s", ">>>>>>>", epoch, msg)
log_relative_time_fn()
return log
def random_rollout_subsequences(rollouts, num_subsequences, subsequence_length):
"""Chooses a random frame sequence of given length from a set of rollouts."""
def choose_subsequence():
# TODO(koz4k): Weigh rollouts by their lengths so sampling is uniform over
# frames and not rollouts.
rollout = random.choice(rollouts)
try:
from_index = random.randrange(len(rollout) - subsequence_length + 1)
except ValueError:
# Rollout too short; repeat.
return choose_subsequence()
return rollout[from_index:(from_index + subsequence_length)]
return [choose_subsequence() for _ in range(num_subsequences)]
def train_supervised(problem, model_name, hparams, data_dir, output_dir,
train_steps, eval_steps, local_eval_frequency=None,
schedule="continuous_train_and_eval"):
"""Train supervised."""
if local_eval_frequency is None:
local_eval_frequency = FLAGS.local_eval_frequency
exp_fn = trainer_lib.create_experiment_fn(
model_name, problem, data_dir, train_steps, eval_steps,
min_eval_frequency=local_eval_frequency
)
run_config = trainer_lib.create_run_config(model_name, model_dir=output_dir)
exp = exp_fn(run_config, hparams)
getattr(exp, schedule)()
def train_agent(real_env, learner, world_model_dir, hparams, epoch):
"""Train the PPO agent in the simulated environment."""
initial_frame_chooser = rl_utils.make_initial_frame_chooser(
real_env, hparams.frame_stack_size, hparams.simulation_random_starts,
hparams.simulation_flip_first_random_for_beginning
)
env_fn = rl.make_simulated_env_fn_from_hparams(
real_env, hparams, batch_size=hparams.simulated_batch_size,
initial_frame_chooser=initial_frame_chooser, model_dir=world_model_dir,
sim_video_dir=os.path.join(
learner.agent_model_dir, "sim_videos_{}".format(epoch)
)
)
base_algo_str = hparams.base_algo
train_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
if hparams.wm_policy_param_sharing:
train_hparams.optimizer_zero_grads = True
rl_utils.update_hparams_from_hparams(
train_hparams, hparams, base_algo_str + "_"
)
final_epoch = hparams.epochs - 1
is_special_epoch = (epoch + 3) == final_epoch or (epoch + 7) == final_epoch
is_special_epoch = is_special_epoch or (epoch == 1) # Make 1 special too.
is_final_epoch = epoch == final_epoch
env_step_multiplier = 3 if is_final_epoch else 2 if is_special_epoch else 1
learner.train(
env_fn, train_hparams, simulated=True, save_continuously=True,
epoch=epoch, env_step_multiplier=env_step_multiplier
)
def train_agent_real_env(env, learner, hparams, epoch):
"""Train the PPO agent in the real environment."""
base_algo_str = hparams.base_algo
train_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
rl_utils.update_hparams_from_hparams(
train_hparams, hparams, "real_" + base_algo_str + "_"
)
if hparams.wm_policy_param_sharing:
train_hparams.optimizer_zero_grads = True
env_fn = rl.make_real_env_fn(env)
num_env_steps = real_env_step_increment(hparams)
learner.train(
env_fn,
train_hparams,
simulated=False,
save_continuously=False,
epoch=epoch,
sampling_temp=hparams.real_sampling_temp,
num_env_steps=num_env_steps,
)
# Save unfinished rollouts to history.
env.reset()
def train_world_model(
env, data_dir, output_dir, hparams, world_model_steps_num, epoch
):
"""Train the world model on problem_name."""
world_model_steps_num += world_model_step_increment(hparams, epoch)
model_hparams = trainer_lib.create_hparams(hparams.generative_model_params)
model_hparams.learning_rate = model_hparams.learning_rate_constant
if epoch > 0:
model_hparams.learning_rate *= hparams.learning_rate_bump
if hparams.wm_policy_param_sharing:
model_hparams.optimizer_zero_grads = True
restarter = Restarter("world_model", output_dir, world_model_steps_num)
if restarter.should_skip:
return world_model_steps_num
with restarter.training_loop():
train_supervised(
problem=env,
model_name=hparams.generative_model,
hparams=model_hparams,
data_dir=data_dir,
output_dir=output_dir,
train_steps=restarter.target_global_step,
eval_steps=100,
local_eval_frequency=2000
)
return world_model_steps_num
def load_metrics(event_dir, epoch):
"""Loads metrics for this epoch if they have already been written.
This reads the entire event file but it's small with just per-epoch metrics.
Args:
event_dir: TODO(koz4k): Document this.
epoch: TODO(koz4k): Document this.
Returns:
metrics.
"""
metrics = {}
for filename in tf.gfile.ListDirectory(event_dir):
path = os.path.join(event_dir, filename)
for event in tf.train.summary_iterator(path):
if event.step == epoch and event.HasField("summary"):
value = event.summary.value[0]
metrics[value.tag] = value.simple_value
return metrics
def training_loop(hparams, output_dir, report_fn=None, report_metric=None):
"""Run the main training loop."""
if report_fn:
assert report_metric is not None
# Directories
subdirectories = [
"data", "tmp", "world_model", ("world_model", "debug_videos"),
"policy", "eval_metrics"
]
directories = setup_directories(output_dir, subdirectories)
epoch = -1
data_dir = directories["data"]
env = rl_utils.setup_env(
hparams, batch_size=hparams.real_batch_size,
max_num_noops=hparams.max_num_noops,
rl_env_max_episode_steps=hparams.rl_env_max_episode_steps
)
env.start_new_epoch(epoch, data_dir)
if hparams.wm_policy_param_sharing:
policy_model_dir = directories["world_model"]
else:
policy_model_dir = directories["policy"]
learner = rl_utils.LEARNERS[hparams.base_algo](
hparams.frame_stack_size, policy_model_dir,
policy_model_dir, hparams.epochs
)
# Timing log function
log_relative_time = make_relative_timing_fn()
# Per-epoch state
epoch_metrics = []
metrics = {}
# Collect data from the real environment.
policy_model_dir = directories["policy"]
tf.logging.info("Initial training of the policy in real environment.")
train_agent_real_env(env, learner, hparams, epoch)
metrics["mean_reward/train/clipped"] = rl_utils.compute_mean_reward(
env.current_epoch_rollouts(), clipped=True
)
tf.logging.info("Mean training reward (initial): {}".format(
metrics["mean_reward/train/clipped"]
))
env.generate_data(data_dir)
eval_metrics_writer = tf.summary.FileWriter(
directories["eval_metrics"]
)
world_model_steps_num = 0
for epoch in range(hparams.epochs):
log = make_log_fn(epoch, log_relative_time)
# Train world model
log("Training world model")
world_model_steps_num = train_world_model(
env, data_dir, directories["world_model"], hparams,
world_model_steps_num, epoch
)
# Train agent
log("Training policy in simulated environment.")
train_agent(env, learner, directories["world_model"], hparams, epoch)
env.start_new_epoch(epoch, data_dir)
# Train agent on real env (short)
log("Training policy in real environment.")
train_agent_real_env(env, learner, hparams, epoch)
if hparams.stop_loop_early:
return 0.0
env.generate_data(data_dir)
metrics = load_metrics(directories["eval_metrics"], epoch)
if metrics:
# Skip eval if metrics have already been written for this epoch. Otherwise
# we'd overwrite them with wrong data.
log("Metrics found for this epoch, skipping evaluation.")
else:
metrics["mean_reward/train/clipped"] = rl_utils.compute_mean_reward(
env.current_epoch_rollouts(), clipped=True
)
log("Mean training reward: {}".format(
metrics["mean_reward/train/clipped"]
))
eval_metrics = rl_utils.evaluate_all_configs(hparams, policy_model_dir)
log("Agent eval metrics:\n{}".format(pprint.pformat(eval_metrics)))
metrics.update(eval_metrics)
if hparams.eval_world_model:
debug_video_path = os.path.join(
directories["world_model", "debug_videos"],
"{}.avi".format(env.current_epoch)
)
wm_metrics = rl_utils.evaluate_world_model(
env, hparams, directories["world_model"], debug_video_path
)
log("World model eval metrics:\n{}".format(pprint.pformat(wm_metrics)))
metrics.update(wm_metrics)
rl_utils.summarize_metrics(eval_metrics_writer, metrics, epoch)
# Report metrics
if report_fn:
if report_metric == "mean_reward":
metric_name = rl_utils.get_metric_name(
sampling_temp=hparams.eval_sampling_temps[0],
max_num_noops=hparams.eval_max_num_noops,
clipped=False
)
report_fn(eval_metrics[metric_name], epoch)
else:
report_fn(eval_metrics[report_metric], epoch)
epoch_metrics.append(metrics)
# Return the evaluation metrics from the final epoch
return epoch_metrics[-1]
def main(_):
hp = trainer_model_based_params.create_loop_hparams()
assert not FLAGS.job_dir_to_evaluate
training_loop(hp, FLAGS.output_dir)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
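# Illustrative sketch (not in the original module): training_loop calls
# report_fn(metric_value, epoch) once per epoch with the metric chosen via
# report_metric, which makes it easy to hook into a tuner or experiment
# tracker. `tuner_client` below is a hypothetical stand-in for such a client;
# everything else is taken from this module.
def _example_training_loop_with_reporting(tuner_client):
  def report_fn(metric_value, epoch):
    # Forward the per-epoch evaluation metric to the (assumed) tuner client.
    tuner_client.record(step=epoch, objective=metric_value)
  hp = trainer_model_based_params.create_loop_hparams()
  return training_loop(hp, FLAGS.output_dir,
                       report_fn=report_fn, report_metric="mean_reward")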
| 0.010005 |
# Authors: Michael Eickenberg
# Kyle Kastner
# License: BSD 3 Clause
import os
import theano
# Required to avoid fuse errors... very strange
theano.config.floatX = 'float32'
import zipfile
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from .overfeat_class_labels import get_overfeat_class_label
from .overfeat_class_labels import get_all_overfeat_labels
from .overfeat_class_labels import get_all_overfeat_leaves
from ..datasets import get_dataset_dir, download
from ..base import (Convolution, MaxPool, PassThrough,
Standardize, ZeroPad, Relu, fuse)
from ..utils import check_tensor
# better get it from a config file
NETWORK_WEIGHTS_PATH = get_dataset_dir("overfeat_weights")
SMALL_NETWORK_WEIGHT_FILE = 'net_weight_0'
SMALL_NETWORK_FILTER_SHAPES = np.array([(96, 3, 11, 11),
(256, 96, 5, 5),
(512, 256, 3, 3),
(1024, 512, 3, 3),
(1024, 1024, 3, 3),
(3072, 1024, 6, 6),
(4096, 3072, 1, 1),
(1000, 4096, 1, 1)])
SMALL_NETWORK_BIAS_SHAPES = SMALL_NETWORK_FILTER_SHAPES[:, 0]
SMALL_NETWORK = (SMALL_NETWORK_WEIGHT_FILE,
SMALL_NETWORK_FILTER_SHAPES,
SMALL_NETWORK_BIAS_SHAPES)
LARGE_NETWORK_WEIGHT_FILE = 'net_weight_1'
LARGE_NETWORK_FILTER_SHAPES = np.array([(96, 3, 7, 7),
(256, 96, 7, 7),
(512, 256, 3, 3),
(512, 512, 3, 3),
(1024, 512, 3, 3),
(1024, 1024, 3, 3),
(4096, 1024, 5, 5),
(4096, 4096, 1, 1),
(1000, 4096, 1, 1)])
LARGE_NETWORK_BIAS_SHAPES = LARGE_NETWORK_FILTER_SHAPES[:, 0]
LARGE_NETWORK = (LARGE_NETWORK_WEIGHT_FILE,
LARGE_NETWORK_FILTER_SHAPES,
LARGE_NETWORK_BIAS_SHAPES)
def fetch_overfeat_weights_and_biases(large_network=False, weights_file=None):
network = LARGE_NETWORK if large_network else SMALL_NETWORK
fname, weight_shapes, bias_shapes = network
if weights_file is None:
weights_file = os.path.join(NETWORK_WEIGHTS_PATH, fname)
if not os.path.exists(weights_file):
url = "https://dl.dropboxusercontent.com/u/15378192/net_weights.zip"
if not os.path.exists(NETWORK_WEIGHTS_PATH):
os.makedirs(NETWORK_WEIGHTS_PATH)
full_path = os.path.join(NETWORK_WEIGHTS_PATH, "net_weights.zip")
if not os.path.exists(full_path):
download(url, full_path, progress_update_percentage=1)
zip_obj = zipfile.ZipFile(full_path, 'r')
zip_obj.extractall(NETWORK_WEIGHTS_PATH)
zip_obj.close()
memmap = np.memmap(weights_file, dtype=np.float32)
mempointer = 0
weights = []
biases = []
for weight_shape, bias_shape in zip(weight_shapes, bias_shapes):
filter_size = np.prod(weight_shape)
weights.append(
memmap[mempointer:mempointer + filter_size].reshape(weight_shape))
mempointer += filter_size
biases.append(memmap[mempointer:mempointer + bias_shape])
mempointer += bias_shape
return weights, biases
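# Illustrative sketch (not part of the original module): the weight file is a
# flat float32 blob laid out as [filter_0, bias_0, filter_1, bias_1, ...], so
# its total element count can be sanity-checked against the declared shapes
# before slicing. The small-network constants above are used by default.
def _expected_weight_file_elements(filter_shapes=SMALL_NETWORK_FILTER_SHAPES,
                                   bias_shapes=SMALL_NETWORK_BIAS_SHAPES):
    # One bias value per output channel, plus the full filter tensor.
    return int(sum(np.prod(w) + b
                   for w, b in zip(filter_shapes, bias_shapes)))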
def _get_architecture(large_network=False, weights_and_biases=None,
detailed=False):
if weights_and_biases is None:
weights_and_biases = fetch_overfeat_weights_and_biases(large_network)
weights, biases = weights_and_biases
# flip weights to make Xcorr
ws = [w[:, :, ::-1, ::-1] for w in weights]
bs = biases
if large_network and not detailed:
architecture = [
Standardize(118.380948, 61.896913),
Convolution(ws[0], bs[0], subsample=(2, 2),
activation='relu'),
MaxPool((3, 3)),
Convolution(ws[1], bs[1], activation='relu'),
MaxPool((2, 2)),
Convolution(ws[2], bs[2],
activation='relu',
cropping=[(1, -1), (1, -1)],
border_mode='full'),
Convolution(ws[3], bs[3],
activation='relu',
cropping=[(1, -1), (1, -1)],
border_mode='full'),
Convolution(ws[4], bs[4],
activation='relu',
cropping=[(1, -1), (1, -1)],
border_mode='full'),
Convolution(ws[5], bs[5],
activation='relu',
cropping=[(1, -1), (1, -1)],
border_mode='full'),
MaxPool((3, 3)),
Convolution(ws[6], bs[6],
activation='relu'),
Convolution(ws[7], bs[7],
activation='relu'),
Convolution(ws[8], bs[8],
activation='identity')]
elif not large_network and not detailed:
architecture = [
Standardize(118.380948, 61.896913),
Convolution(ws[0], bs[0], subsample=(4, 4),
activation='relu'),
MaxPool((2, 2)),
Convolution(ws[1], bs[1], activation='relu'),
MaxPool((2, 2)),
Convolution(ws[2], bs[2],
activation='relu',
cropping=[(1, -1), (1, -1)],
border_mode='full'),
Convolution(ws[3], bs[3],
activation='relu',
cropping=[(1, -1), (1, -1)],
border_mode='full'),
Convolution(ws[4], bs[4],
activation='relu',
cropping=[(1, -1), (1, -1)],
border_mode='full'),
MaxPool((2, 2)),
Convolution(ws[5], bs[5],
activation='relu'),
Convolution(ws[6], bs[6],
activation='relu'),
Convolution(ws[7], bs[7],
activation='identity')]
elif large_network and detailed:
architecture = [
Standardize(118.380948, 61.896913),
Convolution(ws[0], bs[0], subsample=(2, 2),
activation=None),
Relu(),
MaxPool((3, 3)),
Convolution(ws[1], bs[1], activation=None),
Relu(),
MaxPool((2, 2)),
ZeroPad(1),
Convolution(ws[2], bs[2], activation=None),
Relu(),
ZeroPad(1),
Convolution(ws[3], bs[3], activation=None),
Relu(),
ZeroPad(1),
Convolution(ws[4], bs[4], activation=None),
Relu(),
ZeroPad(1),
Convolution(ws[5], bs[5], activation=None),
Relu(),
MaxPool((3, 3)),
Convolution(ws[6], bs[6], activation=None),
Relu(),
Convolution(ws[7], bs[7], activation=None),
Relu(),
Convolution(ws[8], bs[8], activation=None)
]
elif not large_network and detailed:
architecture = [
Standardize(118.380948, 61.896913),
Convolution(ws[0], bs[0], subsample=(4, 4), activation=None),
Relu(),
MaxPool((2, 2)),
Convolution(ws[1], bs[1], activation=None),
Relu(),
MaxPool((2, 2)),
ZeroPad(1),
Convolution(ws[2], bs[2], activation=None),
Relu(),
ZeroPad(1),
Convolution(ws[3], bs[3], activation=None),
Relu(),
ZeroPad(1),
Convolution(ws[4], bs[4], activation=None),
Relu(),
MaxPool((2, 2)),
Convolution(ws[5], bs[5], activation=None),
Relu(),
Convolution(ws[6], bs[6], activation=None),
Relu(),
Convolution(ws[7], bs[7], activation=None)
]
return architecture
def _get_fprop(large_network=False, output_layers=[-1], detailed=False):
arch = _get_architecture(large_network, detailed=detailed)
expressions, input_var = fuse(arch, output_expressions=output_layers,
input_dtype='float32')
fprop = theano.function([input_var], expressions)
return fprop
class OverfeatTransformer(BaseEstimator, TransformerMixin):
"""
A transformer/feature extractor for images using the OverFeat neural network.
Parameters
----------
large_network : boolean, optional (default=False)
Which network to use. If True, the transform will operate over X in
windows of 221x221 pixels. Otherwise, these windows will be 231x231.
output_layers : iterable, optional (default=[-1])
Which layers to return. Can be used to retrieve multiple levels of
output with a single call to transform.
force_reshape : boolean, optional (default=True)
Whether or not to force the output to be two dimensional. If true,
this class can be used as part of a scikit-learn pipeline.
force_reshape currently only supports len(output_layers) == 1!
    detailed_network : boolean, optional (default=False)
If set to True, layers will be indexed and counted as in the binary
version provided by the authors of OverFeat. I.e. convolution, relu,
zero-padding, max-pooling are all separate layers. If False specified
then convolution and relu are one unit and zero-padding layers are
omitted.
batch_size : int, optional (default=None)
If set, input will be transformed in batches of size batch_size. This
can save memory at intermediate processing steps.
"""
def __init__(self, large_network=False, output_layers=[-1],
force_reshape=True,
transpose_order=(0, 3, 1, 2),
detailed_network=False,
batch_size=None):
self.large_network = large_network
self.output_layers = output_layers
self.force_reshape = force_reshape
self.transpose_order = transpose_order
self.transform_function = _get_fprop(self.large_network,
output_layers,
detailed=detailed_network)
self.batch_size = batch_size
def fit(self, X, y=None):
"""Passthrough for scikit-learn pipeline compatibility."""
return self
def transform(self, X):
"""
Transform a set of images.
Returns the features from each layer.
Parameters
----------
X : array-like, shape = [n_images, height, width, color]
or
shape = [height, width, color]
Returns
-------
T : array-like, shape = [n_images, n_features]
If force_reshape = False,
list of array-like, length output_layers,
each shape = [n_images, n_windows,
n_window_features]
        Returns the features extracted for each of the n_images in X.
"""
X = check_tensor(X, dtype=np.float32, n_dim=4)
if self.batch_size is None:
if self.force_reshape:
return self.transform_function(X.transpose(
*self.transpose_order))[0].reshape((len(X), -1))
else:
return self.transform_function(
X.transpose(*self.transpose_order))
else:
XT = X.transpose(*self.transpose_order)
n_samples = XT.shape[0]
for i in range(0, n_samples, self.batch_size):
transformed_batch = self.transform_function(
XT[i:i + self.batch_size])
# at first iteration, initialize output arrays to correct size
if i == 0:
shapes = [(n_samples,) + t.shape[1:] for t in
transformed_batch]
ravelled_shapes = [np.prod(shp[1:]) for shp in shapes]
if self.force_reshape:
output_width = np.sum(ravelled_shapes)
output = np.empty((n_samples, output_width),
dtype=transformed_batch[0].dtype)
                        break_points = np.r_[0, np.cumsum(ravelled_shapes)]
raw_output = [
output[:, start:stop] for start, stop in
zip(break_points[:-1], break_points[1:])]
else:
                        output = [np.empty(shape, dtype=t.dtype)
                                  for shape, t in
                                  zip(shapes, transformed_batch)]
raw_output = [arr.reshape(n_samples, -1)
for arr in output]
                for transformed, out in zip(transformed_batch, raw_output):
                    out[i:i + self.batch_size] = transformed.reshape(
                        len(transformed), -1)
return output
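# Illustrative usage sketch (not part of the original module): pushing a small
# batch of RGB images through the small network and taking the default (last)
# layer as a flat feature vector. Instantiating the transformer compiles the
# network and downloads the OverFeat weights on first use; the random array
# below is only a stand-in for real 231x231 images.
def _example_transformer_usage():
    rng = np.random.RandomState(0)
    images = rng.uniform(0, 255, size=(2, 231, 231, 3)).astype('float32')
    transformer = OverfeatTransformer(large_network=False,
                                      output_layers=[-1],
                                      force_reshape=True)
    features = transformer.transform(images)  # shape: (n_images, n_features)
    return features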
class OverfeatClassifier(BaseEstimator):
"""
A classifier for cropped images using the OverFeat neural network.
If large_network=True, this X will be cropped to the center
221x221 pixels. Otherwise, this cropped box will be 231x231.
Parameters
----------
large_network : boolean, optional (default=False)
Which network to use. If large_network = True, input will be cropped
to the center 221 x 221 pixels. Otherwise, input will be cropped to the
center 231 x 231 pixels.
top_n : integer, optional (default=5)
How many classes to return, based on sorted class probabilities.
output_strings : boolean, optional (default=True)
Whether to return class strings or integer classes. Returns class
strings by default.
Attributes
----------
crop_bounds_ : tuple, (x_left, x_right, y_lower, y_upper)
The coordinate boundaries of the cropping box used.
"""
def __init__(self, top_n=5, large_network=False, output_strings=True,
transpose_order=(0, 3, 1, 2)):
self.top_n = top_n
self.large_network = large_network
if self.large_network:
self.min_size = (221, 221)
else:
self.min_size = (231, 231)
self.output_strings = output_strings
self.transpose_order = transpose_order
self.transform_function = _get_fprop(self.large_network, [-1])
def fit(self, X, y=None):
"""Passthrough for scikit-learn pipeline compatibility."""
return self
def _predict_proba(self, X):
x_midpoint = X.shape[2] // 2
y_midpoint = X.shape[1] // 2
x_lower_bound = x_midpoint - self.min_size[0] // 2
if x_lower_bound <= 0:
x_lower_bound = 0
x_upper_bound = x_lower_bound + self.min_size[0]
y_lower_bound = y_midpoint - self.min_size[1] // 2
if y_lower_bound <= 0:
y_lower_bound = 0
y_upper_bound = y_lower_bound + self.min_size[1]
self.crop_bounds_ = (x_lower_bound, x_upper_bound, y_lower_bound,
y_upper_bound)
res = self.transform_function(
X[:, y_lower_bound:y_upper_bound,
x_lower_bound:x_upper_bound, :].transpose(
*self.transpose_order))[0]
# Softmax activation
        exp_res = np.exp(res - res.max(axis=1, keepdims=True))
        exp_res /= np.sum(exp_res, axis=1, keepdims=True)
return exp_res
def predict(self, X):
"""
Classify a set of cropped input images.
Returns the top_n classes.
Parameters
----------
X : array-like, shape = [n_images, height, width, color]
or
shape = [height, width, color]
Returns
-------
T : array-like, shape = [n_images, top_n]
Returns the top_n classes for each of the n_images in X.
If output_strings is True, then the result will be string
description of the class label.
Otherwise, the returned values will be the integer class label.
"""
X = check_tensor(X, dtype=np.float32, n_dim=4)
res = self._predict_proba(X)[:, :, 0, 0]
indices = np.argsort(res, axis=1)
indices = indices[:, -self.top_n:]
if self.output_strings:
class_strings = np.empty_like(indices,
dtype=object)
for index, value in enumerate(indices.flat):
class_strings.flat[index] = get_overfeat_class_label(value)
return class_strings
else:
return indices
def predict_proba(self, X):
"""
Prediction probability for a set of cropped input images.
Returns the top_n probabilities.
Parameters
----------
X : array-like, shape = [n_images, height, width, color]
or
shape = [height, width, color]
Returns
-------
T : array-like, shape = [n_images, top_n]
Returns the top_n probabilities for each of the n_images in X.
"""
X = check_tensor(X, dtype=np.float32, n_dim=4)
res = self._predict_proba(X)[:, :, 0, 0]
return np.sort(res, axis=1)[:, -self.top_n:]
class OverfeatLocalizer(BaseEstimator):
"""
A localizer for single images using the OverFeat neural network.
If large_network=True, this X will be cropped to the center
221x221 pixels. Otherwise, this box will be 231x231.
Parameters
----------
match_strings : iterable of strings
An iterable of class names to match with localizer. Can be a full
ImageNet class string or a WordNet leaf such as 'dog.n.01'. If the
pattern '.n.' is found in the match string, it will be treated as a
WordNet leaf, otherwise the string is assumed to be a class label.
large_network : boolean, optional (default=False)
Which network to use. If True, the transform will operate over X in
windows of 221x221 pixels. Otherwise, these windows will be 231x231.
top_n : integer, optional (default=5)
How many classes to return, based on sorted class probabilities.
output_strings : boolean, optional (default=True)
Whether to return class strings or integer classes. Returns class
strings by default.
"""
def __init__(self, match_strings, top_n=5, large_network=False,
transpose_order=(2, 0, 1)):
self.top_n = top_n
self.large_network = large_network
if self.large_network:
self.min_size = (221, 221)
else:
self.min_size = (231, 231)
self.match_strings = match_strings
self.transpose_order = transpose_order
self.transform_function = _get_fprop(self.large_network, [-1])
def fit(self, X, y=None):
"""Passthrough for scikit-learn pipeline compatibility."""
return self
def predict(self, X):
"""
Localize an input image.
Returns the points where the top_n classes contains any of the
match_strings.
Parameters
----------
X : array-like, shape = [height, width, color]
Returns
-------
T : list of array-likes, each of shape = [n_points, 2]
For each string in match_strings, points where that string was
in the top_n classes. len(T) will be equal to len(match_strings).
Each array in T is of size n_points x 2, where column 0 is
x point coordinate and column 1 is y point coordinate.
This means that an entry in T can be plotted with
plt.scatter(T[i][:, 0], T[i][:, 1])
"""
X = check_tensor(X, dtype=np.float32, n_dim=3)
res = self.transform_function(X.transpose(
*self.transpose_order)[None])[0]
# Softmax activation
        exp_res = np.exp(res - res.max(axis=1, keepdims=True))
        exp_res /= np.sum(exp_res, axis=1, keepdims=True)
indices = np.argsort(exp_res, axis=1)[:, -self.top_n:, :, :]
height = X.shape[0]
width = X.shape[1]
x_bound = width - self.min_size[0]
y_bound = height - self.min_size[1]
n_y = indices.shape[2]
n_x = indices.shape[3]
x_points = np.linspace(0, x_bound, n_x).astype('int32')
y_points = np.linspace(0, y_bound, n_y).astype('int32')
x_points = x_points + self.min_size[0] // 2
y_points = y_points + self.min_size[1] // 2
xx, yy = np.meshgrid(x_points, y_points)
per_window_labels = indices[0]
per_window_labels = per_window_labels.reshape(len(per_window_labels),
-1)
all_matches = []
overfeat_leaves = get_all_overfeat_leaves()
for match_string in self.match_strings:
if '.n.' in match_string:
# We were provided a wordnet category and must conglomerate
# points
all_match_labels = overfeat_leaves[match_string]
overfeat_labels = get_all_overfeat_labels()
match_indices = np.array(([overfeat_labels.index(s)
for s in all_match_labels]))
match_indices = np.unique(match_indices)
matches = np.where(
np.in1d(per_window_labels, match_indices).reshape(
per_window_labels.shape) == True)[1]
all_matches.append(np.vstack((xx.flat[matches],
yy.flat[matches])).T)
else:
                # Assume this is an OverFeat class
match_index = get_all_overfeat_labels().index(match_string)
matches = np.where(per_window_labels == match_index)[1]
all_matches.append(np.vstack((xx.flat[matches],
yy.flat[matches])).T)
return all_matches
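# Illustrative usage sketch (not part of the original module): localizing every
# window whose top-5 predictions fall under the WordNet leaf 'dog.n.01' and
# plotting the matching points, following the pattern suggested in the
# predict() docstring. `image` is assumed to be an RGB array of shape
# (height, width, 3) at least 231 pixels on each side.
def _example_localizer_usage(image):
    import matplotlib.pyplot as plt  # only needed for the plot
    localizer = OverfeatLocalizer(match_strings=['dog.n.01'], top_n=5)
    points = localizer.predict(image)[0]  # (n_points, 2): x, y per match
    plt.imshow(image.astype('uint8'))
    plt.scatter(points[:, 0], points[:, 1])
    plt.show()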
| 0.000532 |
import csv, cPickle
import numpy as np
import matplotlib.pyplot as plt
"""
Something quick to get a set of genes from a csv file
"""
file_in = 'batch_query_no_infertile.tsv'
field = 'human_gene_symbol'
ddg2p = 'DDG2P.csv'
annotations = 'annotations.cPickle'
all_output = 'tsv_names_summary_out.txt'
gene_set = set()
gene_duplicates = set()
printed_lines = []
# Import the file
with open(file_in, 'rU') as handle:
    reader = csv.DictReader(handle, delimiter='\t')
    for row in reader:
gene_list = row[field].split('|')
printed_lines.append('{} - {}: {}'.format(row['mp_id'], row['mp_definition'], len(gene_list)))
for gene in gene_list:
if gene in gene_set:
gene_duplicates.add(gene)
else:
gene_set.add(gene)
printed_lines.append('Unique genes found: {}'.format(len(gene_set)))
printed_lines.append('{} genes were present in multiple categories:\n'.format(len(gene_duplicates)))
printed_lines.append(gene_duplicates)
# Dump the gene set to a pickle file
with open('genes_of_interest.cPickle', 'w') as handle:
cPickle.dump(gene_set, handle)
# Grab all the gene names from the DDG2P input file
ddg2p_set = set()
first_line = True
with open(ddg2p, 'r') as handle:
for line in handle:
if first_line:
first_line = False
else:
ddg2p_set.add(line.split(',')[0])
# Identify any overlapping genes:
ddg2p_overlap = set()
for gene in gene_set:
if gene in ddg2p_set:
ddg2p_overlap.add(gene)
# Dump the gene set to a pickle file
with open('ddg2p_overlap_genes.cPickle', 'w') as handle:
cPickle.dump(ddg2p_overlap, handle)
printed_lines.append('Total phenotype genes overlapping DDG2P: {}'.format(len(ddg2p_overlap)))
printed_lines.append(ddg2p_overlap)
# Import and use the pickled set of annotations from the DDD project
# This contains the HI, HS, and phenotype details where available
with open(annotations, 'r') as handle:
anno_dict = cPickle.load(handle)
# Create a list to hold all the HI scores
hi_scores = []
annotated_genes = set()
not_found = set()
for gene in ddg2p_overlap:
found = False
for chromosome in anno_dict:
if gene in anno_dict[chromosome]:
found = True
annotated_genes.add(gene)
printed_lines.append('\nHI Gene Annotations for {}'.format(gene))
ann_keys = anno_dict[chromosome][gene].keys()
if 'hi_score' in ann_keys:
printed_lines.append('\tHI: {}'.format(anno_dict[chromosome][gene]['hi_score']))
hi_scores.append(float(anno_dict[chromosome][gene]['hi_score']))
if 'hs_score' in ann_keys:
printed_lines.append('\tHS: {}'.format(anno_dict[chromosome][gene]['hs_score']))
if 'diseases' in ann_keys:
for disease in anno_dict[chromosome][gene]['diseases']:
printed_lines.append('\t{}'.format(disease))
if not found:
not_found.add(gene)
printed_lines.append('\n{}/{} Genes had annotations available'.format(len(annotated_genes), len(ddg2p_overlap)))
printed_lines.append('{} Genes didn\'t have annotations:'.format(len(not_found)))
printed_lines.append(not_found)
with open(all_output, 'wb') as handle:
for line in printed_lines:
print >>handle, line
# Maybe try and plot this as a graph
line = plt.figure()
plt.plot(sorted(hi_scores), 'o')
plt.ylabel('HI Score')
plt.xlabel('Gene (sorted by HI score)')
plt.title('A scatter plot of all HI scores')
plt.show() | 0.009001 |
"""
XBMCLocalProxy 0.1
Copyright 2011 Torben Gerkensmeyer
Modified for Livestreamer by your mom 2k15
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import base64
import re
import urlparse
import sys
import traceback
import socket
from SocketServer import ThreadingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
class MyHandler(BaseHTTPRequestHandler):
"""
Serves a HEAD request
"""
def do_HEAD(self):
#print "XBMCLocalProxy: Serving HEAD request..."
self.answer_request(0)
"""
Serves a GET request.
"""
def do_GET(self):
#print "XBMCLocalProxy: Serving GET request..."
self.answer_request(1)
def answer_request(self, sendData):
try:
request_path = self.path[1:]
#print 'request_path: ' + request_path
extensions = ['.Vprj', '.edl', '.txt', '.chapters.xml']
for extension in extensions:
if request_path.endswith(extension):
self.send_response(404)
request_path = ''
request_path = re.sub(r"\?.*", "", request_path)
if request_path == "stop":
sys.exit()
elif request_path == "version":
self.send_response(200)
self.end_headers()
self.wfile.write("Proxy: Running\r\n")
self.wfile.write("Version: 0.1")
elif request_path[0:13] == "livestreamer/":
realpath = request_path[13:]
#print 'realpath: ' + realpath
fURL = base64.b64decode(realpath)
#print 'fURL: ' + fURL
self.serveFile(fURL, sendData)
else:
self.send_response(403)
except:
traceback.print_exc()
self.wfile.close()
return
try:
self.wfile.close()
except:
pass
"""
Sends the requested file and add additional headers.
"""
def serveFile(self, fURL, sendData):
from livestreamer import Livestreamer, StreamError, PluginError, NoPluginError
session = Livestreamer()
if '|' in fURL:
sp = fURL.split('|')
fURL = sp[0]
headers = dict(urlparse.parse_qsl(sp[1]))
session.set_option("http-headers", headers)
cookies = dict(urlparse.parse_qsl(sp[2]))
session.set_option("http-cookie", cookies)
        try:
            streams = session.streams(fURL)
        except:
traceback.print_exc(file=sys.stdout)
self.send_response(403)
self.send_response(200)
#print "XBMCLocalProxy: Sending headers..."
self.end_headers()
if (sendData):
#print "XBMCLocalProxy: Sending data..."
fileout = self.wfile
try:
stream = streams["best"]
try:
response = stream.open()
buf = 'INIT'
while (buf != None and len(buf) > 0):
buf = response.read(300 * 1024)
fileout.write(buf)
fileout.flush()
response.close()
fileout.close()
#print time.asctime(), "Closing connection"
except socket.error, e:
#print time.asctime(), "Client Closed the connection."
try:
response.close()
fileout.close()
except Exception, e:
return
except Exception, e:
traceback.print_exc(file=sys.stdout)
response.close()
fileout.close()
except:
traceback.print_exc()
self.wfile.close()
return
try:
self.wfile.close()
except:
pass
class Server(HTTPServer):
"""HTTPServer class with timeout."""
def get_request(self):
"""Get the request and client address from the socket."""
self.socket.settimeout(5.0)
result = None
while result is None:
try:
result = self.socket.accept()
except socket.timeout:
pass
result[0].settimeout(1000)
return result
class ThreadedHTTPServer(ThreadingMixIn, Server):
"""Handle requests in a separate thread."""
HOST_NAME = '127.0.0.1'
PORT_NUMBER = 19000
if __name__ == '__main__':
socket.setdefaulttimeout(10)
server_class = ThreadedHTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler)
#print "XBMCLocalProxy Starts - %s:%s" % (HOST_NAME, PORT_NUMBER)
while(True):
httpd.handle_request()
httpd.server_close()
#print "XBMCLocalProxy Stops %s:%s" % (HOST_NAME, PORT_NUMBER)
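# Illustrative sketch (not part of the original script): a client builds the
# playback URL for this proxy by base64-encoding the stream URL (optionally
# followed by '|<urlencoded headers>|<urlencoded cookies>', as parsed in
# serveFile above) and appending it to the /livestreamer/ path. The twitch.tv
# address in the comment is only an example value.
def make_proxy_url(stream_url, host=HOST_NAME, port=PORT_NUMBER):
    encoded = base64.b64encode(stream_url)
    return 'http://%s:%s/livestreamer/%s' % (host, port, encoded)
# e.g. make_proxy_url('http://www.twitch.tv/somechannel')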
| 0.002751 |
"""
These tests are meant to exercise that requests to create objects bigger
than what the address space allows are properly met with an OverflowError
(rather than crash weirdly).
Primarily, this means 32-bit builds with at least 2 GB of available memory.
You need to pass the -M option to regrtest (e.g. "-M 2.1G") for tests to
be enabled.
"""
from test import support
from test.support import bigaddrspacetest, MAX_Py_ssize_t
import unittest
import operator
import sys
class BytesTest(unittest.TestCase):
@bigaddrspacetest
def test_concat(self):
# Allocate a bytestring that's near the maximum size allowed by
# the address space, and then try to build a new, larger one through
# concatenation.
try:
x = b"x" * (MAX_Py_ssize_t - 128)
self.assertRaises(OverflowError, operator.add, x, b"x" * 128)
finally:
x = None
@bigaddrspacetest
def test_optimized_concat(self):
try:
x = b"x" * (MAX_Py_ssize_t - 128)
with self.assertRaises(OverflowError) as cm:
# this statement used a fast path in ceval.c
x = x + b"x" * 128
with self.assertRaises(OverflowError) as cm:
# this statement used a fast path in ceval.c
x += b"x" * 128
finally:
x = None
@bigaddrspacetest
def test_repeat(self):
try:
x = b"x" * (MAX_Py_ssize_t - 128)
self.assertRaises(OverflowError, operator.mul, x, 128)
finally:
x = None
class StrTest(unittest.TestCase):
unicodesize = 2 if sys.maxunicode < 65536 else 4
@bigaddrspacetest
def test_concat(self):
try:
# Create a string that would fill almost the address space
x = "x" * int(MAX_Py_ssize_t // (1.1 * self.unicodesize))
# Unicode objects trigger MemoryError in case an operation that's
# going to cause a size overflow is executed
self.assertRaises(MemoryError, operator.add, x, x)
finally:
x = None
@bigaddrspacetest
def test_optimized_concat(self):
try:
x = "x" * int(MAX_Py_ssize_t // (1.1 * self.unicodesize))
with self.assertRaises(MemoryError) as cm:
# this statement uses a fast path in ceval.c
x = x + x
with self.assertRaises(MemoryError) as cm:
# this statement uses a fast path in ceval.c
x += x
finally:
x = None
@bigaddrspacetest
def test_repeat(self):
try:
x = "x" * int(MAX_Py_ssize_t // (1.1 * self.unicodesize))
self.assertRaises(MemoryError, operator.mul, x, 2)
finally:
x = None
def test_main():
support.run_unittest(BytesTest, StrTest)
if __name__ == '__main__':
if len(sys.argv) > 1:
support.set_memlimit(sys.argv[1])
test_main()
| 0.001004 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_certificate_info
version_added: "2.10"
short_description: Get information on certificates from a Windows Certificate Store
description:
- Returns information about certificates in a Windows Certificate Store.
options:
thumbprint:
description:
- The thumbprint as a hex string of a certificate to find.
      - When specified, filters the I(certificates) return value to a single certificate.
- See the examples for how to format the thumbprint.
type: str
required: no
store_name:
description:
- The name of the store to search.
- See U(https://docs.microsoft.com/en-us/dotnet/api/system.security.cryptography.x509certificates.storename)
for a list of built-in store names.
type: str
default: My
store_location:
description:
- The location of the store to search.
type: str
choices: [ CurrentUser, LocalMachine ]
default: LocalMachine
seealso:
- module: win_certificate_store
author:
- Micah Hunsberger (@mhunsber)
'''
EXAMPLES = r'''
- name: Obtain information about a particular certificate in the computer's personal store
win_certificate_info:
thumbprint: BD7AF104CF1872BDB518D95C9534EA941665FD27
register: mycert
# thumbprint can also be lower case
- name: Obtain information about a particular certificate in the computer's personal store
win_certificate_info:
thumbprint: bd7af104cf1872bdb518d95c9534ea941665fd27
register: mycert
- name: Obtain information about all certificates in the root store
win_certificate_info:
store_name: Root
register: ca
# Import a pfx and then get information on the certificates
- name: Import pfx certificate that is password protected
win_certificate_store:
path: C:\Temp\cert.pfx
state: present
password: VeryStrongPasswordHere!
become: yes
become_method: runas
register: mycert
- name: Obtain information on each certificate that was touched
win_certificate_info:
thumbprint: "{{ item }}"
register: mycert_stats
loop: "{{ mycert.thumbprints }}"
'''
RETURN = r'''
exists:
description:
- Whether any certificates were found in the store.
    - When I(thumbprint) is specified, returns true only if the certificate matching the thumbprint exists.
returned: success
type: bool
sample: true
certificates:
description:
- A list of information about certificates found in the store, sorted by thumbprint.
returned: success
type: list
elements: dict
contains:
archived:
description: Indicates that the certificate is archived.
type: bool
sample: false
dns_names:
description: Lists the registered dns names for the certificate.
type: list
elements: str
sample: [ '*.m.wikiquote.org', '*.wikipedia.org' ]
extensions:
description: The collection of the certificates extensions.
type: list
elements: dict
sample: [
{
"critical": false,
"field": "Subject Key Identifier",
"value": "88 27 17 09 a9 b6 18 60 8b ec eb ba f6 47 59 c5 52 54 a3 b7"
},
{
"critical": true,
"field": "Basic Constraints",
"value": "Subject Type=CA, Path Length Constraint=None"
},
{
"critical": false,
"field": "Authority Key Identifier",
"value": "KeyID=2b d0 69 47 94 76 09 fe f4 6b 8d 2e 40 a6 f7 47 4d 7f 08 5e"
},
{
"critical": false,
"field": "CRL Distribution Points",
"value": "[1]CRL Distribution Point: Distribution Point Name:Full Name:URL=http://crl.apple.com/root.crl"
},
{
"critical": true,
"field": "Key Usage",
"value": "Digital Signature, Certificate Signing, Off-line CRL Signing, CRL Signing (86)"
},
{
"critical": false,
"field": null,
"value": "05 00"
}
]
friendly_name:
description: The associated alias for the certificate.
type: str
sample: Microsoft Root Authority
has_private_key:
description: Indicates that the certificate contains a private key.
type: bool
sample: false
intended_purposes:
      description: Lists the intended applications for the certificate.
returned: enhanced key usages extension exists.
type: list
sample: [ "Server Authentication" ]
is_ca:
description: Indicates that the certificate is a certificate authority (CA) certificate.
returned: basic constraints extension exists.
type: bool
sample: true
issued_by:
description: The certificate issuer's common name.
type: str
sample: Apple Root CA
issued_to:
description: The certificate's common name.
type: str
sample: Apple Worldwide Developer Relations Certification Authority
issuer:
description: The certificate issuer's distinguished name.
type: str
sample: 'CN=Apple Root CA, OU=Apple Certification Authority, O=Apple Inc., C=US'
key_usages:
description:
- Defines how the certificate key can be used.
- If this value is not defined, the key can be used for any purpose.
returned: key usages extension exists.
type: list
elements: str
sample: [ "CrlSign", "KeyCertSign", "DigitalSignature" ]
path_length_constraint:
description:
- The number of levels allowed in a certificates path.
- If this value is 0, the certificate does not have a restriction.
returned: basic constraints extension exists
type: int
sample: 0
public_key:
description: The base64 encoded public key of the certificate.
type: str
cert_data:
description: The base64 encoded data of the entire certificate.
type: str
serial_number:
description: The serial number of the certificate represented as a hexadecimal string
type: str
sample: 01DEBCC4396DA010
signature_algorithm:
description: The algorithm used to create the certificate's signature
type: str
sample: sha1RSA
ski:
description: The certificate's subject key identifier
returned: subject key identifier extension exists.
type: str
sample: 88271709A9B618608BECEBBAF64759C55254A3B7
subject:
description: The certificate's distinguished name.
type: str
sample: 'CN=Apple Worldwide Developer Relations Certification Authority, OU=Apple Worldwide Developer Relations, O=Apple Inc., C=US'
thumbprint:
description:
- The thumbprint as a hex string of the certificate.
- The return format will always be upper case.
type: str
sample: FF6797793A3CD798DC5B2ABEF56F73EDC9F83A64
valid_from:
description: The start date of the certificate represented in seconds since epoch.
type: float
sample: 1360255727
valid_from_iso8601:
description: The start date of the certificate represented as an iso8601 formatted date.
type: str
sample: '2017-12-15T08:39:32Z'
valid_to:
description: The expiry date of the certificate represented in seconds since epoch.
type: float
sample: 1675788527
valid_to_iso8601:
description: The expiry date of the certificate represented as an iso8601 formatted date.
type: str
sample: '2086-01-02T08:39:32Z'
version:
description: The x509 format version of the certificate
type: int
sample: 3
'''
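# Illustrative follow-up (not part of the module documentation above): once a
# result is registered, the returned certificates list can be consumed like any
# other fact. The hypothetical task below prints the subject and expiry of each
# certificate found by the "root store" example, using only keys documented in
# RETURN.
#
# - name: Show subject and expiry for each root certificate
#   debug:
#     msg: "{{ item.issued_to }} expires {{ item.valid_to_iso8601 }}"
#   loop: "{{ ca.certificates }}"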
| 0.002472 |
from datetime import datetime
from nextgisweb import DBSession
from shapely.geometry import MultiLineString
import transaction
from nextgisweb_compulink.compulink_data_reactor.reactors.abstract_reactor import AbstractReactor
from nextgisweb_compulink.compulink_data_reactor import COMP_ID
from nextgisweb.feature_layer import Feature
from nextgisweb.vector_layer import TableInfo
from nextgisweb_compulink.compulink_admin.model import FoclStruct
from ...utils import DistanceUtils
from nextgisweb_log.model import LogEntry
__author__ = 'yellow'
@AbstractReactor.registry.register
class ConstructSpecTransitionLineReactor(AbstractReactor):
identity = 'construct_spec_transition_line'
priority = 2
# Max len of spec transition
DISTANCE_LIMIT = 300
log_info = staticmethod(lambda x: LogEntry.info(x, component=COMP_ID, group=ConstructSpecTransitionLineReactor.identity, append_dt=datetime.now()))
log_debug = staticmethod(lambda x: LogEntry.debug(x, component=COMP_ID, group=ConstructSpecTransitionLineReactor.identity, append_dt=datetime.now()))
log_warning = staticmethod(lambda x: LogEntry.warning(x, component=COMP_ID, group=ConstructSpecTransitionLineReactor.identity, append_dt=datetime.now()))
@classmethod
def run(cls, env):
db_session = DBSession()
transaction.manager.begin()
cls.log_info('ConstructSpecTransitionLineReactor started!')
fs_resources = db_session.query(FoclStruct).all()
for fs in fs_resources:
cls.smart_construct_line(fs)
db_session.flush()
cls.log_info('ConstructSpecTransitionLineReactor finished!')
transaction.manager.commit()
@classmethod
def construct_line(cls, focl_res):
points_lyr = [lyr for lyr in focl_res.children if lyr.keyname and lyr.keyname.startswith('actual_real_special_transition_point')]
points_lyr = points_lyr[0] if len(points_lyr) else None
lines_lyr = [lyr for lyr in focl_res.children if lyr.keyname and
not lyr.keyname.startswith('actual_real_special_transition_point') and
lyr.keyname.startswith('actual_real_special_transition')]
lines_lyr = lines_lyr[0] if len(lines_lyr) else None
query = points_lyr.feature_query()
query.geom()
result = query()
if result.total_count > 0:
cls.log_debug('Construct spec_trans line for %s started!' % focl_res.display_name)
else:
            cls.log_debug('Construct spec_trans line for %s skipped (no points)!' % focl_res.display_name)
return
starts = []
ends = []
for feature in result:
if feature.fields['special_laying_number'] == 'entrance':
starts.append(feature)
else:
ends.append(feature)
if len(starts) != len(ends):
            cls.log_warning('Line %s has unpaired count of start/end points! (%s/%s)' % (focl_res.display_name, len(starts), len(ends)))
#clear line lyr
cls.clear_layer(lines_lyr)
#merge points in two mass
for start_point_feat in starts:
if len(ends) < 1:
continue
# get near point
near_point_feat = ends[0]
near_len = start_point_feat.geom.distance(near_point_feat.geom)
for end_point_feat in ends:
if start_point_feat.geom.distance(end_point_feat.geom) < near_len:
near_point_feat = end_point_feat
near_len = start_point_feat.geom.distance(end_point_feat.geom)
# check distance limit
real_dist = DistanceUtils.get_spherical_distance(start_point_feat.geom[0], near_point_feat.geom[0])
if real_dist > cls.DISTANCE_LIMIT:
cls.log_warning('Point %s has no paired points near that maximum distance!' % start_point_feat.id)
continue
# construct line
line_feats = [start_point_feat, near_point_feat]
info = cls.get_segment_info(line_feats)
cls.write_segment(lines_lyr, line_feats, info)
# remove from ends
ends.remove(near_point_feat)
@classmethod
def smart_construct_line(cls, focl_res):
# get target layer
lines_lyr = [lyr for lyr in focl_res.children if lyr.keyname and
not lyr.keyname.startswith('actual_real_special_transition_point') and
lyr.keyname.startswith('actual_real_special_transition')]
lines_lyr = lines_lyr[0] if len(lines_lyr) else None
if not lines_lyr:
            cls.log_debug('Construct line for %s skipped (no result line layer)!' % focl_res.display_name)
return
# get existings lines (for filter points)
query = lines_lyr.feature_query()
query.geom()
lines = query()
lines_vertexes = []
for line_feat in lines:
for coord in line_feat.geom[0].coords:
lines_vertexes.append(coord)
# Collect features for processing
points_lyr = [lyr for lyr in focl_res.children if
lyr.keyname and lyr.keyname.startswith('actual_real_special_transition_point')]
points_lyr = points_lyr[0] if len(points_lyr) else None
features = []
if points_lyr:
# get all points
query = points_lyr.feature_query()
query.geom()
result = query()
# filter - only non ~lined~
for feature in result:
if feature.geom[0].coords[0] not in lines_vertexes:
features.append(feature)
if len(features) > 0:
cls.log_debug('Construct spec_trans line for %s started!' % focl_res.display_name)
else:
            cls.log_debug('Construct spec_trans line for %s skipped (no points)!' % focl_res.display_name)
return
# split points as starts and ends
starts = []
ends = []
for feature in features:
if feature.fields['special_laying_number'] == 'entrance':
starts.append(feature)
else:
ends.append(feature)
if len(starts) != len(ends):
            cls.log_warning('Line %s has unpaired count of start/end points! (%s/%s)' % (
focl_res.display_name, len(starts), len(ends)))
# merge points to segments
for start_point_feat in starts:
if len(ends) < 1:
continue
# get near point
near_point_feat = ends[0]
near_len = start_point_feat.geom.distance(near_point_feat.geom)
for end_point_feat in ends:
if start_point_feat.geom.distance(end_point_feat.geom) < near_len:
near_point_feat = end_point_feat
near_len = start_point_feat.geom.distance(end_point_feat.geom)
# check distance limit
real_dist = DistanceUtils.get_spherical_distance(start_point_feat.geom[0], near_point_feat.geom[0])
if real_dist > cls.DISTANCE_LIMIT:
cls.log_warning('Point %s has no paired points near that maximum distance!' % start_point_feat.id)
continue
# construct line
line_feats = [start_point_feat, near_point_feat]
info = cls.get_segment_info(line_feats)
cls.write_segment(lines_lyr, line_feats, info)
# remove from ends
ends.remove(near_point_feat)
@classmethod
def clear_layer(cls, layer):
tableinfo = TableInfo.from_layer(layer)
tableinfo.setup_metadata(tablename=layer._tablename)
DBSession.query(tableinfo.model).delete()
@classmethod
def write_segment(cls, layer, line_feats, info):
points = [feat.geom[0].coords[0] for feat in line_feats]
feature = Feature(fields=info, geom=MultiLineString([points]))
feature_id = layer.feature_create(feature)
@classmethod
def get_segment_info(cls, features):
# get laying_method
laying_methods = []
for feat in features:
if feat.fields['special_laying_method'] and feat.fields['special_laying_method'] not in laying_methods:
laying_methods.append(feat.fields['special_laying_method'])
        if laying_methods:
            laying_method = None
            order = ['hdd', 'towers', 'bottom']
            for selected_lay_met in order:
                if selected_lay_met in laying_methods:
                    laying_method = selected_lay_met
                    break
            # fall back to the first collected method if no preferred one matched
            if not laying_method:
                laying_method = laying_methods[0]
else:
laying_method = None
# get built_date
built_date = features[0].fields['built_date']
for feat in features:
if feat.fields['built_date'] > built_date:
built_date = feat.fields['built_date']
return {'special_laying_method': laying_method, 'built_date': built_date} | 0.00418 |
import logging
import os
class MediaTypeTranslator():
def __init__(self):
self.types = {}
self.addTypes(self.DOCUMENTS, 'document')
self.addTypes(self.OSX_APPLICATIONS, 'application/mac')
self.addTypes(self.LINUX_APPLICATIONS, 'application/linux')
self.addTypes(self.WINDOWS_APPLICATIONS, 'application/win')
self.addTypes(self.AUDIO, 'audio')
self.addTypes(self.VIDEO, 'video')
self.addTypes(self.IMAGE, 'image')
self.addTypes(self.GENERAL_APPLICATION, 'application')
self.addTypes(self.ARCHIVE, 'archive')
def addTypes(self, extensions, category):
#logging.info('Adding types....')
for ext in extensions:
            if ext in self.types:
logging.error('Duplicate type: %s', ext)
else:
self.types[ext] = category
def getType(self, fileName):
#logging.info('Getting ext for file name: %s', fileName)
(shortname, ext) = os.path.splitext(fileName)
# The splitext function leaves the '.'
ext = ext.strip('.').lower()
#logging.info('Found extension: %s', ext)
if ext is None or len(ext) > 7:
logging.debug('No extension: %s', ext)
return 'unknown'
        elif ext not in self.types:
logging.warn('Unknown extension: %s', ext)
return 'unknown'
else:
return self.types.get(ext)
DOCUMENTS = [
'html', 'htm', 'xhtml', 'mht', 'mhtml', 'xml',
'txt', 'ans', 'asc', 'diz', 'eml',
'pdf', 'ps', 'epsf', 'dvi',
'rtf', 'wri', 'doc', 'mcw', 'wps',
'xls', 'wk1', 'dif', 'csv', 'ppt', 'tsv',
'hlp', 'chm', 'lit',
'tex', 'texi', 'latex', 'info', 'man',
'wp', 'wpd', 'wp5', 'wk3', 'wk4', 'shw',
'sdd', 'sdw', 'sdp', 'sdc',
'sxd', 'sxw', 'sxp', 'sxc',
'abw', 'kwd', 'js', 'java', 'cpp', 'c', 'py', 'php', 'ruby',
'pps', # PowerPoint show
'dll',
'jhtml', # Java in html
'mmap', # mind mapping document
'dat', # data file
'bash',
]
OSX_APPLICATIONS = [
'dmg', 'pkg'
]
LINUX_APPLICATIONS = [
'mdb', 'sh', 'csh', 'awk', 'pl',
'rpm', 'deb', 'z', 'zoo', 'tar',
'taz', 'shar', 'hqx', '7z',
]
WINDOWS_APPLICATIONS = [
'exe', 'cab', 'msi', 'msp',
'arj', 'ace',
'nsi', # Nullsoft installer.
]
AUDIO = [
'mp3', 'mpa', 'mp1', 'mpga', 'mp2',
'ra', 'rm', 'ram', 'rmj',
'wma', 'wav', 'm4a', 'm4p',
'lqt', 'ogg', 'med',
'aif', 'aiff', 'aifc',
'au', 'snd', 's3m', 'aud',
'mid', 'midi', 'rmi', 'mod', 'kar',
'ac3', 'shn', 'fla', 'flac', 'cda',
'mka',
]
VIDEO = [
'mpg', 'mpeg', 'mpe', 'mng', 'mpv', 'm1v',
'vob', 'mpv2', 'mp2v', 'm2p', 'm2v', 'm4v', 'mpgv',
'vcd', 'mp4', 'dv', 'dvd', 'div', 'divx', 'dvx',
'smi', 'smil', 'rv', 'rmm', 'rmvb',
'avi', 'asf', 'asx', 'wmv', 'qt', 'mov',
'fli', 'flc', 'flx', 'flv',
'wml', 'vrml', 'swf', 'dcr', 'jve', 'nsv',
'mkv', 'ogm',
'cdg', 'srt', 'sub', 'idx', 'msmedia',
'wvx', # This is a redirect to a wmv
]
IMAGE = [
'gif', 'png',
'jpg', 'jpeg', 'jpe', 'jif', 'jiff', 'jfif',
'tif', 'tiff', 'iff', 'lbm', 'ilbm', 'eps',
'mac', 'drw', 'pct', 'img',
'bmp', 'dib', 'rle', 'ico', 'ani', 'icl', 'cur',
'emf', 'wmf', 'pcx',
'pcd', 'tga', 'pic', 'fig',
'psd', 'wpg', 'dcx', 'cpt', 'mic',
'pbm', 'pnm', 'ppm', 'xbm', 'xpm', 'xwd',
'sgi', 'fax', 'rgb', 'ras'
]
GENERAL_APPLICATION = [
'jar', 'jnlp', 'iso', 'bin',
'nrg', # Nero CD image file.
'cue', # Another CD image file type.
]
ARCHIVE = [
'zip', 'sitx', 'sit', 'tgz', 'gz', 'gzip', 'bz2','rar', 'lzh','lha'
]
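# Illustrative usage sketch (not part of the original module): extensions are
# looked up case-insensitively, and unknown or over-long extensions fall back
# to 'unknown'.
def _example_media_type_lookup():
    translator = MediaTypeTranslator()
    assert translator.getType('Holiday.Video.MKV') == 'video'
    assert translator.getType('notes.txt') == 'document'
    assert translator.getType('archive.unknownext') == 'unknown'
    return translator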
| 0.013458 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin
import os
class Anaconda(Plugin, RedHatPlugin):
"""Anaconda installer
"""
plugin_name = 'anaconda'
profiles = ('system',)
files = (
'/var/log/anaconda.log',
'/var/log/anaconda'
)
def setup(self):
paths = [
"/root/anaconda-ks.cfg"
]
if os.path.isdir('/var/log/anaconda'):
# new anaconda
paths.append('/var/log/anaconda')
else:
paths = paths + [
"/var/log/anaconda.*",
"/root/install.log",
"/root/install.log.syslog"
]
self.add_copy_spec(paths)
def postproc(self):
self.do_file_sub(
"/root/anaconda-ks.cfg",
r"(\s*rootpw\s*).*",
r"\1********"
)
self.do_file_sub(
"/root/anaconda-ks.cfg",
r"(user.*--password=*\s*)\s*(\S*)",
r"\1********"
)
# vim: set et ts=4 sw=4 :
| 0 |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
def _change_index_columns(migrate_engine, new_columns, old_columns):
meta = MetaData()
meta.bind = migrate_engine
table = Table('project_user_quotas', meta, autoload=True)
index_name = 'project_user_quotas_user_id_deleted_idx'
Index(
index_name,
*[getattr(table.c, col) for col in old_columns]
).drop(migrate_engine)
Index(
index_name,
*[getattr(table.c, col) for col in new_columns]
).create()
def upgrade(migrate_engine):
new_columns = ('user_id', 'deleted')
old_columns = ('project_id', 'deleted')
_change_index_columns(migrate_engine, new_columns, old_columns)
def downgrade(migrate_engine):
new_columns = ('project_id', 'deleted')
old_columns = ('user_id', 'deleted')
_change_index_columns(migrate_engine, new_columns, old_columns)
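# Illustrative note (not part of the original migration): on upgrade the index
# swap is roughly equivalent to the following SQL; the exact DDL emitted
# depends on the database backend in use.
#
#   DROP INDEX project_user_quotas_user_id_deleted_idx;
#   CREATE INDEX project_user_quotas_user_id_deleted_idx
#       ON project_user_quotas (user_id, deleted);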
| 0 |
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Utility functions which help with running tests.
"""
import numpy as np
from neon.backends.backend import Tensor
def assert_tensor_equal(actual, desired):
"""
Ensures that Tensor array contents are identical in shape and each element.
Arguments:
actual (object): The first Tensor for comparison.
desired (object): The expected value to be compared against.
Raises:
AssertionError: if any of the elements or shapes differ.
"""
assert_tensor_near_equal(actual, desired, tolerance=0)
def assert_tensor_near_equal(actual, desired, tolerance=1e-7):
"""
Ensures that Tensor array contents are equal (up to the specified
tolerance).
Arguments:
actual (object): The first value for comparison.
desired (object): The expected value to be compared against.
tolerance (float, optional): Threshold tolerance. Items are considered
equal if their absolute difference does
not exceed this value.
Raises:
AssertionError: if the objects differ.
"""
if isinstance(desired, Tensor):
desired = desired.asnumpyarray()
if isinstance(actual, Tensor):
actual = actual.asnumpyarray()
np.testing.assert_allclose(actual, desired, atol=tolerance, rtol=0)
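# Illustrative usage sketch (not part of the original module): both helpers
# also accept plain numpy arrays, since neon Tensors are converted with
# asnumpyarray() before the comparison.
def _example_near_equal_check():
    a = np.array([1.0, 2.0, 3.0])
    b = a + 5e-8  # within the default 1e-7 absolute tolerance
    assert_tensor_near_equal(a, b)
    assert_tensor_equal(a, np.copy(a))  # tolerance=0, exact match required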
| 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
***************************************************************************
generate_test_mask_image.py
---------------------
Date : February 2015
Copyright : (C) 2015 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'February 2015'
__copyright__ = '(C) 2015, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
# Generates (or updates) a unit test image mask, which is used to specify whether
# a pixel in the control image should be checked (black pixel in mask) or not (white
# pixel in mask). For non black or white pixels, the pixels lightness is used to
# specify a maximum delta for each color component
import os
import sys
import argparse
from PyQt5.QtGui import QImage, QColor, qRed, qBlue, qGreen, qAlpha, qRgb
import struct
import urllib.request
import urllib.error
import urllib.parse
import glob
def error(msg):
print(msg)
sys.exit(1)
def colorDiff(c1, c2):
redDiff = abs(qRed(c1) - qRed(c2))
greenDiff = abs(qGreen(c1) - qGreen(c2))
blueDiff = abs(qBlue(c1) - qBlue(c2))
alphaDiff = abs(qAlpha(c1) - qAlpha(c2))
return max(redDiff, greenDiff, blueDiff, alphaDiff)
def imageFromPath(path):
if (path[:7] == 'http://' or path[:7] == 'file://' or path[:8] == 'https://'):
# fetch remote image
data = urllib.request.urlopen(path).read()
image = QImage()
image.loadFromData(data)
else:
image = QImage(path)
return image
def getControlImagePath(path):
if os.path.isfile(path):
return path
# else try and find matching test image
script_folder = os.path.dirname(os.path.realpath(sys.argv[0]))
control_images_folder = os.path.join(script_folder, '../tests/testdata/control_images')
matching_control_images = [x[0] for x in os.walk(control_images_folder) if path in x[0]]
if len(matching_control_images) > 1:
error('Found multiple matching control images for {}'.format(path))
elif len(matching_control_images) == 0:
error('No matching control images found for {}'.format(path))
found_control_image_path = matching_control_images[0]
# check for a single matching expected image
images = glob.glob(os.path.join(found_control_image_path, '*.png'))
filtered_images = [i for i in images if not i[-9:] == '_mask.png']
if len(filtered_images) > 1:
error('Found multiple matching control images for {}'.format(path))
elif len(filtered_images) == 0:
error('No matching control images found for {}'.format(path))
found_image = filtered_images[0]
print('Found matching control image: {}'.format(found_image))
return found_image
def updateMask(control_image_path, rendered_image_path, mask_image_path):
control_image = imageFromPath(control_image_path)
if not control_image:
error('Could not read control image {}'.format(control_image_path))
rendered_image = imageFromPath(rendered_image_path)
if not rendered_image:
error('Could not read rendered image {}'.format(rendered_image_path))
if not rendered_image.width() == control_image.width() or not rendered_image.height() == control_image.height():
print(('Size mismatch - control image is {}x{}, rendered image is {}x{}'.format(control_image.width(),
control_image.height(),
rendered_image.width(),
rendered_image.height())))
max_width = min(rendered_image.width(), control_image.width())
max_height = min(rendered_image.height(), control_image.height())
# read current mask, if it exist
mask_image = imageFromPath(mask_image_path)
if mask_image.isNull():
print('Mask image does not exist, creating {}'.format(mask_image_path))
mask_image = QImage(control_image.width(), control_image.height(), QImage.Format_ARGB32)
mask_image.fill(QColor(0, 0, 0))
# loop through pixels in rendered image and compare
mismatch_count = 0
linebytes = max_width * 4
for y in range(max_height):
control_scanline = control_image.constScanLine(y).asstring(linebytes)
rendered_scanline = rendered_image.constScanLine(y).asstring(linebytes)
mask_scanline = mask_image.scanLine(y).asstring(linebytes)
for x in range(max_width):
currentTolerance = qRed(struct.unpack('I', mask_scanline[x * 4:x * 4 + 4])[0])
if currentTolerance == 255:
# ignore pixel
continue
expected_rgb = struct.unpack('I', control_scanline[x * 4:x * 4 + 4])[0]
rendered_rgb = struct.unpack('I', rendered_scanline[x * 4:x * 4 + 4])[0]
difference = colorDiff(expected_rgb, rendered_rgb)
if difference > currentTolerance:
# update mask image
mask_image.setPixel(x, y, qRgb(difference, difference, difference))
mismatch_count += 1
if mismatch_count:
# update mask
mask_image.save(mask_image_path, "png")
print('Updated {} pixels in {}'.format(mismatch_count, mask_image_path))
else:
print('No mismatches in {}'.format(mask_image_path))
parser = argparse.ArgumentParser() # OptionParser("usage: %prog control_image rendered_image mask_image")
parser.add_argument('control_image')
parser.add_argument('rendered_image')
parser.add_argument('mask_image', nargs='?', default=None)
args = parser.parse_args()
args.control_image = getControlImagePath(args.control_image)
if not args.mask_image:
args.mask_image = args.control_image[:-4] + '_mask.png'
updateMask(args.control_image, args.rendered_image, args.mask_image)
| 0.002859 |
#!/usr/bin/python
import os, sys, time, getopt
lib_path = os.path.abspath('testutils')
sys.path.append(lib_path)
from TestUtilsL47 import XenaScriptTools
def helptext():
print
print "Usage: %s ipaddr port\n" % (sys.argv[0])
print
sys.exit(1)
def main(argv):
c_debug = 0
try:
opts, args = getopt.getopt(sys.argv[1:], "dh")
except getopt.GetoptError:
helptext()
return
for opt, arg in opts:
if opt == '-h':
helptext()
return
        elif opt == "-d":
c_debug=1
if len(args) != 2:
helptext()
ip_address = args[0]
port = args[1]
xm = XenaScriptTools(ip_address)
if c_debug:
xm.debugOn()
xm.haltOn()
xm.Logon("xena")
cgs = xm.Send(port + " P4G_INDICES ?").split()[2:]
print "\n==PACKET COUNTS====================="
xm.PrintPortStatistics(port)
prx = xm.Send(port + " P4_ETH_RX_COUNTERS ?")
print prx
ptx = xm.Send(port + " P4_ETH_TX_COUNTERS ?")
print ptx
print "\n==TCP GOODPUT======================="
for cg in cgs:
res = xm.Send(port + " P4G_TCP_TX_PAYLOAD_COUNTERS [" + cg + "] ?")
print res
res = xm.Send(port + " P4G_TCP_RX_PAYLOAD_COUNTERS [" + cg + "] ?")
print res
print "\n==CONNECTION TIMES=================="
for cg in cgs:
res = xm.Send(port + " P4G_TCP_ESTABLISH_HIST [" + cg + "] ?")
print res
res = xm.Send(port + " P4G_TCP_CLOSE_HIST [" + cg + "] ?")
print res
print
if __name__ == '__main__':
sys.exit(main(sys.argv))
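# Example invocation (the chassis address and port id are illustrative; the port string
# is used verbatim as the prefix of the P4G/P4_ETH CLI queries above):
#
#   python <this_script>.py -d 10.0.0.5 0/1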
| 0.035279 |
#!/usr/bin/env python
"""
Module for creating Sankey diagrams using matplotlib
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import zip
# Original version by Yannick Copin ([email protected]) 10/2/2010, available
# at:
# http://matplotlib.org/examples/api/sankey_demo_old.html
# Modifications by Kevin Davies ([email protected]) 6/3/2011:
# --Used arcs for the curves (so that the widths of the paths are uniform)
# --Converted the function to a class and created methods to join multiple
# simple Sankey diagrams
# --Provided handling for cases where the total of the inputs isn't 100
# Now, the default layout is based on the assumption that the inputs sum to
# 1. A scaling parameter can be used in other cases.
# --The call structure was changed to be more explicit about layout,
# including the length of the trunk, length of the paths, gap between the
# paths, and the margin around the diagram.
# --Allowed the lengths of paths to be adjusted individually, with an option
# to automatically justify them
# --The call structure was changed to make the specification of path
# orientation more flexible. Flows are passed through one array, with
# inputs being positive and outputs being negative. An orientation
# argument specifies the direction of the arrows. The "main"
# inputs/outputs are now specified via an orientation of 0, and there may
# be several of each.
# --Changed assertions to ValueError to catch common calling errors (by
# Francesco Montesano, [email protected])
# --Added the physical unit as a string argument to be used in the labels, so
# that the values of the flows can usually be applied automatically
# --Added an argument for a minimum magnitude below which flows are not shown
# --Added a tapered trunk in the case that the flows do not sum to 0
# --Allowed the diagram to be rotated
import numpy as np
from matplotlib.cbook import iterable, Bunch
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.transforms import Affine2D
from matplotlib import verbose
from matplotlib import docstring
__author__ = "Kevin L. Davies"
__credits__ = ["Yannick Copin"]
__license__ = "BSD"
__version__ = "2011/09/16"
# Angles [deg/90]
RIGHT = 0
UP = 1
# LEFT = 2
DOWN = 3
class Sankey(object):
"""
Sankey diagram in matplotlib
Sankey diagrams are a specific type of flow diagram, in which
the width of the arrows is shown proportionally to the flow
quantity. They are typically used to visualize energy or
material or cost transfers between processes.
`Wikipedia (6/1/2011) <http://en.wikipedia.org/wiki/Sankey_diagram>`_
"""
def __init__(self, ax=None, scale=1.0, unit='', format='%G', gap=0.25,
radius=0.1, shoulder=0.03, offset=0.15, head_angle=100,
margin=0.4, tolerance=1e-6, **kwargs):
"""
Create a new Sankey instance.
Optional keyword arguments:
=============== ===================================================
Field Description
=============== ===================================================
*ax* axes onto which the data should be plotted
If *ax* isn't provided, new axes will be created.
*scale* scaling factor for the flows
*scale* sizes the width of the paths in order to
maintain proper layout. The same scale is applied
to all subdiagrams. The value should be chosen
such that the product of the scale and the sum of
the inputs is approximately 1.0 (and the product of
the scale and the sum of the outputs is
approximately -1.0).
*unit* string representing the physical unit associated
with the flow quantities
If *unit* is None, then none of the quantities are
labeled.
*format* a Python number formatting string to be used in
labeling the flow as a quantity (i.e., a number
times a unit, where the unit is given)
*gap* space between paths that break in/break away
to/from the top or bottom
*radius* inner radius of the vertical paths
        *shoulder*      size of the shoulders of output arrows
*offset* text offset (from the dip or tip of the arrow)
*head_angle* angle of the arrow heads (and negative of the angle
of the tails) [deg]
*margin* minimum space between Sankey outlines and the edge
of the plot area
*tolerance* acceptable maximum of the magnitude of the sum of
flows
The magnitude of the sum of connected flows cannot
be greater than *tolerance*.
=============== ===================================================
The optional arguments listed above are applied to all subdiagrams so
that there is consistent alignment and formatting.
If :class:`Sankey` is instantiated with any keyword arguments other
than those explicitly listed above (``**kwargs``), they will be passed
to :meth:`add`, which will create the first subdiagram.
In order to draw a complex Sankey diagram, create an instance of
:class:`Sankey` by calling it without any kwargs::
sankey = Sankey()
Then add simple Sankey sub-diagrams::
sankey.add() # 1
sankey.add() # 2
#...
sankey.add() # n
Finally, create the full diagram::
sankey.finish()
Or, instead, simply daisy-chain those calls::
Sankey().add().add... .add().finish()
.. seealso::
:meth:`add`
:meth:`finish`
**Examples:**
.. plot:: mpl_examples/api/sankey_demo_basics.py
"""
# Check the arguments.
if gap < 0:
raise ValueError(
"The gap is negative.\nThis isn't allowed because it "
"would cause the paths to overlap.")
if radius > gap:
raise ValueError(
"The inner radius is greater than the path spacing.\n"
"This isn't allowed because it would cause the paths to overlap.")
if head_angle < 0:
raise ValueError(
"The angle is negative.\nThis isn't allowed "
"because it would cause inputs to look like "
"outputs and vice versa.")
if tolerance < 0:
raise ValueError(
"The tolerance is negative.\nIt must be a magnitude.")
# Create axes if necessary.
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[])
self.diagrams = []
# Store the inputs.
self.ax = ax
self.unit = unit
self.format = format
self.scale = scale
self.gap = gap
self.radius = radius
self.shoulder = shoulder
self.offset = offset
self.margin = margin
self.pitch = np.tan(np.pi * (1 - head_angle / 180.0) / 2.0)
self.tolerance = tolerance
# Initialize the vertices of tight box around the diagram(s).
self.extent = np.array((np.inf, -np.inf, np.inf, -np.inf))
# If there are any kwargs, create the first subdiagram.
if len(kwargs):
self.add(**kwargs)
def _arc(self, quadrant=0, cw=True, radius=1, center=(0, 0)):
"""
Return the codes and vertices for a rotated, scaled, and translated
90 degree arc.
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*quadrant* uses 0-based indexing (0, 1, 2, or 3)
*cw* if True, clockwise
*center* (x, y) tuple of the arc's center
=============== ==========================================
"""
# Note: It would be possible to use matplotlib's transforms to rotate,
# scale, and translate the arc, but since the angles are discrete,
# it's just as easy and maybe more efficient to do it here.
ARC_CODES = [Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4]
# Vertices of a cubic Bezier curve approximating a 90 deg arc
# These can be determined by Path.arc(0,90).
ARC_VERTICES = np.array([[1.00000000e+00, 0.00000000e+00],
[1.00000000e+00, 2.65114773e-01],
[8.94571235e-01, 5.19642327e-01],
[7.07106781e-01, 7.07106781e-01],
[5.19642327e-01, 8.94571235e-01],
[2.65114773e-01, 1.00000000e+00],
# Insignificant
# [6.12303177e-17, 1.00000000e+00]])
[0.00000000e+00, 1.00000000e+00]])
if quadrant == 0 or quadrant == 2:
if cw:
vertices = ARC_VERTICES
else:
vertices = ARC_VERTICES[:, ::-1] # Swap x and y.
elif quadrant == 1 or quadrant == 3:
# Negate x.
if cw:
# Swap x and y.
vertices = np.column_stack((-ARC_VERTICES[:, 1],
ARC_VERTICES[:, 0]))
else:
vertices = np.column_stack((-ARC_VERTICES[:, 0],
ARC_VERTICES[:, 1]))
if quadrant > 1:
radius = -radius # Rotate 180 deg.
return list(zip(ARC_CODES, radius * vertices +
np.tile(center, (ARC_VERTICES.shape[0], 1))))
def _add_input(self, path, angle, flow, length):
"""
Add an input to a path and return its tip and label locations.
"""
if angle is None:
return [0, 0], [0, 0]
else:
x, y = path[-1][1] # Use the last point as a reference.
dipdepth = (flow / 2) * self.pitch
if angle == RIGHT:
x -= length
dip = [x + dipdepth, y + flow / 2.0]
path.extend([(Path.LINETO, [x, y]),
(Path.LINETO, dip),
(Path.LINETO, [x, y + flow]),
(Path.LINETO, [x + self.gap, y + flow])])
label_location = [dip[0] - self.offset, dip[1]]
else: # Vertical
x -= self.gap
if angle == UP:
sign = 1
else:
sign = -1
dip = [x - flow / 2, y - sign * (length - dipdepth)]
if angle == DOWN:
quadrant = 2
else:
quadrant = 1
# Inner arc isn't needed if inner radius is zero
if self.radius:
path.extend(self._arc(quadrant=quadrant,
cw=angle == UP,
radius=self.radius,
center=(x + self.radius,
y - sign * self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y - sign * length]),
(Path.LINETO, dip),
(Path.LINETO, [x - flow, y - sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=flow + self.radius,
center=(x + self.radius,
y - sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [dip[0], dip[1] - sign * self.offset]
return dip, label_location
def _add_output(self, path, angle, flow, length):
"""
Append an output to a path and return its tip and label locations.
.. note:: *flow* is negative for an output.
"""
if angle is None:
return [0, 0], [0, 0]
else:
x, y = path[-1][1] # Use the last point as a reference.
tipheight = (self.shoulder - flow / 2) * self.pitch
if angle == RIGHT:
x += length
tip = [x + tipheight, y + flow / 2.0]
path.extend([(Path.LINETO, [x, y]),
(Path.LINETO, [x, y + self.shoulder]),
(Path.LINETO, tip),
(Path.LINETO, [x, y - self.shoulder + flow]),
(Path.LINETO, [x, y + flow]),
(Path.LINETO, [x - self.gap, y + flow])])
label_location = [tip[0] + self.offset, tip[1]]
else: # Vertical
x += self.gap
if angle == UP:
sign = 1
else:
sign = -1
tip = [x - flow / 2.0, y + sign * (length + tipheight)]
if angle == UP:
quadrant = 3
else:
quadrant = 0
# Inner arc isn't needed if inner radius is zero
if self.radius:
path.extend(self._arc(quadrant=quadrant,
cw=angle == UP,
radius=self.radius,
center=(x - self.radius,
y + sign * self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y + sign * length]),
(Path.LINETO, [x - self.shoulder,
y + sign * length]),
(Path.LINETO, tip),
(Path.LINETO, [x + self.shoulder - flow,
y + sign * length]),
(Path.LINETO, [x - flow, y + sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=self.radius - flow,
center=(x - self.radius,
y + sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [tip[0], tip[1] + sign * self.offset]
return tip, label_location
def _revert(self, path, first_action=Path.LINETO):
"""
A path is not simply revertable by path[::-1] since the code
specifies an action to take from the **previous** point.
"""
reverse_path = []
next_code = first_action
for code, position in path[::-1]:
reverse_path.append((next_code, position))
next_code = code
return reverse_path
# This might be more efficient, but it fails because 'tuple' object
# doesn't support item assignment:
# path[1] = path[1][-1:0:-1]
# path[1][0] = first_action
# path[2] = path[2][::-1]
# return path
@docstring.dedent_interpd
def add(self, patchlabel='', flows=None, orientations=None, labels='',
trunklength=1.0, pathlengths=0.25, prior=None, connect=(0, 0),
rotation=0, **kwargs):
"""
Add a simple Sankey diagram with flows at the same hierarchical level.
Return value is the instance of :class:`Sankey`.
Optional keyword arguments:
=============== ===================================================
Keyword Description
=============== ===================================================
*patchlabel* label to be placed at the center of the diagram
Note: *label* (not *patchlabel*) will be passed to
the patch through ``**kwargs`` and can be used to
create an entry in the legend.
*flows* array of flow values
By convention, inputs are positive and outputs are
negative.
*orientations* list of orientations of the paths
Valid values are 1 (from/to the top), 0 (from/to
the left or right), or -1 (from/to the bottom). If
*orientations* == 0, inputs will break in from the
left and outputs will break away to the right.
*labels* list of specifications of the labels for the flows
Each value may be *None* (no labels), '' (just
label the quantities), or a labeling string. If a
single value is provided, it will be applied to all
flows. If an entry is a non-empty string, then the
quantity for the corresponding flow will be shown
below the string. However, if the *unit* of the
main diagram is None, then quantities are never
shown, regardless of the value of this argument.
*trunklength* length between the bases of the input and output
groups
*pathlengths* list of lengths of the arrows before break-in or
after break-away
If a single value is given, then it will be applied
to the first (inside) paths on the top and bottom,
and the length of all other arrows will be
justified accordingly. The *pathlengths* are not
applied to the horizontal inputs and outputs.
*prior* index of the prior diagram to which this diagram
should be connected
*connect* a (prior, this) tuple indexing the flow of the
prior diagram and the flow of this diagram which
should be connected
If this is the first diagram or *prior* is *None*,
*connect* will be ignored.
*rotation* angle of rotation of the diagram [deg]
*rotation* is ignored if this diagram is connected
to an existing one (using *prior* and *connect*).
The interpretation of the *orientations* argument
will be rotated accordingly (e.g., if *rotation*
== 90, an *orientations* entry of 1 means to/from
the left).
=============== ===================================================
Valid kwargs are :meth:`matplotlib.patches.PathPatch` arguments:
%(Patch)s
As examples, ``fill=False`` and ``label='A legend entry'``.
By default, ``facecolor='#bfd1d4'`` (light blue) and
``linewidth=0.5``.
The indexing parameters (*prior* and *connect*) are zero-based.
The flows are placed along the top of the diagram from the inside out
in order of their index within the *flows* list or array. They are
placed along the sides of the diagram from the top down and along the
bottom from the outside in.
If the sum of the inputs and outputs is nonzero, the discrepancy
will appear as a cubic Bezier curve along the top and bottom edges of
the trunk.
.. seealso::
:meth:`finish`
"""
# Check and preprocess the arguments.
if flows is None:
flows = np.array([1.0, -1.0])
else:
flows = np.array(flows)
n = flows.shape[0] # Number of flows
if rotation is None:
rotation = 0
else:
# In the code below, angles are expressed in deg/90.
rotation /= 90.0
if orientations is None:
orientations = [0, 0]
if len(orientations) != n:
raise ValueError(
"orientations and flows must have the same length.\n"
"orientations has length %d, but flows has length %d."
% (len(orientations), n))
if labels != '' and getattr(labels, '__iter__', False):
# iterable() isn't used because it would give True if labels is a
# string
if len(labels) != n:
raise ValueError(
"If labels is a list, then labels and flows must have the "
"same length.\nlabels has length %d, but flows has length %d."
% (len(labels), n))
else:
labels = [labels] * n
if trunklength < 0:
raise ValueError(
"trunklength is negative.\nThis isn't allowed, because it would "
"cause poor layout.")
if np.absolute(np.sum(flows)) > self.tolerance:
verbose.report(
"The sum of the flows is nonzero (%f).\nIs the "
"system not at steady state?" % np.sum(flows), 'helpful')
scaled_flows = self.scale * flows
gain = sum(max(flow, 0) for flow in scaled_flows)
loss = sum(min(flow, 0) for flow in scaled_flows)
if not (0.5 <= gain <= 2.0):
verbose.report(
"The scaled sum of the inputs is %f.\nThis may "
"cause poor layout.\nConsider changing the scale so"
" that the scaled sum is approximately 1.0." % gain, 'helpful')
if not (-2.0 <= loss <= -0.5):
            verbose.report(
                "The scaled sum of the outputs is %f.\nThis may "
                "cause poor layout.\nConsider changing the scale so"
                " that the scaled sum is approximately 1.0." % loss, 'helpful')
if prior is not None:
if prior < 0:
raise ValueError("The index of the prior diagram is negative.")
if min(connect) < 0:
raise ValueError(
"At least one of the connection indices is negative.")
if prior >= len(self.diagrams):
raise ValueError(
"The index of the prior diagram is %d, but there are "
"only %d other diagrams.\nThe index is zero-based."
% (prior, len(self.diagrams)))
if connect[0] >= len(self.diagrams[prior].flows):
raise ValueError(
"The connection index to the source diagram is %d, but "
"that diagram has only %d flows.\nThe index is zero-based."
% (connect[0], len(self.diagrams[prior].flows)))
if connect[1] >= n:
                raise ValueError(
                    "The connection index to this diagram is %d, but this "
                    "diagram has only %d flows.\nThe index is zero-based."
% (connect[1], n))
if self.diagrams[prior].angles[connect[0]] is None:
raise ValueError(
"The connection cannot be made. Check that the magnitude "
"of flow %d of diagram %d is greater than or equal to the "
"specified tolerance." % (connect[0], prior))
flow_error = (self.diagrams[prior].flows[connect[0]] +
flows[connect[1]])
if abs(flow_error) >= self.tolerance:
raise ValueError(
"The scaled sum of the connected flows is %f, which is not "
"within the tolerance (%f)." % (flow_error, self.tolerance))
# Determine if the flows are inputs.
are_inputs = [None] * n
for i, flow in enumerate(flows):
if flow >= self.tolerance:
are_inputs[i] = True
elif flow <= -self.tolerance:
are_inputs[i] = False
else:
verbose.report(
"The magnitude of flow %d (%f) is below the "
"tolerance (%f).\nIt will not be shown, and it "
"cannot be used in a connection."
% (i, flow, self.tolerance), 'helpful')
# Determine the angles of the arrows (before rotation).
angles = [None] * n
for i, (orient, is_input) in enumerate(zip(orientations, are_inputs)):
if orient == 1:
if is_input:
angles[i] = DOWN
elif not is_input:
# Be specific since is_input can be None.
angles[i] = UP
elif orient == 0:
if is_input is not None:
angles[i] = RIGHT
else:
if orient != -1:
raise ValueError(
"The value of orientations[%d] is %d, "
"but it must be [ -1 | 0 | 1 ]." % (i, orient))
if is_input:
angles[i] = UP
elif not is_input:
angles[i] = DOWN
# Justify the lengths of the paths.
if iterable(pathlengths):
if len(pathlengths) != n:
raise ValueError(
"If pathlengths is a list, then pathlengths and flows must "
"have the same length.\npathlengths has length %d, but flows "
"has length %d." % (len(pathlengths), n))
else: # Make pathlengths into a list.
urlength = pathlengths
ullength = pathlengths
lrlength = pathlengths
lllength = pathlengths
d = dict(RIGHT=pathlengths)
pathlengths = [d.get(angle, 0) for angle in angles]
# Determine the lengths of the top-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(zip(angles, are_inputs,
scaled_flows)):
if angle == DOWN and is_input:
pathlengths[i] = ullength
ullength += flow
elif angle == UP and not is_input:
pathlengths[i] = urlength
urlength -= flow # Flow is negative for outputs.
# Determine the lengths of the bottom-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(reversed(list(zip(
angles, are_inputs, scaled_flows)))):
if angle == UP and is_input:
pathlengths[n - i - 1] = lllength
lllength += flow
elif angle == DOWN and not is_input:
pathlengths[n - i - 1] = lrlength
lrlength -= flow
# Determine the lengths of the left-side arrows
# from the bottom upwards.
has_left_input = False
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, zip(scaled_flows, pathlengths))))):
if angle == RIGHT:
if is_input:
if has_left_input:
pathlengths[n - i - 1] = 0
else:
has_left_input = True
# Determine the lengths of the right-side arrows
# from the top downwards.
has_right_output = False
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == RIGHT:
if not is_input:
if has_right_output:
pathlengths[i] = 0
else:
has_right_output = True
# Begin the subpaths, and smooth the transition if the sum of the flows
# is nonzero.
urpath = [(Path.MOVETO, [(self.gap - trunklength / 2.0), # Upper right
gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
gain / 2.0]),
(Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
gain / 2.0]),
(Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
-loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
-loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap),
-loss / 2.0])]
llpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower left
loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
loss / 2.0]),
(Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
loss / 2.0]),
(Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
-gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
-gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0),
-gain / 2.0])]
lrpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower right
loss / 2.0])]
ulpath = [(Path.LINETO, [self.gap - trunklength / 2.0, # Upper left
gain / 2.0])]
# Add the subpaths and assign the locations of the tips and labels.
tips = np.zeros((n, 2))
label_locations = np.zeros((n, 2))
# Add the top-side inputs and outputs from the middle outwards.
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == DOWN and is_input:
tips[i, :], label_locations[i, :] = self._add_input(
ulpath, angle, *spec)
elif angle == UP and not is_input:
tips[i, :], label_locations[i, :] = self._add_output(
urpath, angle, *spec)
# Add the bottom-side inputs and outputs from the middle outwards.
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == UP and is_input:
tip, label_location = self._add_input(llpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
elif angle == DOWN and not is_input:
tip, label_location = self._add_output(lrpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
# Add the left-side inputs from the bottom upwards.
has_left_input = False
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == RIGHT and is_input:
if not has_left_input:
# Make sure the lower path extends
# at least as far as the upper one.
if llpath[-1][1][0] > ulpath[-1][1][0]:
llpath.append((Path.LINETO, [ulpath[-1][1][0],
llpath[-1][1][1]]))
has_left_input = True
tip, label_location = self._add_input(llpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
# Add the right-side outputs from the top downwards.
has_right_output = False
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == RIGHT and not is_input:
if not has_right_output:
# Make sure the upper path extends
# at least as far as the lower one.
if urpath[-1][1][0] < lrpath[-1][1][0]:
urpath.append((Path.LINETO, [lrpath[-1][1][0],
urpath[-1][1][1]]))
has_right_output = True
tips[i, :], label_locations[i, :] = self._add_output(
urpath, angle, *spec)
# Trim any hanging vertices.
if not has_left_input:
ulpath.pop()
llpath.pop()
if not has_right_output:
lrpath.pop()
urpath.pop()
# Concatenate the subpaths in the correct order (clockwise from top).
path = (urpath + self._revert(lrpath) + llpath + self._revert(ulpath) +
[(Path.CLOSEPOLY, urpath[0][1])])
# Create a patch with the Sankey outline.
codes, vertices = list(zip(*path))
vertices = np.array(vertices)
def _get_angle(a, r):
if a is None:
return None
else:
return a + r
if prior is None:
if rotation != 0: # By default, none of this is needed.
angles = [_get_angle(angle, rotation) for angle in angles]
rotate = Affine2D().rotate_deg(rotation * 90).transform_affine
tips = rotate(tips)
label_locations = rotate(label_locations)
vertices = rotate(vertices)
text = self.ax.text(0, 0, s=patchlabel, ha='center', va='center')
else:
rotation = (self.diagrams[prior].angles[connect[0]] -
angles[connect[1]])
angles = [_get_angle(angle, rotation) for angle in angles]
rotate = Affine2D().rotate_deg(rotation * 90).transform_affine
tips = rotate(tips)
offset = self.diagrams[prior].tips[connect[0]] - tips[connect[1]]
translate = Affine2D().translate(*offset).transform_affine
tips = translate(tips)
label_locations = translate(rotate(label_locations))
vertices = translate(rotate(vertices))
kwds = dict(s=patchlabel, ha='center', va='center')
text = self.ax.text(*offset, **kwds)
if False: # Debug
print("llpath\n", llpath)
print("ulpath\n", self._revert(ulpath))
print("urpath\n", urpath)
print("lrpath\n", self._revert(lrpath))
xs, ys = list(zip(*vertices))
self.ax.plot(xs, ys, 'go-')
patch = PathPatch(Path(vertices, codes),
fc=kwargs.pop('fc', kwargs.pop('facecolor',
'#bfd1d4')), # Custom defaults
lw=kwargs.pop('lw', kwargs.pop('linewidth', 0.5)),
**kwargs)
self.ax.add_patch(patch)
# Add the path labels.
texts = []
for number, angle, label, location in zip(flows, angles, labels,
label_locations):
if label is None or angle is None:
label = ''
elif self.unit is not None:
quantity = self.format % abs(number) + self.unit
if label != '':
label += "\n"
label += quantity
texts.append(self.ax.text(x=location[0], y=location[1],
s=label,
ha='center', va='center'))
        # Text objects are placed even if they are empty (as long as the magnitude
# of the corresponding flow is larger than the tolerance) in case the
# user wants to provide labels later.
# Expand the size of the diagram if necessary.
self.extent = (min(np.min(vertices[:, 0]),
np.min(label_locations[:, 0]),
self.extent[0]),
max(np.max(vertices[:, 0]),
np.max(label_locations[:, 0]),
self.extent[1]),
min(np.min(vertices[:, 1]),
np.min(label_locations[:, 1]),
self.extent[2]),
max(np.max(vertices[:, 1]),
np.max(label_locations[:, 1]),
self.extent[3]))
        # Include both vertices _and_ label locations in the extents; there are
        # cases where either could determine the margins (e.g., arrow shoulders).
# Add this diagram as a subdiagram.
self.diagrams.append(Bunch(patch=patch, flows=flows, angles=angles,
tips=tips, text=text, texts=texts))
# Allow a daisy-chained call structure (see docstring for the class).
return self
def finish(self):
"""
Adjust the axes and return a list of information about the Sankey
subdiagram(s).
Return value is a list of subdiagrams represented with the following
fields:
=============== ===================================================
Field Description
=============== ===================================================
*patch* Sankey outline (an instance of
                        :class:`~matplotlib.patches.PathPatch`)
*flows* values of the flows (positive for input, negative
for output)
*angles* list of angles of the arrows [deg/90]
For example, if the diagram has not been rotated,
an input to the top side will have an angle of 3
(DOWN), and an output from the top side will have
an angle of 1 (UP). If a flow has been skipped
(because its magnitude is less than *tolerance*),
then its angle will be *None*.
*tips* array in which each row is an [x, y] pair
indicating the positions of the tips (or "dips") of
the flow paths
If the magnitude of a flow is less the *tolerance*
for the instance of :class:`Sankey`, the flow is
skipped and its tip will be at the center of the
diagram.
*text* :class:`~matplotlib.text.Text` instance for the
label of the diagram
*texts* list of :class:`~matplotlib.text.Text` instances
for the labels of flows
=============== ===================================================
.. seealso::
:meth:`add`
"""
self.ax.axis([self.extent[0] - self.margin,
self.extent[1] + self.margin,
self.extent[2] - self.margin,
self.extent[3] + self.margin])
self.ax.set_aspect('equal', adjustable='datalim')
return self.diagrams
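# A minimal usage sketch of the daisy-chained call pattern described in the class
# docstring. The flow values, labels and orientations below are illustrative only.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    Sankey(flows=[0.25, 0.15, 0.60, -0.20, -0.15, -0.05, -0.50, -0.10],
           labels=['', '', '', 'First', 'Second', 'Third', 'Fourth', 'Fifth'],
           orientations=[-1, 1, 0, 1, 1, 1, 0, -1]).finish()
    plt.title("A single Sankey diagram with default settings")
    plt.show()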
| 0.000931 |
# Copyright: (c) 2018, Toshio Kuratomi <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
Context of the running Ansible.
In the future we *may* create Context objects to allow running multiple Ansible plays in parallel
with different contexts but that is currently out of scope as the Ansible library is just for
running the ansible command line tools.
These APIs are still in flux so do not use them unless you are willing to update them with every Ansible release
"""
from ansible.module_utils.common._collections_compat import Mapping, Set
from ansible.module_utils.common.collections import is_sequence
from ansible.utils.context_objects import CLIArgs, GlobalCLIArgs
__all__ = ('CLIARGS',)
# Note: this is not the singleton version. The Singleton is only created once the program has
# actually parsed the args
CLIARGS = CLIArgs({})
# This should be called immediately after cli_args are processed (parsed, validated, and any
# normalization performed on them). No other code should call it
def _init_global_context(cli_args):
"""Initialize the global context objects"""
global CLIARGS
CLIARGS = GlobalCLIArgs.from_options(cli_args)
def cliargs_deferred_get(key, default=None, shallowcopy=False):
"""Closure over getting a key from CLIARGS with shallow copy functionality
Primarily used in ``FieldAttribute`` where we need to defer setting the default
until after the CLI arguments have been parsed
This function is not directly bound to ``CliArgs`` so that it works with
``CLIARGS`` being replaced
"""
def inner():
value = CLIARGS.get(key, default=default)
if not shallowcopy:
return value
elif is_sequence(value):
return value[:]
elif isinstance(value, (Mapping, Set)):
return value.copy()
return value
return inner
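# A minimal sketch of the deferred lookup above (the 'tags' key and its values are
# illustrative; real callers are FieldAttribute defaults inside Ansible). The closure
# reads the module-level CLIARGS at call time, so replacing CLIARGS later changes the
# value it returns.
if __name__ == '__main__':
    get_tags = cliargs_deferred_get('tags', default=[], shallowcopy=True)
    print(get_tags())  # [] -- the CLI has not been parsed yet
    CLIARGS = CLIArgs({'tags': ['setup', 'deploy']})  # stand-in for parsed CLI options
    print(get_tags())  # now reflects the replaced CLIARGS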
| 0.003416 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This program prompts for a password."""
import authentication
import getpass
def login(username, maxattempts=3):
"""This function takes input from a user and checks the password.
Arg:
username(str): String input from user.
maxattempts(int): Max attempts for login.
Return:
auth(boolean): True or False if user successfully authenticated
before hitting maximum no. of failed attempts.
Examples:
    >>> login('mike', 4)
    Incorrect username or password. You have 3 attempts.
    Incorrect username or password. You have 2 attempts.
    Incorrect username or password. You have 1 attempts.
    Incorrect username or password. You have 0 attempts.
False
"""
auth = False
user_login = 'Please enter your password: '
    auth_fail = "Incorrect username or password. You have {} attempts."
attempt = 1
while attempt <= maxattempts:
passwd = getpass.getpass(user_login)
message = authentication.authenticate(username, passwd)
if message:
auth = True
break
else:
print auth_fail.format(maxattempts - attempt)
attempt += 1
return auth
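# The companion 'authentication' module is not shown in this file; a stand-in that is
# consistent with how it is called above (authenticate(username, password) returning a
# truthy value on success) could look like this -- an assumption for local testing only:
#
#   # authentication.py
#   def authenticate(username, password):
#       return username == 'mike' and password == 'secret'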
| 0 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import webob
from nova import compute
from nova import db
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get(*args, **kwargs):
inst = fakes.stub_instance(1, uuid=UUID3, task_state="kayaking",
vm_state="slightly crunchy", power_state=1)
return fake_instance.fake_instance_obj(args[1], **inst)
def fake_compute_get_all(*args, **kwargs):
db_list = [
fakes.stub_instance(1, uuid=UUID1, task_state="task-1",
vm_state="vm-1", power_state=1),
fakes.stub_instance(2, uuid=UUID2, task_state="task-2",
vm_state="vm-2", power_state=2),
]
fields = instance_obj.INSTANCE_DEFAULT_FIELDS
return instance_obj._make_instance_list(args[1],
objects.InstanceList(),
db_list, fields)
class ExtendedStatusTestV21(test.TestCase):
content_type = 'application/json'
prefix = 'OS-EXT-STS:'
fake_url = '/v2/fake'
def _set_flags(self):
pass
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app_v21(
init_only=('servers',
'os-extended-status')))
return res
def setUp(self):
super(ExtendedStatusTestV21, self).setUp()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
self._set_flags()
return_server = fakes.fake_instance_get()
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def assertServerStates(self, server, vm_state, power_state, task_state):
self.assertEqual(server.get('%svm_state' % self.prefix), vm_state)
self.assertEqual(int(server.get('%spower_state' % self.prefix)),
power_state)
self.assertEqual(server.get('%stask_state' % self.prefix), task_state)
def test_show(self):
url = self.fake_url + '/servers/%s' % UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
self.assertServerStates(self._get_server(res.body),
vm_state='slightly crunchy',
power_state=1,
task_state='kayaking')
def test_detail(self):
url = self.fake_url + '/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
for i, server in enumerate(self._get_servers(res.body)):
self.assertServerStates(server,
vm_state='vm-%s' % (i + 1),
power_state=(i + 1),
task_state='task-%s' % (i + 1))
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = self.fake_url + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
self.assertEqual(res.status_int, 404)
class ExtendedStatusTestV2(ExtendedStatusTestV21):
def _set_flags(self):
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Extended_status'])
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
return res
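# For reference, the extended-status attributes asserted above appear in the server
# document roughly as follows (values are the fakes stubbed in fake_compute_get):
#
#   {"server": {"OS-EXT-STS:vm_state": "slightly crunchy",
#               "OS-EXT-STS:power_state": 1,
#               "OS-EXT-STS:task_state": "kayaking",
#               ...}}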
| 0.000609 |
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
path= os.path.expanduser("~/Desktop/ece671/SwitchAnalysis/PlotDump")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
t=[]
i=0
def file_len(fname):
    # count the lines in a file; an empty file yields 0
    count = 0
    with open(fname) as f:
        for count, _ in enumerate(f, 1):
            pass
    return count
while i<(num_files/2) :
# df+=[]
j=i+1
path ="/home/vetri/Desktop/ece671/SwitchAnalysis/PlotDump/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
t.append(y)
i+=1
print(t)
path= os.path.expanduser("~/Desktop/ece671/SwitchAnalysis/PlotDump")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
f=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/SwitchAnalysis/PlotDump/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<=1:
y=0
f.append(y)
i+=1
print(f)
plt.plot(list(range(1,len(t)+1)),t, '.-',label="traffic type and timeout")
plt.title("Total Flows Present after 1st flow")
plt.xlabel("time(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
plt.plot(list(range(1,len(f)+1)),f, '.-',label="traffic type and timeout")
plt.title("Flows programmed per sec")
plt.xlabel("time(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
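# A compact sketch of the two counting loops above (the same PlotDump layout is assumed;
# the special-case clamps on y are not reproduced here):
#
#   def count_series(prefix, n_files):
#       counts = []
#       for j in range(1, n_files + 1):
#           fname = os.path.join(path, "{}.{}.csv".format(prefix, j))
#           with open(fname) as fh:
#               counts.append(sum(1 for _ in fh))
#       return counts
#
#   t = count_series("ftotal", int(num_files / 2))
#   f = count_series("fpersec", int(num_files / 2))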
| 0.026872 |
# Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.models import User, Group
from account.models import *
def get_groups_for_code(regcode):
"""Returns a list of groups for a given registration code."""
grouplist = []
for group in Group.objects.filter(profile__isnull=False).all():
profile = GroupProfile.objects.get(group=group)
if profile.regcode == regcode:
grouplist.append(group)
return grouplist
def register_groups(user, regcode):
"""Registers a user for all groups associated with a registration code."""
grouplist = get_groups_for_code(regcode)
if len(grouplist) != 0:
for group in grouplist:
user.groups.add(group)
return None
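# A minimal usage sketch (the user name and registration code are illustrative; requires
# a populated database):
#
#   user = User.objects.get(username='new_pilot')
#   register_groups(user, 'CORP-REGCODE-2013')
#   # user.groups now includes every Group whose GroupProfile.regcode matched the code.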
| 0.001314 |
from datetime import datetime
import time
from openerp import netsvc, tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
class hr_employee(osv.osv):
_name = "hr.employee"
_description = "Adding leaves specific details"
_inherit = "hr.employee"
_columns = {
'religion': fields.selection([('muslim','muslim'),('christian','christian')],'religion',required=True),
}
_defaults = {
'religion':'muslim',
}
hr_employee()
class motherhood_leaves(osv.osv):
_name = 'motherhood.leaves'
_description = 'motherhood leaves model'
_rec_name = 'child_order'
_columns = {
'child_order' : fields.integer(
'Child Order',
required=True,
),
'leave_balance' : fields.integer(
'Leave Balance',
required=True,
),
}
_sql_constraints = [('leave_uniq','unique(child_order, leave_balance)', 'Motherhood Leave With The Same Data Already Exist!')]
def _check_fields(self, cr, uid, ids, context=None):
records = self.browse(cr, uid, ids, context=context)
for rec in records:
if rec.child_order <= 0 or rec.leave_balance <= 0:
return False
else:
return True
    _constraints = [(_check_fields, 'Error : Fields Must Be Positive Numbers Higher Than 0!', ['child_order', 'leave_balance']),]
class pilgrimage_leaves(osv.osv):
_name = 'pilgrimage.leaves'
_description = 'pilgrimage leaves model'
_rec_name = 'religion'
_columns = {
'religion' : fields.selection(
[('muslim', 'Muslim'), ('christian', 'Christian'),],
'Religion',
required=True,
),
'leave_balance' : fields.integer(
'Leave Balance',
required=True,
),
}
_sql_constraints = [('religion_uniq','unique(religion)', ' Pilgrimage Leaves With The Same Data Already Exist!')]
def _check_fields(self, cr, uid, ids, context=None):
records = self.browse(cr, uid, ids, context=context)
for rec in records:
if rec.leave_balance <=0:
return False
else:
return True
    _constraints = [(_check_fields, 'Error : Leave Balance Must Be A Positive Number Higher Than 0!', ['leave_balance']),]
class leaves_service_time(osv.osv):
_name = "leaves.service.time"
_description = "leaves structure according to years of service"
_columns = {
'from_': fields.integer('From',required=True),
'to_': fields.integer('To',required=True),
'allocation': fields.integer('Allocated leave',required=True),
}
_sql_constraints=[('unique_record','unique(from_,to_)','Service years records already there')]
class hr_holidays_status(osv.osv):
_name = "hr.holidays.status"
_description = "default allocation for leave types"
_inherit = "hr.holidays.status"
_columns = {
'default_allocation': fields.integer('Allocation'),
'transfer_allocation': fields.boolean('Transfer overflow', help='Allows you to transfer last years remaining holidays for this years'),
'allocation_type': fields.selection([('1','Default allocation'),('2','Religion'),('3','Years of Service'),('4','Motherhood Leaves')],"Allocation based on",required=True),
}
_defaults = {
        'allocation_type': '1',  # key of the 'Default allocation' selection entry
}
#remember to put constraints on allocation types
hr_holidays_status()
class hr_holidays(osv.osv):
_name = "hr.holidays"
_description = "Overriding default allocation scheme"
_inherit = "hr.holidays"
def get_Current_allocation_scheme(self, cr, uid, ids, employee_id, holiday_status_id, context=None):
if context is None:
context = {}
if employee_id and holiday_status_id:
leave_type = self.pool.get("hr.holidays.status").browse(cr, uid, [holiday_status_id], context=context)[0].allocation_type
res = 0
if leave_type:
if leave_type == '1':
ids_ = self.pool.get("hr.holidays.status").search(cr,uid ,[('allocation_type','=',leave_type)])
res = self.pool.get("hr.holidays.status").browse(cr, uid, ids_, context=context)[0].default_allocation
elif leave_type == '2':
ids_ = self.pool.get("hr.employee").search(cr, uid, [('id','=',employee_id)])
employee_religion = self.pool.get("hr.employee").browse(cr, uid, ids_, context=context)[0].religion
ids_= self.pool.get("pilgrimage.leaves").search(cr, uid, [('religion','=',employee_religion)])
res = self.pool.get("pilgrimage.leaves").browse(cr, uid, ids_, context=context)[0].leave_balance
elif leave_type == '3':
ids_ = self.pool.get("hr.contract").search(cr , uid,[('employee_id','=',employee_id)])
if ids_:
contract_start = self.pool.get("hr.contract").browse(cr, uid, ids_ ,context=context)[0].date_start
date_start = datetime.strptime(contract_start,"%Y-%m-%d")
years_of_service = (datetime.now()-date_start).days/365
ids_ = self.pool.get("leaves.service.time").search(cr, uid, [])
objs = self.pool.get("leaves.service.time").browse(cr, uid, ids_ ,context=context)
for obj in objs:
if years_of_service >= obj.from_ and years_of_service <= obj.to_:
res = obj.allocation
elif leave_type == '4':
ids_ = self.pool.get("hr.employee").search(cr, uid, [('id','=',employee_id)])
children = self.pool.get("hr.employee").browse(cr, uid, ids_, context=context)[0].children
                    gender = self.pool.get("hr.employee").browse(cr, uid, ids_, context=context)[0].gender
if children == 0 or not gender or gender == "male":
res = 0
else:
ids_ = self.pool.get("motherhood.leaves").search(cr, uid, [('child_order','=',children)])
if not ids_:
ids_ = self.pool.get("motherhood.leaves").search(cr, uid, [])
objs = self.pool.get("motherhood.leaves").browse(cr, uid, ids_ ,context=context)
max_child = max([x.child_order for x in objs])
for x in objs:
if x.child_order == max_child:
res = x.leave_balance
res = self.pool.get("motherhood.leaves").browse(cr ,uid, ids_, context=context)[0].leave_balance
return {'value':{'number_of_days_temp':res}}
def _check_for_contract(self, cr, uid, ids, context=None):
rec = self.browse(cr, uid, ids)[0]
contract = self.pool.get("hr.contract").search(cr ,uid, [('employee_id','=',rec.employee_id.id)])
if contract:
return True
return False
def _default_holiday_status(self, cr, uid, ids, context=None):
if context is None:
context = {}
ids_ = self.pool.get("hr.holidays.status").search(cr, uid, [])
return self.pool.get("hr.holidays.status").browse(cr, uid, ids_[0], context=context).id
_constraints = [(_check_for_contract,'Employee has no contract specified',['holiday_status_id'])]
_defaults = {
'holiday_status_id':_default_holiday_status,
}
def leaves_allocation_cron_job(self, cr, uid, ids=all, context=None):
# get today information
day = datetime.today().day
month = datetime.today().month
# this function should run at date 1/1 every year
if day == 1 and month == 1:
emp_gender = None
emp_children = None
emp_religion = None
# get all employee ids
all_emp_ids = self.pool.get('hr.employee').search(cr, uid, [])
for emp_id in all_emp_ids:
# get information stored in each employee object
emp_info = self.pool.get('hr.employee').browse(cr, uid, emp_id)
if emp_info.gender:
emp_gender = emp_info.gender
if emp_info.children:
emp_children = emp_info.children
if emp_info.religion:
emp_religion = emp_info.religion
# get each employee contract
emp_contract_id = self.pool.get('hr.contract').search(cr ,uid, [('employee_id','=',emp_id)])
# if employee has a contract registered years of service leave
if emp_contract_id:
emp_contract = self.pool.get('hr.contract').browse(cr, uid, emp_contract_id[0])
emp_start_service = datetime.strptime(emp_contract.date_start,"%Y-%m-%d")
years_of_service = (datetime.now()-emp_start_service).days/365
service_leave_id = self.pool.get('leaves.service.time').search(cr, uid, [('from_', '<=', years_of_service), ('to_', '>=' , years_of_service)])
holiday_status_id = self.pool.get('hr.holidays.status').search(cr, uid, [('allocation_type', '=', '3')])
holiday_status_rec = self.pool.get('hr.holidays.status').browse(cr, uid, holiday_status_id[0])
if service_leave_id and holiday_status_id and holiday_status_rec.transfer_allocation:
leave_period = self.pool.get('leaves.service.time').browse(cr, uid, service_leave_id[0]).allocation
# save new leave to database --> don't try to create it with validate state directly "will not work" :)
holiday_id = self.pool.get('hr.holidays').create(cr, uid, {
'holiday_status_id' : holiday_status_id[0],
'employee_id' : emp_id,
'department_id' : emp_info.department_id.id,
'user_id' : uid,
'name' : holiday_status_rec.name,
'number_of_days' : float(leave_period),
'number_of_days_temp' : leave_period,
'holiday_type' : 'employee',
'state' : 'confirm',
'manager_id' : emp_info.parent_id.id,
'type' : 'add',
})
# change leave state to be confirmed
self.pool.get('hr.holidays').write(cr, uid, [holiday_id,], {
'state' : 'validate',
})
# if female employee and has contract add motherhood leave to new years leaves
if emp_gender == "female" and emp_children > 0 and emp_contract_id:
motherhood_leave_id = self.pool.get('motherhood.leaves').search(cr, uid, [('child_order', '=', emp_children)])
holiday_status_id = self.pool.get('hr.holidays.status').search(cr, uid, [('allocation_type', '=', '4')])
holiday_status_rec = self.pool.get('hr.holidays.status').browse(cr, uid, holiday_status_id[0])
# if motherhood leave rule id found and holiday stored in holidays types and leave can be transfered to next year.
if motherhood_leave_id and holiday_status_id and holiday_status_rec.transfer_allocation:
leave_period = self.pool.get('motherhood.leaves').browse(cr, uid, motherhood_leave_id[0]).leave_balance
# save new leave to database
holiday_id = self.pool.get('hr.holidays').create(cr, uid, {
'holiday_status_id' : holiday_status_id[0],
'employee_id' : emp_id,
'department_id' : emp_info.department_id.id,
'user_id' : uid,
'name' : holiday_status_rec.name,
'number_of_days' : float(leave_period),
'number_of_days_temp' : leave_period,
'holiday_type' : 'employee',
'state' : 'confirm',
'manager_id' : emp_info.parent_id.id,
'type' : 'add',
})
# change leave state to be confirmed
self.pool.get('hr.holidays').write(cr, uid, [holiday_id,], {
'state' : 'validate',
})
# if exeeded maximum number of children
elif holiday_status_id and holiday_status_rec.transfer_allocation:
all_moth_leaves_ids = self.pool.get("motherhood.leaves").search(cr, uid, [])
leave_objs = self.pool.get("motherhood.leaves").browse(cr, uid, all_moth_leaves_ids ,context=context)
max_leave_period = max([leave.leave_balance for leave in leave_objs])
holiday_id = self.pool.get('hr.holidays').create(cr, uid, {
'holiday_status_id' : holiday_status_id[0],
'employee_id' : emp_id,
'department_id' : emp_info.department_id.id,
'user_id' : uid,
'name' : holiday_status_rec.name,
'number_of_days' : float(max_leave_period),
'number_of_days_temp' : max_leave_period,
'holiday_type' : 'employee',
'state' : 'confirm',
'manager_id' : emp_info.parent_id.id,
'type' : 'add',
})
# change leave state to be confirmed
self.pool.get('hr.holidays').write(cr, uid, [holiday_id,], {
'state' : 'validate',
})
# if employee is muslim or christian and has contract add pilgrimage leave balance
if emp_contract_id:
pilgrimage_leave_id = self.pool.get('pilgrimage.leaves').search(cr, uid, [('religion', '=', emp_religion)])
holiday_status_id = self.pool.get('hr.holidays.status').search(cr, uid, [('allocation_type', '=', '2')])
holiday_status_rec = self.pool.get('hr.holidays.status').browse(cr, uid, holiday_status_id[0])
# if pilgrimage leave rule found and holiday stored in holidays types
if pilgrimage_leave_id and holiday_status_id and holiday_status_rec.transfer_allocation:
leave_period = self.pool.get('pilgrimage.leaves').browse(cr, uid, pilgrimage_leave_id[0]).leave_balance
# save new leave to database
holiday_id = self.pool.get('hr.holidays').create(cr, uid, {
'holiday_status_id' : holiday_status_id[0],
'employee_id' : emp_id,
'department_id' : emp_info.department_id.id,
'user_id' : uid,
'name' : holiday_status_rec.name,
'number_of_days' : float(leave_period),
'number_of_days_temp' : leave_period,
'holiday_type' : 'employee',
'state' : 'confirm',
'manager_id' : emp_info.parent_id.id,
'type' : 'add',
})
# change leave state to be confirmed
self.pool.get('hr.holidays').write(cr, uid, [holiday_id,], {
'state' : 'validate',
})
return True
hr_holidays()
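# A self-contained sketch of the years-of-service lookup performed for allocation_type
# '3' above (the brackets and start date are illustrative; real values come from the
# leaves.service.time records and the employee's contract):
if __name__ == '__main__':
    brackets = [(0, 4, 15), (5, 9, 21), (10, 99, 30)]  # (from_, to_, allocation)
    date_start = datetime.strptime('2008-03-01', "%Y-%m-%d")
    years_of_service = (datetime.now() - date_start).days / 365
    allocation = 0
    for from_, to_, alloc in brackets:
        if from_ <= years_of_service <= to_:
            allocation = alloc
    print(allocation)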
| 0.037935 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Miscellaneous node types.
"""
import os.path
import re
import sys
from grit import constants
from grit import exception
from grit import util
import grit.format.rc_header
from grit.node import base
from grit.node import io
from grit.node import message
# RTL languages
# TODO(jennyz): remove this fixed set of RTL language array
# now that generic expand_variable code exists.
_RTL_LANGS = (
'ar', # Arabic
'fa', # Farsi
'iw', # Hebrew
'ks', # Kashmiri
'ku', # Kurdish
'ps', # Pashto
'ur', # Urdu
'yi', # Yiddish
)
def _ReadFirstIdsFromFile(filename, defines):
"""Read the starting resource id values from |filename|. We also
expand variables of the form <(FOO) based on defines passed in on
the command line.
Returns a tuple, the absolute path of SRCDIR followed by the
first_ids dictionary.
"""
first_ids_dict = eval(util.ReadFile(filename, util.RAW_TEXT))
src_root_dir = os.path.abspath(os.path.join(os.path.dirname(filename),
first_ids_dict['SRCDIR']))
def ReplaceVariable(matchobj):
for key, value in defines.iteritems():
if matchobj.group(1) == key:
value = os.path.abspath(value)[len(src_root_dir) + 1:]
return value
return ''
renames = []
for grd_filename in first_ids_dict:
new_grd_filename = re.sub(r'<\(([A-Za-z_]+)\)', ReplaceVariable,
grd_filename)
if new_grd_filename != grd_filename:
new_grd_filename = new_grd_filename.replace('\\', '/')
renames.append((grd_filename, new_grd_filename))
for grd_filename, new_grd_filename in renames:
first_ids_dict[new_grd_filename] = first_ids_dict[grd_filename]
del(first_ids_dict[grd_filename])
return (src_root_dir, first_ids_dict)
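# Illustrative shape of a first-IDs file as consumed above: a Python dict literal with a
# 'SRCDIR' entry plus one entry per .grd path, where paths may embed <(FOO) variables
# expanded from the defines passed on the command line (the per-file value layout shown
# here is an assumption):
#
#   {
#     'SRCDIR': '../..',
#     '<(SHARED_INTERMEDIATE_DIR)/app/app_resources.grd': {
#       'includes': [400],
#     },
#   }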
class SplicingNode(base.Node):
"""A node whose children should be considered to be at the same level as
its siblings for most purposes. This includes <if> and <part> nodes.
"""
def _IsValidChild(self, child):
assert self.parent, '<%s> node should never be root.' % self.name
if isinstance(child, SplicingNode):
return True # avoid O(n^2) behavior
return self.parent._IsValidChild(child)
class IfNode(SplicingNode):
"""A node for conditional inclusion of resources.
"""
def MandatoryAttributes(self):
return ['expr']
def _IsValidChild(self, child):
return (isinstance(child, (ThenNode, ElseNode)) or
super(IfNode, self)._IsValidChild(child))
def EndParsing(self):
children = self.children
self.if_then_else = False
if any(isinstance(node, (ThenNode, ElseNode)) for node in children):
if (len(children) != 2 or not isinstance(children[0], ThenNode) or
not isinstance(children[1], ElseNode)):
raise exception.UnexpectedChild(
'<if> element must be <if><then>...</then><else>...</else></if>')
self.if_then_else = True
def ActiveChildren(self):
cond = self.EvaluateCondition(self.attrs['expr'])
if self.if_then_else:
return self.children[0 if cond else 1].ActiveChildren()
else:
# Equivalent to having all children inside <then> with an empty <else>
return super(IfNode, self).ActiveChildren() if cond else []
class ThenNode(SplicingNode):
"""A <then> node. Can only appear directly inside an <if> node."""
pass
class ElseNode(SplicingNode):
"""An <else> node. Can only appear directly inside an <if> node."""
pass
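# Illustrative .grd usage of the two forms accepted by IfNode.EndParsing() above
# (the expressions and child resources are made up):
#
#   <if expr="is_posix">
#     <include name="IDR_FOO" file="foo.html" type="BINDATA" />
#   </if>
#
#   <if expr="lang == 'ar'">
#     <then> ...resources for RTL locales... </then>
#     <else> ...resources for everything else... </else>
#   </if>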
class PartNode(SplicingNode):
"""A node for inclusion of sub-grd (*.grp) files.
"""
def __init__(self):
super(PartNode, self).__init__()
self.started_inclusion = False
def MandatoryAttributes(self):
return ['file']
def _IsValidChild(self, child):
return self.started_inclusion and super(PartNode, self)._IsValidChild(child)
class ReleaseNode(base.Node):
"""The <release> element."""
def _IsValidChild(self, child):
from grit.node import empty
return isinstance(child, (empty.IncludesNode, empty.MessagesNode,
empty.StructuresNode, empty.IdentifiersNode))
def _IsValidAttribute(self, name, value):
return (
(name == 'seq' and int(value) <= self.GetRoot().GetCurrentRelease()) or
name == 'allow_pseudo'
)
def MandatoryAttributes(self):
return ['seq']
def DefaultAttributes(self):
return { 'allow_pseudo' : 'true' }
  def GetReleaseNumber(self):
    """Returns the sequence number of this release."""
    return self.attrs['seq']
class GritNode(base.Node):
"""The <grit> root element."""
def __init__(self):
super(GritNode, self).__init__()
self.output_language = ''
self.defines = {}
self.substituter = None
self.target_platform = sys.platform
def _IsValidChild(self, child):
from grit.node import empty
return isinstance(child, (ReleaseNode, empty.TranslationsNode,
empty.OutputsNode))
def _IsValidAttribute(self, name, value):
if name not in ['base_dir', 'first_ids_file', 'source_lang_id',
'latest_public_release', 'current_release',
'enc_check', 'tc_project', 'grit_version',
'output_all_resource_defines']:
return False
if name in ['latest_public_release', 'current_release'] and value.strip(
'0123456789') != '':
return False
return True
def MandatoryAttributes(self):
return ['latest_public_release', 'current_release']
def DefaultAttributes(self):
return {
'base_dir' : '.',
'first_ids_file': '',
'grit_version': 1,
'source_lang_id' : 'en',
'enc_check' : constants.ENCODING_CHECK,
'tc_project' : 'NEED_TO_SET_tc_project_ATTRIBUTE',
'output_all_resource_defines': 'true'
}
def EndParsing(self):
super(GritNode, self).EndParsing()
if (int(self.attrs['latest_public_release'])
> int(self.attrs['current_release'])):
raise exception.Parsing('latest_public_release cannot have a greater '
'value than current_release')
self.ValidateUniqueIds()
# Add the encoding check if it's not present (should ensure that it's always
# present in all .grd files generated by GRIT). If it's present, assert if
# it's not correct.
if 'enc_check' not in self.attrs or self.attrs['enc_check'] == '':
self.attrs['enc_check'] = constants.ENCODING_CHECK
else:
assert self.attrs['enc_check'] == constants.ENCODING_CHECK, (
'Are you sure your .grd file is in the correct encoding (UTF-8)?')
def ValidateUniqueIds(self):
"""Validate that 'name' attribute is unique in all nodes in this tree
except for nodes that are children of <if> nodes.
"""
unique_names = {}
duplicate_names = []
# To avoid false positives from mutually exclusive <if> clauses, check
# against whatever the output condition happens to be right now.
# TODO(benrg): do something better.
for node in self.ActiveDescendants():
if node.attrs.get('generateid', 'true') == 'false':
continue # Duplication not relevant in that case
for node_id in node.GetTextualIds():
if util.SYSTEM_IDENTIFIERS.match(node_id):
continue # predefined IDs are sometimes used more than once
if node_id in unique_names and node_id not in duplicate_names:
duplicate_names.append(node_id)
unique_names[node_id] = 1
if len(duplicate_names):
raise exception.DuplicateKey(', '.join(duplicate_names))
def GetCurrentRelease(self):
"""Returns the current release number."""
return int(self.attrs['current_release'])
def GetLatestPublicRelease(self):
"""Returns the latest public release number."""
return int(self.attrs['latest_public_release'])
def GetSourceLanguage(self):
"""Returns the language code of the source language."""
return self.attrs['source_lang_id']
def GetTcProject(self):
"""Returns the name of this project in the TranslationConsole, or
'NEED_TO_SET_tc_project_ATTRIBUTE' if it is not defined."""
return self.attrs['tc_project']
def SetOwnDir(self, dir):
"""Informs the 'grit' element of the directory the file it is in resides.
This allows it to calculate relative paths from the input file, which is
what we desire (rather than from the current path).
Args:
dir: r'c:\bla'
Return:
None
"""
assert dir
self.base_dir = os.path.normpath(os.path.join(dir, self.attrs['base_dir']))
def GetBaseDir(self):
"""Returns the base directory, relative to the working directory. To get
the base directory as set in the .grd file, use GetOriginalBaseDir()
"""
if hasattr(self, 'base_dir'):
return self.base_dir
else:
return self.GetOriginalBaseDir()
def GetOriginalBaseDir(self):
"""Returns the base directory, as set in the .grd file.
"""
return self.attrs['base_dir']
def ShouldOutputAllResourceDefines(self):
"""Returns true if all resource defines should be output, false if
defines for resources not emitted to resource files should be
skipped.
"""
return self.attrs['output_all_resource_defines'] == 'true'
def GetInputFiles(self):
"""Returns the list of files that are read to produce the output."""
# Importing this here avoids a circular dependency in the imports.
# pylint: disable-msg=C6204
from grit.node import include
from grit.node import misc
from grit.node import structure
from grit.node import variant
# Check if the input is required for any output configuration.
input_files = set()
old_output_language = self.output_language
for lang, ctx in self.GetConfigurations():
self.SetOutputLanguage(lang or self.GetSourceLanguage())
self.SetOutputContext(ctx)
for node in self.ActiveDescendants():
if isinstance(node, (io.FileNode, include.IncludeNode, misc.PartNode,
structure.StructureNode, variant.SkeletonNode)):
input_files.add(node.GetInputPath())
self.SetOutputLanguage(old_output_language)
return sorted(map(self.ToRealPath, input_files))
def GetFirstIdsFile(self):
"""Returns a usable path to the first_ids file, if set, otherwise
returns None.
The first_ids_file attribute is by default relative to the
base_dir of the .grd file, but may be prefixed by GRIT_DIR/,
which makes it relative to the directory of grit.py
(e.g. GRIT_DIR/../gritsettings/resource_ids).
"""
if not self.attrs['first_ids_file']:
return None
path = self.attrs['first_ids_file']
GRIT_DIR_PREFIX = 'GRIT_DIR'
if (path.startswith(GRIT_DIR_PREFIX)
and path[len(GRIT_DIR_PREFIX)] in ['/', '\\']):
return util.PathFromRoot(path[len(GRIT_DIR_PREFIX) + 1:])
else:
return self.ToRealPath(path)
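  # Illustrative sketch (not from the original source): with the docstring's
  # example value, first_ids_file="GRIT_DIR/../gritsettings/resource_ids"
  # resolves via util.PathFromRoot('../gritsettings/resource_ids'), i.e.
  # relative to grit.py's directory, whereas a plain relative value such as
  # "tools/gritsettings/resource_ids" (hypothetical) resolves via
  # self.ToRealPath() relative to the .grd file's base_dir.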
def GetOutputFiles(self):
"""Returns the list of <output> nodes that are descendants of this node's
<outputs> child and are not enclosed by unsatisfied <if> conditionals.
"""
for child in self.children:
if child.name == 'outputs':
return [node for node in child.ActiveDescendants()
if node.name == 'output']
raise exception.MissingElement()
def GetConfigurations(self):
"""Returns the distinct (language, context) pairs from the output nodes.
"""
return set((n.GetLanguage(), n.GetContext()) for n in self.GetOutputFiles())
def GetSubstitutionMessages(self):
"""Returns the list of <message sub_variable="true"> nodes."""
return [n for n in self.ActiveDescendants()
if isinstance(n, message.MessageNode)
and n.attrs['sub_variable'] == 'true']
def SetOutputLanguage(self, output_language):
"""Set the output language. Prepares substitutions.
The substitutions are reset every time the language is changed.
They include messages designated as variables, and language codes for html
and rc files.
Args:
output_language: a two-letter language code (eg: 'en', 'ar'...) or ''
"""
if not output_language:
# We do not specify the output language for .grh files,
# so we get an empty string as the default.
# The value should match grit.clique.MessageClique.source_language.
output_language = self.GetSourceLanguage()
if output_language != self.output_language:
self.output_language = output_language
self.substituter = None # force recalculate
def SetOutputContext(self, output_context):
self.output_context = output_context
self.substituter = None # force recalculate
def SetDefines(self, defines):
self.defines = defines
self.substituter = None # force recalculate
def SetTargetPlatform(self, target_platform):
self.target_platform = target_platform
def GetSubstituter(self):
if self.substituter is None:
self.substituter = util.Substituter()
self.substituter.AddMessages(self.GetSubstitutionMessages(),
self.output_language)
if self.output_language in _RTL_LANGS:
direction = 'dir="RTL"'
else:
direction = 'dir="LTR"'
self.substituter.AddSubstitutions({
'GRITLANGCODE': self.output_language,
'GRITDIR': direction,
})
from grit.format import rc # avoid circular dep
rc.RcSubstitutions(self.substituter, self.output_language)
return self.substituter
def AssignFirstIds(self, filename_or_stream, defines):
"""Assign first ids to each grouping node based on values from the
first_ids file (if specified on the <grit> node).
"""
# If the input is a stream, then we're probably in a unit test and
# should skip this step.
if type(filename_or_stream) not in (str, unicode):
return
# Nothing to do if the first_ids_filename attribute isn't set.
first_ids_filename = self.GetFirstIdsFile()
if not first_ids_filename:
return
src_root_dir, first_ids = _ReadFirstIdsFromFile(first_ids_filename,
defines)
from grit.node import empty
for node in self.Preorder():
if isinstance(node, empty.GroupingNode):
abs_filename = os.path.abspath(filename_or_stream)
if abs_filename[:len(src_root_dir)] != src_root_dir:
filename = os.path.basename(filename_or_stream)
else:
filename = abs_filename[len(src_root_dir) + 1:]
filename = filename.replace('\\', '/')
if node.attrs['first_id'] != '':
raise Exception(
"Don't set the first_id attribute when using the first_ids_file "
"attribute on the <grit> node, update %s instead." %
first_ids_filename)
try:
id_list = first_ids[filename][node.name]
except KeyError, e:
print '-' * 78
print 'Resource id not set for %s (%s)!' % (filename, node.name)
print ('Please update %s to include an entry for %s. See the '
'comments in resource_ids for information on why you need to '
'update that file.' % (first_ids_filename, filename))
print '-' * 78
raise e
try:
node.attrs['first_id'] = str(id_list.pop(0))
except IndexError, e:
raise Exception('Please update %s and add a first id for %s (%s).'
% (first_ids_filename, filename, node.name))
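  # Illustrative sketch (not from the original source): AssignFirstIds expects
  # _ReadFirstIdsFromFile to yield a mapping shaped roughly like
  #
  #   first_ids = {
  #     'chrome/app/generated_resources.grd': {   # hypothetical path
  #       'messages': [10000],
  #       'includes': [15000],
  #     },
  #   }
  #
  # so each grouping node under a given file pops one first id per node name.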
def RunGatherers(self, debug=False):
'''Call RunPreSubstitutionGatherer() on every node of the tree, then apply
substitutions, then call RunPostSubstitutionGatherer() on every node.
The substitutions step requires that the output language has been set.
Locally, get the Substitution messages and add them to the substituter.
Also add substitutions for language codes in the Rc.
Args:
debug: will print information while running gatherers.
'''
for node in self.ActiveDescendants():
if hasattr(node, 'RunPreSubstitutionGatherer'):
with node:
node.RunPreSubstitutionGatherer(debug=debug)
assert self.output_language
self.SubstituteMessages(self.GetSubstituter())
for node in self.ActiveDescendants():
if hasattr(node, 'RunPostSubstitutionGatherer'):
with node:
node.RunPostSubstitutionGatherer(debug=debug)
class IdentifierNode(base.Node):
"""A node for specifying identifiers that should appear in the resource
header file, and be unique amongst all other resource identifiers, but don't
have any other attributes or reference any resources.
"""
def MandatoryAttributes(self):
return ['name']
def DefaultAttributes(self):
return { 'comment' : '', 'id' : '', 'systemid': 'false' }
def GetId(self):
"""Returns the id of this identifier if it has one, None otherwise
"""
if 'id' in self.attrs:
return self.attrs['id']
return None
def EndParsing(self):
"""Handles system identifiers."""
super(IdentifierNode, self).EndParsing()
if self.attrs['systemid'] == 'true':
util.SetupSystemIdentifiers((self.attrs['name'],))
@staticmethod
def Construct(parent, name, id, comment, systemid='false'):
"""Creates a new node which is a child of 'parent', with attributes set
by parameters of the same name.
"""
node = IdentifierNode()
node.StartParsing('identifier', parent)
node.HandleAttribute('name', name)
node.HandleAttribute('id', id)
node.HandleAttribute('comment', comment)
node.HandleAttribute('systemid', systemid)
node.EndParsing()
return node
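# Illustrative usage sketch (not from the original source): Construct() builds
# a parsed <identifier> child programmatically, e.g.
#
#   node = IdentifierNode.Construct(parent_node, 'IDS_MY_IDENTIFIER', '5000',
#                                   'example comment')   # hypothetical values
#
# which is equivalent to parsing
#   <identifier name="IDS_MY_IDENTIFIER" id="5000" comment="example comment"/>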
| 0.008328 |
class JustAClass:
def __init__(self):
# Non-mobile videos: for the old pages
self.nonMobileVideoItemRegex = 'src="(?<Image>[^"]+)"\W+>(?<Premium><div class="not-' \
'available-image-overlay">)?[\w\W]{0,500}?</a></div>\W*' \
'</div>\W*<div[^>]*>\W*<a href="(?<Url>[^"]+/(?<Day>\d+)-' \
'(?<Month>\d+)-(?<Year>\d+)/(?<WhatsOnId>[^/"]+))"[^>]*>' \
'<h4>(?<Title>[^<]+)<[\W\w]{0,600}?<p[^>]+>(?<Description>' \
'[^<]*)'.replace('(?<', '(?P<')
# Non-mobile videos: for the new pages
self.nonMobileVideoItemRege2 = 'src="(?<Image>[^"]+)"[^>]+>\W*</a></div>\W*<div[^>]*>\W*<h3><a href="' \
'(?<Url>[^"]+/(?<Day>\d+)-(?<Month>\d+)-(?<Year>\d+)/(?<WhatsOnId>[^/"]+))"' \
'[^>]*>(?<Title>[^<]+)<[\W\w]{0,600}?<p[^>]*>' \
'(?<Description>[^<]*)'.replace('(?<', '(?P<') | 0.026221 |
"""
This file contains celery tasks for credentials-related functionality.
"""
from celery import task
from celery.utils.log import get_task_logger
from django.conf import settings
from django.contrib.auth.models import User
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.credentials.utils import get_credentials_api_client
logger = get_task_logger(__name__)
# Under cms the following setting is not defined, leading to errors during tests.
# These tasks aren't strictly credentials generation, but are similar in the sense
# that they generate records on the credentials side, and have a similar SLA.
ROUTING_KEY = getattr(settings, 'CREDENTIALS_GENERATION_ROUTING_KEY', None)
# Maximum number of retries before giving up.
# For reference, 11 retries with exponential backoff yields a maximum waiting
# time of 2047 seconds (about 30 minutes). Setting this to None could yield
# unwanted behavior: infinite retries.
MAX_RETRIES = 11
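# For example (sketch, not in the original file): with countdown = 2 ** retries
# and MAX_RETRIES = 11, the total back-off is sum(2 ** i for i in range(11))
# = 2 ** 11 - 1 = 2047 seconds (roughly half an hour), matching the note above.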
@task(bind=True, ignore_result=True, routing_key=ROUTING_KEY)
def send_grade_to_credentials(self, username, course_run_key, verified, letter_grade, percent_grade):
""" Celery task to notify the Credentials IDA of a grade change via POST. """
logger.info(u'Running task send_grade_to_credentials for username %s and course %s', username, course_run_key)
countdown = 2 ** self.request.retries
course_key = CourseKey.from_string(course_run_key)
try:
credentials_client = get_credentials_api_client(
User.objects.get(username=settings.CREDENTIALS_SERVICE_USERNAME),
org=course_key.org,
)
credentials_client.grades.post({
'username': username,
'course_run': str(course_key),
'letter_grade': letter_grade,
'percent_grade': percent_grade,
'verified': verified,
})
logger.info(u'Sent grade for course %s to user %s', course_run_key, username)
except Exception as exc:
logger.exception(u'Failed to send grade for course %s to user %s', course_run_key, username)
raise self.retry(exc=exc, countdown=countdown, max_retries=MAX_RETRIES)
| 0.003702 |
import Image
import numpy as np
import math
import scipy as sp
from scipy.interpolate import interp1d
from pylab import *
from skimage import io, color
import cv2
#Source colour
R,G,B = (102.,0.,51.)
inten = 0.8
lower_left_end = 5
upper_left_end = 11
lower_right_end = 16
upper_right_end = 22
def inter(lx=[],ly=[],k1='quadratic'):
unew = np.arange(lx[0], lx[-1]+1, 1)
f2 = interp1d(lx, ly, kind=k1)
return (f2,unew)
def inter_plot(lx=[],ly=[],k1='quadratic'):
unew = np.arange(lx[0], lx[-1]+1, 1)
f2 = interp1d(lx, ly, kind=k1)
return unew,f2(unew)
def ext(a,b,i):
x.extend(arange(a,b,1).tolist())
if(b-a==1):
y.extend((ones(b-a)*i).tolist())
else:
y.extend((ones(b-a+1)*i).tolist())
def extleft(a,b,i):
xleft.extend(arange(a,b,1).tolist())
if(b-a==1):
yleft.extend((ones(b-a)*i).tolist())
else:
yleft.extend((ones(b-a+1)*i).tolist())
def extright(a,b,i):
xright.extend(arange(a,b,1).tolist())
if(b-a==1):
yright.extend((ones(b-a)*i).tolist())
else:
yright.extend((ones(b-a+1)*i).tolist())
file = np.loadtxt('pointeyeshadow.txt')
points = np.floor(file)
point_down_x = np.array((points[:lower_left_end][:,0]))
point_down_y = np.array(points[:lower_left_end][:,1])
point_up_x = np.array(points[lower_left_end:upper_left_end][:,0])
point_up_y = np.array(points[lower_left_end:upper_left_end][:,1])
point_down_x_right = np.array((points[upper_left_end:lower_right_end][:,0]))
point_down_y_right = np.array(points[upper_left_end:lower_right_end][:,1])
point_up_x_right = np.array((points[lower_right_end:upper_right_end][:,0]))
point_up_y_right = np.array(points[lower_right_end:upper_right_end][:,1])
im = imread('out1.jpg')
# imshow(im)
# plot((point_down_x[:],point_down_y[:],'cubic')[0], (point_down_x[:],point_down_y[:],'cubic')[1], 'ro')
# plot((point_up_x[:],point_up_y[:],'cubic')[0], (point_up_x[:],point_up_y[:],'cubic')[1], 'ro')
# plot((point_down_x_right[:],point_down_y_right[:],'cubic')[0], (point_down_x_right[:],point_down_y_right[:],'cubic')[1], 'ro')
# plot((point_up_x_right[:],point_up_y_right[:],'cubic')[0], (point_up_x_right[:],point_up_y_right[:],'cubic')[1], 'ro')
point_down_y_max = max(point_down_y)
point_up_y_min = min(point_up_y)
offset_left = point_down_y_max - point_up_y_min
point_up_y[0] += offset_left*0.625
point_up_y[1] += offset_left*0.3
point_up_y[2] += offset_left*0.15
point_up_y[3] += offset_left*0.1
point_up_y[4] += offset_left*0.2
point_down_y[0] += offset_left*0.625
point_down_y_right_max = max(point_down_y_right)
point_up_y_right_min = min(point_up_y_right)
offset_right = point_down_y_right_max - point_up_y_right_min
point_up_y_right[-1] += offset_right*0.625
point_up_y_right[1] += offset_right*0.2
point_up_y_right[2] += offset_right*0.1
point_up_y_right[3] += offset_right*0.15
point_up_y_right[4] += offset_right*0.3
point_down_y_right[-1] += offset_right*0.625
# plot((point_up_x[:],point_up_y[:],'cubic')[0], (point_up_x[:],point_up_y[:],'cubic')[1], 'go')
# plot((point_up_x_right[:],point_up_y_right[:],'cubic')[0], (point_up_x_right[:],point_up_y_right[:],'cubic')[1], 'go')
# gca().set_aspect('equal', adjustable='box')
# show()
figure()
im = imread('Input.jpg')
im2 = imread('Input.jpg')
height,width = im.shape[:2]
l_l = inter(point_down_x[:],point_down_y[:],'cubic')
u_l = inter(point_up_x[:],point_up_y[:],'cubic')
l_r = inter(point_down_x_right[:],point_down_y_right[:],'cubic')
u_r = inter(point_up_x_right[:],point_up_y_right[:],'cubic')
L,A,bB = 0,0,0
x = []
y = []
xleft=[]
yleft=[]
xright=[]
yright=[]
for i in range(int(l_l[1][0]),int(l_l[1][-1]+1)):
ext(u_l[0](i),l_l[0](i)+1,i)
extleft(u_l[0](i),l_l[0](i)+1,i)
for i in range(int(l_r[1][0]),int(l_r[1][-1]+1)):
ext(u_r[0](i),l_r[0](i)+1,i)
extright(u_r[0](i),l_r[0](i)+1,i)
val = color.rgb2lab((im[x,y]/255.).reshape(len(x),1,3)).reshape(len(x),3)
L = mean(val[:,0])
A = mean(val[:,1])
bB = mean(val[:,2])
rgbmean = (im[x,y])
rmean = mean(rgbmean[:,0])
gmean = mean(rgbmean[:,1])
bmean = mean(rgbmean[:,2])
# print rmean, gmean, bmean
L,A,bB = color.rgb2lab(np.array((rmean/255.,gmean/255.,bmean/255.)).reshape(1,1,3)).reshape(3,)
L1,A1,B1 = color.rgb2lab(np.array((R/255.,G/255.,B/255.)).reshape(1,1,3)).reshape(3,)
val[:,0] += (L1-L)*inten
val[:,1] += (A1-A)*inten
val[:,2] += (B1-bB)*inten
image_blank = imread('Input.jpg')
image_blank *= 0
image_blank[x,y] = color.lab2rgb(val.reshape(len(x),1,3)).reshape(len(x),3)*255
original = color.rgb2lab((im[x,y]*0/255.).reshape(len(x),1,3)).reshape(len(x),3)
tobeadded = color.rgb2lab((image_blank[x,y]/255.).reshape(len(x),1,3)).reshape(len(x),3)
original += tobeadded
im[x,y] = color.lab2rgb(original.reshape(len(x),1,3)).reshape(len(x),3)*255
# Blur Filter
filter = np.zeros((height,width))
cv2.fillConvexPoly(filter,np.array(c_[yleft, xleft],dtype = 'int32'),1)
cv2.fillConvexPoly(filter,np.array(c_[yright, xright],dtype = 'int32'),1)
imshow(filter)
filter = cv2.GaussianBlur(filter,(31,31),0)
# Erosion to reduce blur size
kernel = np.ones((12,12),np.uint8)
filter = cv2.erode(filter,kernel,iterations = 1)
alpha=np.zeros([height,width,3],dtype='float64')
alpha[:,:,0]=filter
alpha[:,:,1]=filter
alpha[:,:,2]=filter
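# Per-pixel alpha blend (descriptive comment added for clarity): the Gaussian-
# blurred and eroded mask weights the recolored image against the untouched
# original, output = alpha * im + (1 - alpha) * im2, giving soft eyeshadow edges.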
imshow((alpha*im+(1-alpha)*im2).astype('uint8'))
# plot((point_down_x[:],point_down_y[:],'cubic')[0], (point_down_x[:],point_down_y[:],'cubic')[1], 'ro')
# plot((point_down_x[:],point_down_y[:],'cubic')[0], (point_down_x[:],point_down_y[:],'cubic')[1], 'r-')
# plot((point_up_x[:],point_up_y[:],'cubic')[0], (point_up_x[:],point_up_y[:],'cubic')[1], 'ro')
# plot((point_up_x[:],point_up_y[:],'cubic')[0], (point_up_x[:],point_up_y[:],'cubic')[1], 'r-')
# plot((point_down_x_right[:],point_down_y_right[:],'cubic')[0], (point_down_x_right[:],point_down_y_right[:],'cubic')[1], 'ro')
# plot((point_down_x_right[:],point_down_y_right[:],'cubic')[0], (point_down_x_right[:],point_down_y_right[:],'cubic')[1], 'r-')
# plot((point_up_x_right[:],point_up_y_right[:],'cubic')[0], (point_up_x_right[:],point_up_y_right[:],'cubic')[1], 'ro')
# plot((point_up_x_right[:],point_up_y_right[:],'cubic')[0], (point_up_x_right[:],point_up_y_right[:],'cubic')[1], 'r-')
gca().set_aspect('equal', adjustable='box')
imsave('out1.jpg',(alpha*im+(1-alpha)*im2).astype('uint8'))
show() | 0.030843 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
import copy
from itertools import izip as zip, count
from nupic.bindings.algorithms import TemporalMemory as TM
from scipy.spatial.distance import cosine
from htmresearch.frameworks.poirazi_neuron_model.data_tools import (
generate_evenly_distributed_data_sparse)
from multiprocessing import Pool, cpu_count
numpy.random.seed(19)
def convert_cell_lists_to_dense(dim, cell_list, add_1 = False):
if add_1:
dense_cell_list = numpy.zeros((len(cell_list), dim + 1))
else:
dense_cell_list = numpy.zeros((len(cell_list), dim))
for i, datapoint in enumerate(cell_list):
for cell in datapoint:
dense_cell_list[i, int(cell)] = 1
if add_1:
dense_cell_list[i, dim] = 1
return dense_cell_list
def run_tm_dim_experiment(test_dims = range(300, 3100, 100),
cellsPerColumn=1,
num_active = 256,
activationThreshold=10,
initialPermanence=0.8,
connectedPermanence=0.50,
minThreshold=10,
maxNewSynapseCount=20,
permanenceIncrement=0.05,
permanenceDecrement=0.00,
predictedSegmentDecrement=0.000,
maxSegmentsPerCell=4000,
maxSynapsesPerSegment=255,
seed=42,
num_samples = 1000,
sequence_length = 20,
training_iters = 1,
automatic_threshold = False,
save_results = True):
"""
Run an experiment tracking the performance of the temporal memory given
different input dimensions. The number of active cells is kept fixed, so we
are in effect varying the sparsity of the input. We track performance by
comparing the cells predicted to be active with the cells actually active in
the sequence without noise at every timestep, and averaging across timesteps.
Three metrics are used, correlation (Pearson's r, by numpy.corrcoef),
set similarity (Jaccard index) and cosine similarity (using
scipy.spatial.distance.cosine). The Jaccard set similarity is the
canonical metric used in the paper, but all three tend to produce very similar
results.
Output is written to tm_dim_{num_active}.txt, including sample size.
In our experiments, we used the set similarity metric (third column in output)
along with three different values for num_active, 64, 128 and 256. We used
dimensions from 300 to 2900 in each case, testing every 100. 1000 sequences
of length 20 were passed to the TM in each trial.
"""
if automatic_threshold:
activationThreshold = min(num_active/2, maxNewSynapseCount/2)
minThreshold = min(num_active/2, maxNewSynapseCount/2)
print "Using activation threshold {}".format(activationThreshold)
for dim in test_dims:
tm = TM(columnDimensions=(dim,),
cellsPerColumn=cellsPerColumn,
activationThreshold=activationThreshold,
initialPermanence=initialPermanence,
connectedPermanence=connectedPermanence,
minThreshold=minThreshold,
maxNewSynapseCount=maxNewSynapseCount,
permanenceIncrement=permanenceIncrement,
permanenceDecrement=permanenceDecrement,
predictedSegmentDecrement=predictedSegmentDecrement,
maxSegmentsPerCell=maxSegmentsPerCell,
maxSynapsesPerSegment=maxSynapsesPerSegment,
seed=seed)
tm.setMinThreshold(1000)
datapoints = []
canonical_active_cells = []
for sample in range(num_samples):
if (sample + 1) % 10 == 0:
print sample + 1
data = generate_evenly_distributed_data_sparse(dim = dim, num_active = num_active, num_samples = sequence_length)
datapoints.append(data)
for i in range(training_iters):
for j in range(data.nRows()):
activeColumns = set(data.rowNonZeros(j)[0])
tm.compute(activeColumns, learn = True)
tm.reset()
current_active_cells = []
for j in range(data.nRows()):
activeColumns = set(data.rowNonZeros(j)[0])
tm.compute(activeColumns, learn = True)
current_active_cells.append(tm.getActiveCells())
canonical_active_cells.append(current_active_cells)
tm.reset()
    # Now that the TM has been trained, check its predictions against the stored active cells for each sequence.
correlations = []
similarities = []
csims = []
for datapoint, active_cells in zip(datapoints, canonical_active_cells):
data = copy.deepcopy(datapoint)
predicted_cells = []
for j in range(data.nRows()):
activeColumns = set(data.rowNonZeros(j)[0])
tm.compute(activeColumns, learn = False)
predicted_cells.append(tm.getPredictiveCells())
tm.reset()
similarity = [(0.+len(set(predicted) & set(active)))/len((set(predicted) | set(active))) for predicted, active in zip (predicted_cells[:-1], active_cells[1:])]
dense_predicted_cells = convert_cell_lists_to_dense(dim*cellsPerColumn, predicted_cells[:-1])
dense_active_cells = convert_cell_lists_to_dense(dim*cellsPerColumn, active_cells[1:])
correlation = [numpy.corrcoef(numpy.asarray([predicted, active]))[0, 1] for predicted, active in zip(dense_predicted_cells, dense_active_cells)]
csim = [1 - cosine(predicted, active) for predicted, active in zip(dense_predicted_cells, dense_active_cells)]
correlation = numpy.nan_to_num(correlation)
csim = numpy.nan_to_num(csim)
correlations.append(numpy.mean(correlation))
similarities.append(numpy.mean(similarity))
csims.append(numpy.mean(csim))
correlation = numpy.mean(correlations)
similarity = numpy.mean(similarities)
csim = numpy.mean(csims)
print dim, correlation, similarity, csim
if save_results:
with open("tm_dim_{}.txt".format(num_active), "a") as f:
f.write(str(dim)+", " + str(correlation) + ", " + str(similarity) + ", " + str(csim) + ", " + str(num_samples) + "\n")
def exp_wrapper(params):
return run_tm_dim_experiment(**params)
if __name__ == "__main__":
p = Pool(cpu_count())
exp_params = []
for dim in reversed(range(300, 4100, 100)):
for num_active in [256, 128, 64]:
exp_params.append({
"test_dims": [dim],
"num_active" : num_active,
"seed": dim*num_active
})
p.map(exp_wrapper, exp_params)
| 0.011144 |
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' physicalplanhandler.py '''
import traceback
import tornado.gen
import tornado.web
from heron.common.src.python.utils.log import Log
from heron.tools.tracker.src.python.handlers import BaseHandler
class PhysicalPlanHandler(BaseHandler):
"""
URL - /topologies/physicalplan
Parameters:
- cluster (required)
- role - (optional) Role used to submit the topology.
- environ (required)
- topology (required) name of the requested topology
The response JSON is a dictionary with all the
  information of the physical plan of the topology.
"""
# pylint: disable=attribute-defined-outside-init
def initialize(self, tracker):
"""initialize"""
self.tracker = tracker
@tornado.gen.coroutine
def get(self):
"""get method"""
try:
cluster = self.get_argument_cluster()
role = self.get_argument_role()
environ = self.get_argument_environ()
topology_name = self.get_argument_topology()
topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)
physical_plan = topology_info["physical_plan"]
self.write_success_response(physical_plan)
except Exception as e:
Log.debug(traceback.format_exc())
self.write_error_response(e)
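# Illustrative request sketch (not from the original source): this handler
# would typically be hit with something like
#   GET /topologies/physicalplan?cluster=local&environ=default&topology=WordCount
# (parameter values are hypothetical); on success it writes topology_info's
# "physical_plan" dictionary via the tracker's standard success response.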
| 0.008197 |
# Python side of the support for xmethods.
# Copyright (C) 2013-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for defining xmethods"""
import gdb
import re
import sys
if sys.version_info[0] > 2:
# Python 3 removed basestring and long
basestring = str
long = int
class XMethod(object):
"""Base class (or a template) for an xmethod description.
Currently, the description requires only the 'name' and 'enabled'
attributes. Description objects are managed by 'XMethodMatcher'
objects (see below). Note that this is only a template for the
interface of the XMethodMatcher.methods objects. One could use
this class or choose to use an object which supports this exact same
    interface. Also, an XMethodMatcher can choose not to use its 'methods'
attribute. In such cases this class (or an equivalent) is not used.
Attributes:
name: The name of the xmethod.
enabled: A boolean indicating if the xmethod is enabled.
"""
def __init__(self, name):
self.name = name
self.enabled = True
class XMethodMatcher(object):
"""Abstract base class for matching an xmethod.
When looking for xmethods, GDB invokes the `match' method of a
registered xmethod matcher to match the object type and method name.
The `match' method in concrete classes derived from this class should
return an `XMethodWorker' object, or a list of `XMethodWorker'
objects if there is a match (see below for 'XMethodWorker' class).
Attributes:
name: The name of the matcher.
enabled: A boolean indicating if the matcher is enabled.
methods: A sequence of objects of type 'XMethod', or objects
which have at least the attributes of an 'XMethod' object.
This list is used by the 'enable'/'disable'/'info' commands to
enable/disable/list the xmethods registered with GDB. See
the 'match' method below to know how this sequence is used.
            This attribute is None if the matcher chooses not to have any
xmethods managed by it.
"""
def __init__(self, name):
"""
Args:
name: An identifying name for the xmethod or the group of
xmethods returned by the `match' method.
"""
self.name = name
self.enabled = True
self.methods = None
def match(self, class_type, method_name):
"""Match class type and method name.
In derived classes, it should return an XMethodWorker object, or a
sequence of 'XMethodWorker' objects. Only those xmethod workers
whose corresponding 'XMethod' descriptor object is enabled should be
returned.
Args:
class_type: The class type (gdb.Type object) to match.
method_name: The name (string) of the method to match.
"""
raise NotImplementedError("XMethodMatcher match")
class XMethodWorker(object):
"""Base class for all xmethod workers defined in Python.
An xmethod worker is an object which matches the method arguments, and
invokes the method when GDB wants it to. Internally, GDB first invokes the
'get_arg_types' method to perform overload resolution. If GDB selects to
invoke this Python xmethod, then it invokes it via the overridden
'__call__' method. The 'get_result_type' method is used to implement
'ptype' on the xmethod.
Derived classes should override the 'get_arg_types', 'get_result_type'
and '__call__' methods.
"""
def get_arg_types(self):
"""Return arguments types of an xmethod.
A sequence of gdb.Type objects corresponding to the arguments of the
xmethod are returned. If the xmethod takes no arguments, then 'None'
or an empty sequence is returned. If the xmethod takes only a single
argument, then a gdb.Type object or a sequence with a single gdb.Type
element is returned.
"""
raise NotImplementedError("XMethodWorker get_arg_types")
def get_result_type(self, *args):
"""Return the type of the result of the xmethod.
Args:
args: Arguments to the method. Each element of the tuple is a
gdb.Value object. The first element is the 'this' pointer
value. These are the same arguments passed to '__call__'.
Returns:
A gdb.Type object representing the type of the result of the
xmethod.
"""
raise NotImplementedError("XMethodWorker get_result_type")
def __call__(self, *args):
"""Invoke the xmethod.
Args:
args: Arguments to the method. Each element of the tuple is a
gdb.Value object. The first element is the 'this' pointer
value.
Returns:
A gdb.Value corresponding to the value returned by the xmethod.
Returns 'None' if the method does not return anything.
"""
raise NotImplementedError("XMethodWorker __call__")
class SimpleXMethodMatcher(XMethodMatcher):
"""A utility class to implement simple xmethod mathers and workers.
See the __init__ method below for information on how instances of this
class can be used.
For simple classes and methods, one can choose to use this class. For
complex xmethods, which need to replace/implement template methods on
possibly template classes, one should implement their own xmethod
matchers and workers. See py-xmethods.py in testsuite/gdb.python
directory of the GDB source tree for examples.
"""
class SimpleXMethodWorker(XMethodWorker):
def __init__(self, method_function, arg_types):
self._arg_types = arg_types
self._method_function = method_function
def get_arg_types(self):
return self._arg_types
def __call__(self, *args):
return self._method_function(*args)
def __init__(self, name, class_matcher, method_matcher, method_function,
*arg_types):
"""
Args:
name: Name of the xmethod matcher.
class_matcher: A regular expression used to match the name of the
class whose method this xmethod is implementing/replacing.
method_matcher: A regular expression used to match the name of the
method this xmethod is implementing/replacing.
method_function: A Python callable which would be called via the
'invoke' method of the worker returned by the objects of this
class. This callable should accept the object (*this) as the
first argument followed by the rest of the arguments to the
method. All arguments to this function should be gdb.Value
objects.
arg_types: The gdb.Type objects corresponding to the arguments that
this xmethod takes. It can be None, or an empty sequence,
or a single gdb.Type object, or a sequence of gdb.Type objects.
"""
XMethodMatcher.__init__(self, name)
assert callable(method_function), (
"The 'method_function' argument to 'SimpleXMethodMatcher' "
"__init__ method should be a callable.")
self._method_function = method_function
self._class_matcher = class_matcher
self._method_matcher = method_matcher
self._arg_types = arg_types
def match(self, class_type, method_name):
cm = re.match(self._class_matcher, str(class_type.unqualified().tag))
mm = re.match(self._method_matcher, method_name)
if cm and mm:
return SimpleXMethodMatcher.SimpleXMethodWorker(
self._method_function, self._arg_types)
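# Illustrative usage sketch (not from the original source): a simple matcher
# replacing MyClass::get_size() might be registered roughly as follows
# (class name, method name and callable are hypothetical):
#
#   def my_get_size(this_val):
#       return this_val['size_']
#
#   matcher = SimpleXMethodMatcher('MyClass_get_size', '^MyClass$',
#                                  '^get_size$', my_get_size)
#   register_xmethod_matcher(None, matcher)   # None registers it globally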
# A helper function for register_xmethod_matcher which returns an error
# object if MATCHER is not having the requisite attributes in the proper
# format.
def _validate_xmethod_matcher(matcher):
if not hasattr(matcher, "match"):
return TypeError("Xmethod matcher is missing method: match")
if not hasattr(matcher, "name"):
return TypeError("Xmethod matcher is missing attribute: name")
if not hasattr(matcher, "enabled"):
return TypeError("Xmethod matcher is missing attribute: enabled")
if not isinstance(matcher.name, basestring):
return TypeError("Attribute 'name' of xmethod matcher is not a "
"string")
if matcher.name.find(";") >= 0:
return ValueError("Xmethod matcher name cannot contain ';' in it")
# A helper function for register_xmethod_matcher which looks up an
# xmethod matcher with NAME in LOCUS. Returns the index of the xmethod
# matcher in 'xmethods' sequence attribute of the LOCUS. If NAME is not
# found in LOCUS, then -1 is returned.
def _lookup_xmethod_matcher(locus, name):
for i in range(0, len(locus.xmethods)):
if locus.xmethods[i].name == name:
return i
return -1
def register_xmethod_matcher(locus, matcher, replace=False):
"""Registers a xmethod matcher MATCHER with a LOCUS.
Arguments:
locus: The locus in which the xmethods should be registered.
It can be 'None' to indicate that the xmethods should be
registered globally. Or, it could be a gdb.Objfile or a
gdb.Progspace object in which the xmethods should be
registered.
matcher: The xmethod matcher to register with the LOCUS. It
should be an instance of 'XMethodMatcher' class.
replace: If True, replace any existing xmethod matcher with the
same name in the locus. Otherwise, if a matcher with the same name
exists in the locus, raise an exception.
"""
err = _validate_xmethod_matcher(matcher)
if err:
raise err
if not locus:
locus = gdb
if locus == gdb:
locus_name = "global"
else:
locus_name = locus.filename
index = _lookup_xmethod_matcher(locus, matcher.name)
if index >= 0:
if replace:
del locus.xmethods[index]
else:
raise RuntimeError("Xmethod matcher already registered with "
"%s: %s" % (locus_name, matcher.name))
if gdb.parameter("verbose"):
        gdb.write("Registering xmethod matcher '%s' with %s ...\n" %
                  (matcher.name, locus_name))
locus.xmethods.insert(0, matcher)
| 0.000181 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""Implement a MSM class that builds a Markov state models from
microstate trajectories, automatically computes important properties
and provides them for later access.
.. moduleauthor:: F. Noe <frank DOT noe AT fu-berlin DOT de>
"""
from __future__ import absolute_import
from pyemma._base.model import SampledModel as _SampledModel
from pyemma.msm.models.hmsm import HMSM as _HMSM
from pyemma.util.types import is_iterable
class SampledHMSM(_HMSM, _SampledModel):
r""" Sampled Hidden Markov state model """
__serialize_version = 0
# TODO: maybe rename to parametrize in order to avoid confusion with set_params that has a different behavior?
def set_model_params(self, samples=None, conf=0.95,
P=None, pobs=None, pi=None, reversible=None, dt_model='1 step', neig=None):
"""
Parameters
----------
samples : list of MSM objects
sampled MSMs
        conf : float, optional, default=0.95
            Confidence interval. By default 95% (roughly two-sigma) is used. Use 68.3% for one sigma or 99.7% for three sigma.
"""
# set model parameters of superclass
_SampledModel.set_model_params(self, samples=samples, conf=conf)
_HMSM.set_model_params(self, P=P, pobs=pobs, pi=pi, reversible=reversible, dt_model=dt_model, neig=neig)
def submodel(self, states=None, obs=None):
"""Returns a HMM with restricted state space
Parameters
----------
states : None or int-array
Hidden states to restrict the model to (if not None).
obs : None, str or int-array
Observed states to restrict the model to (if not None).
Returns
-------
hmm : HMM
The restricted HMM.
"""
# get the reference HMM submodel
ref = super(SampledHMSM, self).submodel(states=states, obs=obs)
# get the sample submodels
samples_sub = [sample.submodel(states=states, obs=obs) for sample in self.samples]
# new model
return SampledHMSM(samples_sub, ref=ref, conf=self.conf)
| 0.002076 |
from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase
from django.core import signing
class SessionStore(SessionBase):
def load(self):
"""
We load the data from the key itself instead of fetching from
some external data store. Opposite of _get_session_key(),
raises BadSignature if signature fails.
"""
try:
return signing.loads(
self.session_key,
serializer=self.serializer,
# This doesn't handle non-default expiry dates, see #19201
max_age=settings.SESSION_COOKIE_AGE,
salt='django.contrib.sessions.backends.signed_cookies',
)
except Exception:
# BadSignature, ValueError, or unpickling exceptions. If any of
# these happen, reset the session.
self.create()
return {}
def create(self):
"""
To create a new key, we simply make sure that the modified flag is set
so that the cookie is set on the client for the current request.
"""
self.modified = True
def save(self, must_create=False):
"""
To save, we get the session key as a securely signed string and then
set the modified flag so that the cookie is set on the client for the
current request.
"""
self._session_key = self._get_session_key()
self.modified = True
def exists(self, session_key=None):
"""
This method makes sense when you're talking to a shared resource, but
it doesn't matter when you're storing the information in the client's
cookie.
"""
return False
def delete(self, session_key=None):
"""
To delete, we clear the session key and the underlying data structure
and set the modified flag so that the cookie is set on the client for
the current request.
"""
self._session_key = ''
self._session_cache = {}
self.modified = True
def cycle_key(self):
"""
Keeps the same data but with a new key. To do this, we just have to
call ``save()`` and it will automatically save a cookie with a new key
at the end of the request.
"""
self.save()
def _get_session_key(self):
"""
Most session backends don't need to override this method, but we do,
because instead of generating a random string, we want to actually
generate a secure url-safe Base64-encoded string of data as our
session key.
"""
return signing.dumps(
self._session, compress=True,
salt='django.contrib.sessions.backends.signed_cookies',
serializer=self.serializer,
)
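    # Illustrative sketch (not from the original source): the session "key"
    # produced above is itself the signed, compressed payload, so a round trip
    # looks roughly like
    #
    #   key = signing.dumps({'foo': 'bar'}, compress=True,
    #                       salt='django.contrib.sessions.backends.signed_cookies')
    #   signing.loads(key, salt='django.contrib.sessions.backends.signed_cookies')
    #   # -> {'foo': 'bar'}
    #
    # which is why exists() can always return False and delete() only needs to
    # reset local state.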
@classmethod
def clear_expired(cls):
pass
| 0 |
#!/usr/bin/env python
import numpy
#import pylab
def princomp(A,numpc=4,reconstruct=False,getEigenValues=True):
# computing eigenvalues and eigenvectors of covariance matrix
M = (A - numpy.atleast_2d(numpy.mean(A,axis=1)).T) # subtract the mean (along columns)
# print 'A:%s'%A
# print 'M:%s'%M
# print 'cov:%s'%numpy.cov(M)
[eigenValues,eigenVectors] = numpy.linalg.eig(numpy.cov(M))
p = numpy.size(eigenVectors,axis=1)
idx = numpy.argsort(eigenValues) # sorting the eigenvalues
idx = idx[::-1] # in ascending order
# sorting eigenvectors according to the sorted eigenvalues
eigenVectors = eigenVectors[:,idx]
eigenValues = eigenValues[idx] # sorting eigenvalues
    if numpc < p and numpc >= 0:
eigenVectors = eigenVectors[:,range(numpc)] # cutting some PCs
# eigenValues = eigenValues[range(numpc)]
#data reconstruction
if reconstruct:
# A_r = numpy.zeros_like(A)
# for i in range(numpc):
# A_r = A_r + eigenValues[i]*numpy.dot(numpy.atleast_2d(eigenVectors[:,i]).T,numpy.atleast_2d(eigenVectors[:,i]))
score = numpy.dot(eigenVectors.T,M) # projection of the data in the new space
Ar = (numpy.dot(eigenVectors,score)+numpy.mean(A,axis=0)).T # image reconstruction
return eigenVectors.real,eigenValues.real,Ar
else:
if getEigenValues:
return eigenVectors.real,eigenValues.real
else:
return eigenVectors.real
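# Illustrative usage sketch (not part of the original file): princomp expects
# variables in rows and observations in columns (it calls numpy.cov on the
# row-wise mean-centred data), e.g.
#
#   data = numpy.random.rand(5, 100)          # 5 variables, 100 observations
#   vecs, vals = princomp(data, numpc=2)      # top-2 principal directions
#   scores = numpy.dot(vecs.T, data - data.mean(axis=1, keepdims=True))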
| 0.037092 |
"""SCons.Tool.yacc
Tool-specific initialization for yacc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/yacc.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os.path
import SCons.Defaults
import SCons.Tool
import SCons.Util
YaccAction = SCons.Action.Action("$YACCCOM", "$YACCCOMSTR")
def _yaccEmitter(target, source, env, ysuf, hsuf):
yaccflags = env.subst("$YACCFLAGS", target=target, source=source)
flags = SCons.Util.CLVar(yaccflags)
targetBase, targetExt = os.path.splitext(SCons.Util.to_String(target[0]))
if '.ym' in ysuf: # If using Objective-C
target = [targetBase + ".m"] # the extension is ".m".
# If -d is specified on the command line, yacc will emit a .h
# or .hpp file with the same name as the .c or .cpp output file.
if '-d' in flags:
target.append(targetBase + env.subst(hsuf, target=target, source=source))
# If -g is specified on the command line, yacc will emit a .vcg
# file with the same base name as the .y, .yacc, .ym or .yy file.
if "-g" in flags:
base, ext = os.path.splitext(SCons.Util.to_String(source[0]))
target.append(base + env.subst("$YACCVCGFILESUFFIX"))
# If -v is specified yacc will create the output debug file
# which is not really source for any process, but should
# be noted and also be cleaned
# Bug #2558
if "-v" in flags:
env.SideEffect(targetBase+'.output',target[0])
env.Clean(target[0],targetBase+'.output')
# With --defines and --graph, the name of the file is totally defined
# in the options.
fileGenOptions = ["--defines=", "--graph="]
for option in flags:
for fileGenOption in fileGenOptions:
l = len(fileGenOption)
if option[:l] == fileGenOption:
# A file generating option is present, so add the file
# name to the list of targets.
fileName = option[l:].strip()
target.append(fileName)
return (target, source)
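# Illustrative sketch (not from the original source): for a hypothetical
# SConstruct such as
#
#   env = Environment(tools=['yacc', 'cc'], YACCFLAGS='-d')
#   env.CFile(target='parser.c', source='parser.y')
#
# the emitter above extends the target list to ['parser.c', 'parser.h'] (via
# $YACCHFILESUFFIX); '-g' would likewise add a .vcg target, while '-v'
# registers the .output file as a side effect that gets cleaned.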
def yEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.y', '.yacc'], '$YACCHFILESUFFIX')
def ymEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.ym'], '$YACCHFILESUFFIX')
def yyEmitter(target, source, env):
return _yaccEmitter(target, source, env, ['.yy'], '$YACCHXXFILESUFFIX')
def generate(env):
"""Add Builders and construction variables for yacc to an Environment."""
c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
# C
c_file.add_action('.y', YaccAction)
c_file.add_emitter('.y', yEmitter)
c_file.add_action('.yacc', YaccAction)
c_file.add_emitter('.yacc', yEmitter)
# Objective-C
c_file.add_action('.ym', YaccAction)
c_file.add_emitter('.ym', ymEmitter)
# C++
cxx_file.add_action('.yy', YaccAction)
cxx_file.add_emitter('.yy', yyEmitter)
env['YACC'] = env.Detect('bison') or 'yacc'
env['YACCFLAGS'] = SCons.Util.CLVar('')
env['YACCCOM'] = '$YACC $YACCFLAGS -o $TARGET $SOURCES'
env['YACCHFILESUFFIX'] = '.h'
env['YACCHXXFILESUFFIX'] = '.hpp'
env['YACCVCGFILESUFFIX'] = '.vcg'
def exists(env):
return env.Detect(['bison', 'yacc'])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 0.003685 |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_hotspot20_h2qp_operator_name
short_description: Configure operator friendly name in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wireless_controller_hotspot20 feature and h2qp_operator_name category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
wireless_controller_hotspot20_h2qp_operator_name:
description:
- Configure operator friendly name.
default: null
type: dict
suboptions:
name:
description:
- Friendly name ID.
required: true
type: str
value_list:
description:
- Name list.
type: list
suboptions:
index:
description:
- Value index.
required: true
type: int
lang:
description:
- Language code.
type: str
value:
description:
- Friendly name value.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure operator friendly name.
fortios_wireless_controller_hotspot20_h2qp_operator_name:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
wireless_controller_hotspot20_h2qp_operator_name:
name: "default_name_3"
value_list:
-
index: "5"
lang: "<your_own_value>"
value: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_wireless_controller_hotspot20_h2qp_operator_name_data(json):
option_list = ['name', 'value_list']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
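# For example (sketch, not in the original file):
#   underscore_to_hyphen({'value_list': [{'index': 1, 'lang': 'en'}]})
#   # -> {'value-list': [{'index': 1, 'lang': 'en'}]}
# i.e. only dictionary keys are rewritten, recursively through nested lists
# and dicts; values are left untouched.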
def wireless_controller_hotspot20_h2qp_operator_name(data, fos):
vdom = data['vdom']
state = data['state']
wireless_controller_hotspot20_h2qp_operator_name_data = data['wireless_controller_hotspot20_h2qp_operator_name']
filtered_data = underscore_to_hyphen(filter_wireless_controller_hotspot20_h2qp_operator_name_data(wireless_controller_hotspot20_h2qp_operator_name_data))
if state == "present":
return fos.set('wireless-controller.hotspot20',
'h2qp-operator-name',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('wireless-controller.hotspot20',
'h2qp-operator-name',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wireless_controller_hotspot20(data, fos):
if data['wireless_controller_hotspot20_h2qp_operator_name']:
resp = wireless_controller_hotspot20_h2qp_operator_name(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"wireless_controller_hotspot20_h2qp_operator_name": {
"required": False, "type": "dict", "default": None,
"options": {
"name": {"required": True, "type": "str"},
"value_list": {"required": False, "type": "list",
"options": {
"index": {"required": True, "type": "int"},
"lang": {"required": False, "type": "str"},
"value": {"required": False, "type": "str"}
}}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_wireless_controller_hotspot20(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_wireless_controller_hotspot20(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| 0.001595 |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a container for DescriptorProtos."""
__author__ = '[email protected] (Matt Toia)'
class DescriptorDatabase(object):
"""A container accepting FileDescriptorProtos and maps DescriptorProtos."""
def __init__(self):
self._file_desc_protos_by_file = {}
self._file_desc_protos_by_symbol = {}
def Add(self, file_desc_proto):
"""Adds the FileDescriptorProto and its types to this database.
Args:
file_desc_proto: The FileDescriptorProto to add.
"""
self._file_desc_protos_by_file[file_desc_proto.name] = file_desc_proto
package = file_desc_proto.package
for message in file_desc_proto.message_type:
self._file_desc_protos_by_symbol.update(
(name, file_desc_proto) for name in _ExtractSymbols(message, package))
for enum in file_desc_proto.enum_type:
self._file_desc_protos_by_symbol[
'.'.join((package, enum.name))] = file_desc_proto
def FindFileByName(self, name):
"""Finds the file descriptor proto by file name.
    Typically the file name is a relative path ending in a .proto file. The
    proto with the given name must have been added to this database using the
    Add method, or else an error will be raised.
Args:
name: The file name to find.
Returns:
The file descriptor proto matching the name.
Raises:
KeyError if no file by the given name was added.
"""
return self._file_desc_protos_by_file[name]
def FindFileContainingSymbol(self, symbol):
"""Finds the file descriptor proto containing the specified symbol.
The symbol should be a fully qualified name including the file descriptor's
package and any containing messages. Some examples:
'some.package.name.Message'
'some.package.name.Message.NestedEnum'
The file descriptor proto containing the specified symbol must be added to
this database using the Add method or else an error will be raised.
Args:
symbol: The fully qualified symbol name.
Returns:
The file descriptor proto containing the symbol.
Raises:
KeyError if no file contains the specified symbol.
"""
return self._file_desc_protos_by_symbol[symbol]
def _ExtractSymbols(desc_proto, package):
"""Pulls out all the symbols from a descriptor proto.
Args:
desc_proto: The proto to extract symbols from.
package: The package containing the descriptor type.
Yields:
The fully qualified name found in the descriptor.
"""
message_name = '.'.join((package, desc_proto.name))
yield message_name
for nested_type in desc_proto.nested_type:
for symbol in _ExtractSymbols(nested_type, message_name):
yield symbol
for enum_type in desc_proto.enum_type:
yield '.'.join((message_name, enum_type.name))
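# --- Illustrative usage sketch (not part of the library; assumes descriptor_pb2 is
# importable from the installed protobuf package). It shows that Add() indexes the
# proto both by file name and by every symbol it defines. ---
if __name__ == '__main__':
  from google.protobuf import descriptor_pb2

  _db = DescriptorDatabase()
  _fdp = descriptor_pb2.FileDescriptorProto()
  _fdp.name = 'example/greeting.proto'
  _fdp.package = 'example'
  _fdp.message_type.add().name = 'Greeting'
  _db.Add(_fdp)
  # Both lookups resolve to the same FileDescriptorProto registered above.
  assert _db.FindFileByName('example/greeting.proto') is _fdp
  assert _db.FindFileContainingSymbol('example.Greeting') is _fdp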
| 0.003174 |
"""
JPEG picture parser.
Information:
- APP14 documents
http://partners.adobe.com/public/developer/en/ps/sdk/5116.DCT_Filter.pdf
http://java.sun.com/j2se/1.5.0/docs/api/javax/imageio/metadata/doc-files/jpeg_metadata.html#color
- APP12:
http://search.cpan.org/~exiftool/Image-ExifTool/lib/Image/ExifTool/TagNames.pod
Author: Victor Stinner
"""
from hachoir_core.error import HachoirError
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, Enum,
Bit, Bits, NullBits, NullBytes,
String, RawBytes)
from hachoir_parser.image.common import PaletteRGB
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_parser.image.exif import Exif
from hachoir_parser.image.photoshop_metadata import PhotoshopMetadata
MAX_FILESIZE = 100 * 1024 * 1024
# The four tables (hash/sum for color/grayscale JPEG) come
# from the ImageMagick project
QUALITY_HASH_COLOR = (
1020, 1015, 932, 848, 780, 735, 702, 679, 660, 645,
632, 623, 613, 607, 600, 594, 589, 585, 581, 571,
555, 542, 529, 514, 494, 474, 457, 439, 424, 410,
397, 386, 373, 364, 351, 341, 334, 324, 317, 309,
299, 294, 287, 279, 274, 267, 262, 257, 251, 247,
243, 237, 232, 227, 222, 217, 213, 207, 202, 198,
192, 188, 183, 177, 173, 168, 163, 157, 153, 148,
143, 139, 132, 128, 125, 119, 115, 108, 104, 99,
94, 90, 84, 79, 74, 70, 64, 59, 55, 49,
45, 40, 34, 30, 25, 20, 15, 11, 6, 4,
0)
QUALITY_SUM_COLOR = (
32640,32635,32266,31495,30665,29804,29146,28599,28104,27670,
27225,26725,26210,25716,25240,24789,24373,23946,23572,22846,
21801,20842,19949,19121,18386,17651,16998,16349,15800,15247,
14783,14321,13859,13535,13081,12702,12423,12056,11779,11513,
11135,10955,10676,10392,10208, 9928, 9747, 9564, 9369, 9193,
9017, 8822, 8639, 8458, 8270, 8084, 7896, 7710, 7527, 7347,
7156, 6977, 6788, 6607, 6422, 6236, 6054, 5867, 5684, 5495,
5305, 5128, 4945, 4751, 4638, 4442, 4248, 4065, 3888, 3698,
3509, 3326, 3139, 2957, 2775, 2586, 2405, 2216, 2037, 1846,
1666, 1483, 1297, 1109, 927, 735, 554, 375, 201, 128,
0)
QUALITY_HASH_GRAY = (
510, 505, 422, 380, 355, 338, 326, 318, 311, 305,
300, 297, 293, 291, 288, 286, 284, 283, 281, 280,
279, 278, 277, 273, 262, 251, 243, 233, 225, 218,
211, 205, 198, 193, 186, 181, 177, 172, 168, 164,
158, 156, 152, 148, 145, 142, 139, 136, 133, 131,
129, 126, 123, 120, 118, 115, 113, 110, 107, 105,
102, 100, 97, 94, 92, 89, 87, 83, 81, 79,
76, 74, 70, 68, 66, 63, 61, 57, 55, 52,
50, 48, 44, 42, 39, 37, 34, 31, 29, 26,
24, 21, 18, 16, 13, 11, 8, 6, 3, 2,
0)
QUALITY_SUM_GRAY = (
16320,16315,15946,15277,14655,14073,13623,13230,12859,12560,
12240,11861,11456,11081,10714,10360,10027, 9679, 9368, 9056,
8680, 8331, 7995, 7668, 7376, 7084, 6823, 6562, 6345, 6125,
5939, 5756, 5571, 5421, 5240, 5086, 4976, 4829, 4719, 4616,
4463, 4393, 4280, 4166, 4092, 3980, 3909, 3835, 3755, 3688,
3621, 3541, 3467, 3396, 3323, 3247, 3170, 3096, 3021, 2952,
2874, 2804, 2727, 2657, 2583, 2509, 2437, 2362, 2290, 2211,
2136, 2068, 1996, 1915, 1858, 1773, 1692, 1620, 1552, 1477,
1398, 1326, 1251, 1179, 1109, 1031, 961, 884, 814, 736,
667, 592, 518, 441, 369, 292, 221, 151, 86, 64,
0)
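# Zig-zag scan table: JPEG_NATURAL_ORDER[i] is the natural (row-major) position
# inside the 8x8 DCT block of the i-th coefficient as stored in the stream.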
JPEG_NATURAL_ORDER = (
0, 1, 8, 16, 9, 2, 3, 10,
17, 24, 32, 25, 18, 11, 4, 5,
12, 19, 26, 33, 40, 48, 41, 34,
27, 20, 13, 6, 7, 14, 21, 28,
35, 42, 49, 56, 57, 50, 43, 36,
29, 22, 15, 23, 30, 37, 44, 51,
58, 59, 52, 45, 38, 31, 39, 46,
53, 60, 61, 54, 47, 55, 62, 63)
class JpegChunkApp0(FieldSet):
UNIT_NAME = {
0: "pixels",
1: "dots per inch",
2: "dots per cm",
}
def createFields(self):
yield String(self, "jfif", 5, "JFIF string", charset="ASCII")
if self["jfif"].value != "JFIF\0":
raise ParserError(
"Stream doesn't look like JPEG chunk (wrong JFIF signature)")
yield UInt8(self, "ver_maj", "Major version")
yield UInt8(self, "ver_min", "Minor version")
yield Enum(UInt8(self, "units", "Units"), self.UNIT_NAME)
if self["units"].value == 0:
yield UInt16(self, "aspect_x", "Aspect ratio (X)")
yield UInt16(self, "aspect_y", "Aspect ratio (Y)")
else:
yield UInt16(self, "x_density", "X density")
yield UInt16(self, "y_density", "Y density")
yield UInt8(self, "thumb_w", "Thumbnail width")
yield UInt8(self, "thumb_h", "Thumbnail height")
thumb_size = self["thumb_w"].value * self["thumb_h"].value
if thumb_size != 0:
yield PaletteRGB(self, "thumb_palette", 256)
yield RawBytes(self, "thumb_data", thumb_size, "Thumbnail data")
class Ducky(FieldSet):
BLOCK_TYPE = {
0: "end",
1: "Quality",
2: "Comment",
3: "Copyright",
}
def createFields(self):
yield Enum(UInt16(self, "type"), self.BLOCK_TYPE)
if self["type"].value == 0:
return
yield UInt16(self, "size")
size = self["size"].value
if size:
yield RawBytes(self, "data", size)
class APP12(FieldSet):
"""
The JPEG APP12 "Picture Info" segment was used by some older cameras, and
contains ASCII-based meta information.
"""
def createFields(self):
yield String(self, "ducky", 5, '"Ducky" string', charset="ASCII")
while not self.eof:
yield Ducky(self, "item[]")
class StartOfFrame(FieldSet):
def createFields(self):
yield UInt8(self, "precision")
yield UInt16(self, "height")
yield UInt16(self, "width")
yield UInt8(self, "nr_components")
for index in range(self["nr_components"].value):
yield UInt8(self, "component_id[]")
yield UInt8(self, "high[]")
yield UInt8(self, "low[]")
class Comment(FieldSet):
def createFields(self):
yield String(self, "comment", self.size//8, strip="\0")
class AdobeChunk(FieldSet):
COLORSPACE_TRANSFORMATION = {
1: "YCbCr (converted from RGB)",
2: "YCCK (converted from CMYK)",
}
def createFields(self):
if self.stream.readBytes(self.absolute_address, 5) != "Adobe":
yield RawBytes(self, "raw", self.size//8, "Raw data")
return
yield String(self, "adobe", 5, "\"Adobe\" string", charset="ASCII")
yield UInt16(self, "version", "DCT encoder version")
yield Enum(Bit(self, "flag00"),
{False: "Chop down or subsampling", True: "Blend"})
yield NullBits(self, "flags0_reserved", 15)
yield NullBytes(self, "flags1", 2)
yield Enum(UInt8(self, "color_transform", "Colorspace transformation code"), self.COLORSPACE_TRANSFORMATION)
class StartOfScan(FieldSet):
def createFields(self):
yield UInt8(self, "nr_components")
for index in range(self["nr_components"].value):
comp_id = UInt8(self, "component_id[]")
yield comp_id
if not(1 <= comp_id.value <= self["nr_components"].value):
raise ParserError("JPEG error: Invalid component-id")
yield UInt8(self, "value[]")
        yield RawBytes(self, "raw", 3) # Ss, Se and Ah/Al bytes: spectral selection bounds and successive approximation
class RestartInterval(FieldSet):
def createFields(self):
yield UInt16(self, "interval", "Restart interval")
class QuantizationTable(FieldSet):
def createFields(self):
# Code based on function get_dqt() (jdmarker.c from libjpeg62)
yield Bits(self, "is_16bit", 4)
yield Bits(self, "index", 4)
if self["index"].value >= 4:
raise ParserError("Invalid quantification index (%s)" % self["index"].value)
if self["is_16bit"].value:
coeff_type = UInt16
else:
coeff_type = UInt8
for index in xrange(64):
natural = JPEG_NATURAL_ORDER[index]
yield coeff_type(self, "coeff[%u]" % natural)
def createDescription(self):
return "Quantification table #%u" % self["index"].value
class DefineQuantizationTable(FieldSet):
def createFields(self):
while self.current_size < self.size:
yield QuantizationTable(self, "qt[]")
class JpegChunk(FieldSet):
TAG_SOI = 0xD8
TAG_EOI = 0xD9
TAG_SOS = 0xDA
TAG_DQT = 0xDB
TAG_DRI = 0xDD
TAG_INFO = {
0xC4: ("huffman[]", "Define Huffman Table (DHT)", None),
0xD8: ("start_image", "Start of image (SOI)", None),
0xD9: ("end_image", "End of image (EOI)", None),
0xDA: ("start_scan", "Start Of Scan (SOS)", StartOfScan),
0xDB: ("quantization[]", "Define Quantization Table (DQT)", DefineQuantizationTable),
0xDC: ("nb_line", "Define number of Lines (DNL)", None),
0xDD: ("restart_interval", "Define Restart Interval (DRI)", RestartInterval),
0xE0: ("app0", "APP0", JpegChunkApp0),
0xE1: ("exif", "Exif metadata", Exif),
0xE2: ("icc", "ICC profile", None),
0xEC: ("app12", "APP12", APP12),
0xED: ("photoshop", "Photoshop", PhotoshopMetadata),
0xEE: ("adobe", "Image encoding information for DCT filters (Adobe)", AdobeChunk),
0xFE: ("comment[]", "Comment", Comment),
}
START_OF_FRAME = {
0xC0: u"Baseline",
0xC1: u"Extended sequential",
0xC2: u"Progressive",
0xC3: u"Lossless",
0xC5: u"Differential sequential",
0xC6: u"Differential progressive",
0xC7: u"Differential lossless",
0xC9: u"Extended sequential, arithmetic coding",
0xCA: u"Progressive, arithmetic coding",
0xCB: u"Lossless, arithmetic coding",
0xCD: u"Differential sequential, arithmetic coding",
0xCE: u"Differential progressive, arithmetic coding",
0xCF: u"Differential lossless, arithmetic coding",
}
for key, text in START_OF_FRAME.iteritems():
TAG_INFO[key] = ("start_frame", "Start of frame (%s)" % text.lower(), StartOfFrame)
def __init__(self, parent, name, description=None):
FieldSet.__init__(self, parent, name, description)
tag = self["type"].value
if tag == 0xE1:
            # APP1 may carry either Exif metadata or Adobe XAP/XMP metadata (XML);
            # check the signature to tell them apart
bytes = self.stream.readBytes(self.absolute_address + 32, 6)
if bytes == "Exif\0\0":
self._name = "exif"
self._description = "EXIF"
self._parser = Exif
else:
self._parser = None
elif tag in self.TAG_INFO:
self._name, self._description, self._parser = self.TAG_INFO[tag]
else:
self._parser = None
def createFields(self):
yield textHandler(UInt8(self, "header", "Header"), hexadecimal)
if self["header"].value != 0xFF:
raise ParserError("JPEG: Invalid chunk header!")
yield textHandler(UInt8(self, "type", "Type"), hexadecimal)
tag = self["type"].value
if tag in (self.TAG_SOI, self.TAG_EOI):
return
yield UInt16(self, "size", "Size")
size = (self["size"].value - 2)
if 0 < size:
if self._parser:
yield self._parser(self, "content", "Chunk content", size=size*8)
else:
yield RawBytes(self, "data", size, "Data")
def createDescription(self):
return "Chunk: %s" % self["type"].display
class JpegFile(Parser):
endian = BIG_ENDIAN
PARSER_TAGS = {
"id": "jpeg",
"category": "image",
"file_ext": ("jpg", "jpeg"),
"mime": (u"image/jpeg",),
"magic": (
("\xFF\xD8\xFF\xE0", 0), # (Start Of Image, APP0)
("\xFF\xD8\xFF\xE1", 0), # (Start Of Image, EXIF)
("\xFF\xD8\xFF\xEE", 0), # (Start Of Image, Adobe)
),
"min_size": 22*8,
"description": "JPEG picture",
"subfile": "skip",
}
def validate(self):
if self.stream.readBytes(0, 2) != "\xFF\xD8":
return "Invalid file signature"
try:
for index, field in enumerate(self):
chunk_type = field["type"].value
if chunk_type not in JpegChunk.TAG_INFO:
return "Unknown chunk type: 0x%02X (chunk #%s)" % (chunk_type, index)
if index == 2:
# Only check 3 fields
break
except HachoirError:
return "Unable to parse at least three chunks"
return True
def createFields(self):
while not self.eof:
chunk = JpegChunk(self, "chunk[]")
yield chunk
if chunk["type"].value == JpegChunk.TAG_SOS:
# TODO: Read JPEG image data...
break
# TODO: is it possible to handle piped input?
if self._size is None:
raise NotImplementedError
has_end = False
size = (self._size - self.current_size) // 8
if size:
if 2 < size \
and self.stream.readBytes(self._size - 16, 2) == "\xff\xd9":
has_end = True
size -= 2
yield RawBytes(self, "data", size, "JPEG data")
if has_end:
yield JpegChunk(self, "chunk[]")
def createDescription(self):
desc = "JPEG picture"
if "sof/content" in self:
header = self["sof/content"]
desc += ": %ux%u pixels" % (header["width"].value, header["height"].value)
return desc
def createContentSize(self):
if "end" in self:
return self["end"].absolute_address + self["end"].size
if "data" not in self:
return None
start = self["data"].absolute_address
end = self.stream.searchBytes("\xff\xd9", start, MAX_FILESIZE*8)
if end is not None:
return end + 16
return None
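# --- Illustrative usage sketch (not part of the parser). Assumes the Python 2
# hachoir-core/hachoir-parser packages are installed and that a JPEG path is
# passed as the first command-line argument. ---
if __name__ == '__main__':
    import sys
    from hachoir_core.stream import FileInputStream
    from hachoir_core.cmd_line import unicodeFilename
    stream = FileInputStream(unicodeFilename(sys.argv[1]))
    parser = JpegFile(stream)
    # Walk the top-level chunks (APP0, DQT, SOF, SOS, ...) and print them.
    for field in parser:
        print field.description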
| 0.008513 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://launchpad.net/wxbanker
# modeltests.py: Copyright 2007-2010 Mike Rooney <[email protected]>
#
# This file is part of wxBanker.
#
# wxBanker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wxBanker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with wxBanker. If not, see <http://www.gnu.org/licenses/>.
from wxbanker.tests import testbase
import os, datetime, unittest
from wxbanker import controller, bankexceptions, currencies
from wxbanker.lib.pubsub import Publisher
from wxbanker.bankobjects.account import Account
from wxbanker.mint import api as mintapi
from wxbanker.tests.testbase import today, yesterday, tomorrow
class ModelTests(testbase.TestCaseWithController):
def testRobustTransactionAmountParsing(self):
model = self.Controller.Model
a = model.CreateAccount("Test")
self.assertEquals(a.ParseAmount("3"), 3)
self.assertEquals(a.ParseAmount(".3"), .3)
self.assertEquals(a.ParseAmount(".31"), .31)
self.assertEquals(a.ParseAmount(",3"), .3)
self.assertEquals(a.ParseAmount(",31"), .31)
self.assertEquals(a.ParseAmount("1.5"), 1.5)
self.assertEquals(a.ParseAmount("1,5"), 1.5)
self.assertEquals(a.ParseAmount("10"), 10)
self.assertEquals(a.ParseAmount("10."), 10)
self.assertEquals(a.ParseAmount("10.1"), 10.1)
self.assertEquals(a.ParseAmount("10.23"), 10.23)
self.assertEquals(a.ParseAmount("10,"), 10)
self.assertEquals(a.ParseAmount("10,1"), 10.1)
self.assertEquals(a.ParseAmount("10,23"), 10.23)
self.assertEquals(a.ParseAmount("1 000"), 1000)
self.assertEquals(a.ParseAmount("1 000."), 1000)
self.assertEquals(a.ParseAmount("1 000,"), 1000)
self.assertEquals(a.ParseAmount("1,000"), 1000)
self.assertEquals(a.ParseAmount("1,000."), 1000)
self.assertEquals(a.ParseAmount("1,000.2"), 1000.2)
self.assertEquals(a.ParseAmount("1.000.23"), 1000.23)
self.assertEquals(a.ParseAmount("1 000.23"), 1000.23)
self.assertEquals(a.ParseAmount("1,000.23"), 1000.23)
self.assertEquals(a.ParseAmount("1.000,23"), 1000.23)
self.assertEquals(a.ParseAmount("1 000,23"), 1000.23)
self.assertEquals(a.ParseAmount("1234567890"), 1234567890)
def testFreshAccount(self):
a = self.Model.CreateAccount("Fresh")
self.assertEqual(a.Balance, 0)
self.assertEqual(a.Transactions, [])
self.assertEqual(a.Name, "Fresh")
def testCannotRemoveNonexistentAccount(self):
self.assertRaisesWithMsg(self.Model.RemoveAccount, ["Foo"], bankexceptions.InvalidAccountException, "Invalid account 'Foo' specified.")
def testCannotCreateAccountWithSameName(self):
a = self.Model.CreateAccount("A")
self.assertRaisesWithMsg(self.Model.CreateAccount, ["A"], bankexceptions.AccountAlreadyExistsException, "Account 'A' already exists.")
def testControllerIsAutoSavingByDefault(self):
self.assertTrue( self.Controller.AutoSave )
def testNewAccountIsSameCurrencyAsOthers(self):
# This test is only valid so long as only one currency is allowed.
# Otherwise it needs to test a new account gets the right default currency, probably Localized
model = self.Controller.Model
account = model.CreateAccount("Hello")
self.assertEqual(account.Currency, currencies.LocalizedCurrency())
account.Currency = currencies.EuroCurrency()
self.assertEqual(account.Currency, currencies.EuroCurrency())
account2 = model.CreateAccount("Another!")
self.assertEqual(account2.Currency, currencies.EuroCurrency())
def testLoadingTransactionsPreservesReferences(self):
a = self.Model.CreateAccount("A")
t = a.AddTransaction(1, "First")
self.assertEqual(t.Description, "First")
# When we do a.Transactions, the list gets loaded with new
# transaction objects, so let's see if the containership test works.
self.assertTrue(t in a.Transactions)
# 't' is the original transaction object created before Transactions
# was loaded, but it should be in the list due to magic.
t.Description = "Second"
self.assertEqual(a.Transactions[0].Description, "Second")
def testSimpleMove(self):
model1 = self.Controller.Model
a = model1.CreateAccount("A")
t1 = a.AddTransaction(-1)
b = model1.CreateAccount("B")
a.MoveTransaction(t1, b)
self.assertFalse(t1 in a.Transactions)
self.assertTrue(t1 in b.Transactions)
self.assertNotEqual(t1.Parent, a)
self.assertEqual(t1.Parent, b)
def testTransactionPropertyBug(self):
model1 = self.Controller.Model
a = model1.CreateAccount("A")
t1 = a.AddTransaction(-1)
self.assertEqual(len(a.Transactions), 1)
def testDirtyExitWarns(self):
"""
This test is kind of hilarious. We want to make sure we are warned of
exiting with a dirty model, so we create an account, register a callback
which will change its name when the dirty warning goes out, then trigger
a dirty exit and make sure the account name has changed.
"""
self.Controller.AutoSave = False
a = self.Model.CreateAccount("Unwarned!")
# Create and register our callback to test for the warning message.
def cb(message):
a.Name = "Warned"
Publisher.subscribe(cb, "warning.dirty exit")
# Now send the exiting message, which should cause our callback to fire if everything is well.
Publisher.sendMessage("exiting")
self.assertEqual(a.Name, "Warned")
def testAnnouncedAccountHasParent(self):
"""
Make sure the account has a Parent when it announces itself. To do this
we need to test this in a listener.
"""
parent = []
def listener(message):
account = message.data
parent.append(account.Parent)
# Subscribe our listener
Publisher.subscribe(listener, "account.created")
# Create an account, which should trigger the listener
baby = self.Model.CreateAccount("Baby")
# Make sure the listener updated state appropriately
self.assertTrue(parent)
def testSiblingsSingleAccount(self):
baby = self.Model.CreateAccount("Baby")
self.assertEqual(baby.GetSiblings(), [])
def testSiblingsTwoAccounts(self):
a = self.Model.CreateAccount("A")
b = self.Model.CreateAccount("B")
self.assertEqual(a.GetSiblings(), [b])
self.assertEqual(b.GetSiblings(), [a])
def testTransfersAreLinked(self):
a, b, atrans, btrans = self.createLinkedTransfers()
self.assertEqual(atrans.Parent, a)
self.assertEqual(btrans.Parent, b)
self.assertEqual(atrans.LinkedTransaction, btrans)
self.assertEqual(btrans.LinkedTransaction, atrans)
self.assertEqual(a.Transactions, [atrans])
self.assertEqual(b.Transactions, [btrans])
def testDeletingTransferDeletesBoth(self):
a, b, atrans, btrans = self.createLinkedTransfers()
model = self.Controller.Model
self.assertEqual(len(model.Accounts), 2)
self.assertEqual(model.GetTransactions(), [atrans, btrans])
self.assertEqual(model.Balance, 0)
self.assertEqual(len(a.Transactions), 1)
self.assertEqual(len(b.Transactions), 1)
a.RemoveTransaction(atrans)
self.assertEqual(len(a.Transactions), 0)
self.assertEqual(len(b.Transactions), 0)
self.assertEqual(model.GetTransactions(), [])
self.assertEqual(model.Balance, 0)
def testEmptyAccountNameInvalidForNewAccount(self):
self.assertRaises(bankexceptions.BlankAccountNameException, lambda: self.Controller.Model.CreateAccount(""), )
def testEmptyAccountNameInvalidForRename(self):
a = self.Controller.Model.CreateAccount("Test")
def blankName():
a.Name = ""
self.assertRaises(bankexceptions.BlankAccountNameException, blankName)
def testGetDateRangeWhenEmpty(self):
self.assertEqual(self.Controller.Model.GetDateRange(), (datetime.date.today(), datetime.date.today()))
def testGetDateRangeWithTransactions(self):
model = self.Controller.Model
a = model.CreateAccount("A")
a.AddTransaction(1, date=yesterday)
a.AddTransaction(1, date=tomorrow)
self.assertEqual(model.GetDateRange(), (yesterday, tomorrow))
def testGetDateRangeSorts(self):
# Make sure that the transactions don't need to be in order for GetDateRange to work.
model = self.Controller.Model
a = model.CreateAccount("A")
a.AddTransaction(1, date=today)
a.AddTransaction(1, date=yesterday)
self.assertEqual(model.GetDateRange(), (yesterday, today))
def testAccountRename(self):
model = self.Controller.Model
a = model.CreateAccount("A")
self.assertEqual(a.Name, "A")
a.Name = "B"
self.assertEqual(a.Name, "B")
self.assertRaisesWithMsg(model.RemoveAccount, ["A"], bankexceptions.InvalidAccountException, "Invalid account 'A' specified.")
def testTransactionDescriptionChange(self):
model = self.Controller.Model
a = model.CreateAccount("A")
t = a.AddTransaction(1, "test")
self.assertEqual(t.Description, "test")
t.Description = "new"
self.assertEqual(t.Description, "new")
def testBalanceIsUpdatedOnTransactionAdded(self):
model = self.Controller.Model
a = model.CreateAccount("A")
self.assertEqual(a.Balance, 0)
a.AddTransaction(1)
self.assertEqual(a.Balance, 1)
def testBalanceIsUpdatedOnTransactionRemoved(self):
model = self.Controller.Model
a = model.CreateAccount("A")
self.assertEqual(a.Balance, 0)
t = a.AddTransaction(1)
self.assertEqual(a.Balance, 1)
a.RemoveTransaction(t)
self.assertEqual(a.Balance, 0)
def testBalanceIsUpdatedOnTransactionAmountModified(self):
model = self.Controller.Model
a = model.CreateAccount("A")
self.assertEqual(a.Balance, 0)
t = a.AddTransaction(1)
self.assertEqual(a.Balance, 1)
t.Amount = 2
self.assertEqual(a.Balance, 2)
def testModelBalance(self):
model = self.Controller.Model
self.assertEqual(model.Balance, 0)
a = model.CreateAccount("A")
a.AddTransaction(1)
self.assertEqual(model.Balance, 1)
b = model.CreateAccount("B")
b.AddTransaction(2)
self.assertEqual(model.Balance, 3)
def testRemovingTransactionsReturnsSources(self):
model = self.Controller.Model
a = model.CreateAccount("A")
b = model.CreateAccount("B")
t = a.AddTransaction(1)
result = a.RemoveTransaction(t)
self.assertEqual(result, [None])
ta, tb = a.AddTransaction(1, source=b)
result = a.RemoveTransaction(ta)
self.assertEqual(result, [b], result[0].Name)
def testCanMoveTransferSource(self):
model = self.Controller.Model
a = model.CreateAccount("A")
b = model.CreateAccount("B")
atrans, btrans = a.AddTransaction(1, source=b)
self.assertEqual(len(model.GetTransactions()), 2)
self.assertEqual(model.Balance, 0)
self.assertEqual(atrans.Description, "Transfer from B")
self.assertEqual(btrans.Description, "Transfer to A")
c = model.CreateAccount("C")
b.MoveTransaction(btrans, c)
self.assertEqual(b.Transactions, [])
self.assertEqual(len(a.Transactions), 1)
self.assertEqual(len(c.Transactions), 1)
atrans = a.Transactions[0]
ctrans = c.Transactions[0]
self.assertEqual(atrans.LinkedTransaction, ctrans)
self.assertEqual(ctrans.LinkedTransaction, atrans)
self.assertEqual(atrans.Description, "Transfer from C")
self.assertEqual(ctrans.Description, "Transfer to A")
def testCanMoveTransferDestination(self):
model = self.Controller.Model
a = model.CreateAccount("A")
b = model.CreateAccount("B")
atrans, btrans = a.AddTransaction(1, source=b)
self.assertEqual(len(model.GetTransactions()), 2)
self.assertEqual(model.Balance, 0)
self.assertEqual(atrans.Description, "Transfer from B")
self.assertEqual(btrans.Description, "Transfer to A")
c = model.CreateAccount("C")
a.MoveTransaction(atrans, c)
self.assertEqual(a.Transactions, [])
self.assertEqual(len(b.Transactions), 1)
self.assertEqual(len(c.Transactions), 1)
btrans = b.Transactions[0]
ctrans = c.Transactions[0]
self.assertEqual(btrans.LinkedTransaction, ctrans)
self.assertEqual(ctrans.LinkedTransaction, btrans)
self.assertEqual(btrans.Description, "Transfer to C")
self.assertEqual(ctrans.Description, "Transfer from B")
def testTransferDescriptionWithoutDescription(self):
model = self.Controller.Model
a = model.CreateAccount("A")
b = model.CreateAccount("B")
at, bt = a.AddTransaction(1, source=b)
self.assertEqual(at._Description, "")
self.assertEqual(bt._Description, "")
self.assertEqual(at.Description, "Transfer from B")
self.assertEqual(bt.Description, "Transfer to A")
def testTransferDescriptionWithDescription(self):
model = self.Controller.Model
a = model.CreateAccount("A")
b = model.CreateAccount("B")
at, bt = a.AddTransaction(1, description="hello world", source=b)
self.assertEqual(at._Description, "hello world")
self.assertEqual(bt._Description, "hello world")
self.assertEqual(at.Description, "Transfer from B (hello world)")
self.assertEqual(bt.Description, "Transfer to A (hello world)")
def testTransferMoveDescriptionWithDescription(self):
model = self.Controller.Model
a = model.CreateAccount("A")
b = model.CreateAccount("B")
c = model.CreateAccount("C")
at, bt = a.AddTransaction(1, description="hello world", source=b)
a.MoveTransaction(at, c)
bt, ct = b.Transactions[0], c.Transactions[0]
self.assertEqual(ct._Description, "hello world")
self.assertEqual(bt._Description, "hello world")
self.assertEqual(ct.Description, "Transfer from B (hello world)")
self.assertEqual(bt.Description, "Transfer to C (hello world)")
def testUnicodeTransactionDescription(self):
unicodeString = u'¥'
unicodeString2 = u'¥2'
model = self.Controller.Model
a = model.CreateAccount("A")
t = a.AddTransaction(1, description=unicodeString)
self.assertEqual(t.Description, unicodeString)
t.Description = unicodeString2
self.assertEqual(t.Description, unicodeString2)
def testUnicodeSearch(self):
unicodeString = u'¥'
model = self.Controller.Model
a = model.CreateAccount("A")
self.assertEqual(model.Search(unicodeString), [])
t = a.AddTransaction(1, description=unicodeString)
self.assertEqual(model.Search(unicodeString), [t])
def testAccountsAreSorted(self):
model = self.Controller.Model
b = model.CreateAccount("B")
self.assertEqual(model.Accounts, [b])
a = model.CreateAccount("A")
self.assertEqual(model.Accounts, [a, b])
a.Name = "Z"
self.assertEqual(model.Accounts, [b, a])
def testDefaultLastAccountIsNone(self):
model = self.Controller.Model
self.assertEqual(model.LastAccountId, None)
def testLastAccountIsUpdated(self):
model = self.Controller.Model
a = model.CreateAccount("A")
self.assertEqual(model.LastAccountId, None)
Publisher.sendMessage("view.account changed", a)
self.assertEqual(model.LastAccountId, a.ID)
def testTransactionDateMassaging(self):
model = self.Controller.Model
t = model.CreateAccount("A").AddTransaction(1)
self.assertEqual(t.Date, today)
t.Date = "2001/01/01"
self.assertEqual(t.Date, datetime.date(2001, 1, 1))
t.Date = "2008-01-06"
self.assertEqual(t.Date, datetime.date(2008, 1, 6))
t.Date = "08-01-06"
self.assertEqual(t.Date, datetime.date(2008, 1, 6))
t.Date = "86-01-06"
self.assertEqual(t.Date, datetime.date(1986, 1, 6))
t.Date = "11-01-06"
self.assertEqual(t.Date, datetime.date(2011, 1, 6))
t.Date = "0-1-6"
self.assertEqual(t.Date, datetime.date(2000, 1, 6))
t.Date = "0/1/6"
self.assertEqual(t.Date, datetime.date(2000, 1, 6))
t.Date = datetime.date(2008, 1, 6)
self.assertEqual(t.Date, datetime.date(2008, 1, 6))
t.Date = None
self.assertEqual(t.Date, datetime.date.today())
def testDeletingAccountDoesNotDeleteSiblingLinkedTransfers(self):
"""If you close (delete) an account, it is still true that the transfers occurred."""
a, b, atrans, btrans = self.createLinkedTransfers()
model = self.Model
self.assertTrue(atrans in a.Transactions)
self.assertTrue(btrans in b.Transactions)
self.assertEqual(atrans.LinkedTransaction, btrans)
self.assertEqual(btrans.LinkedTransaction, atrans)
model.RemoveAccount(b.Name)
self.assertTrue(atrans in a.Transactions)
self.assertEqual(atrans.LinkedTransaction, None)
def testDefaultMintIntegrationIsFalse(self):
self.assertEqual(self.Model.MintEnabled, False)
def testAccountMintSync(self):
mintapi.Mint._CachedAccounts = {1218040: {'name': 'PayPal PayPal', 'balance': -4277.24}}
model = self.Model
a = model.CreateAccount("Foo")
self.assertFalse(a.IsMintEnabled())
self.assertRaises(bankexceptions.MintIntegrationException, a.IsInSync)
a.MintId = 1218040
self.assertEquals(1218040, a.GetMintId())
self.assertTrue(a.IsMintEnabled())
self.assertFalse(a.IsInSync())
# Add the balance and we should be in sync.
a.AddTransaction(-4277.24)
self.assertTrue(a.IsInSync())
self.assertEqual(a.GetSyncString(), "PayPal PayPal: -$4,277.24")
def testAccountMintSyncWithFutureDates(self):
mintapi.Mint._CachedAccounts = {1218040: {'name': 'foo', 'balance': -4277.24}}
model = self.Model
a = model.CreateAccount("Foo")
a.MintId = 1218040
# Add the balance and we should be in sync.
a.AddTransaction(-4277.24)
self.assertTrue(a.IsInSync())
# Add a transaction today, we should be out of sync.
t = a.AddTransaction(1)
self.assertFalse(a.IsInSync())
# Change the date to tomorrow, we should be back in sync as of today.
t.Date = tomorrow
self.assertTrue(a.IsInSync())
def testAccountMintIdIsInt(self):
model = self.Model
a = model.CreateAccount("Foo")
a.MintId = "12345"
self.assertEquals(a.MintId, 12345)
def testAccountCurrentBalance(self):
model = self.Model
a = model.CreateAccount("Bar")
self.assertEqual(a.Balance, 0)
self.assertEqual(a.CurrentBalance, 0)
t1 = a.AddTransaction(1)
self.assertEqual(a.Balance, 1)
self.assertEqual(a.CurrentBalance, 1)
t2 = a.AddTransaction(1, date=tomorrow)
self.assertEqual(a.Balance, 2)
self.assertEqual(a.CurrentBalance, 1)
# Make sure that it isn't just by transaction order but actually date.
t2.Date = today
t1.Date = tomorrow
self.assertEqual(a.Balance, 2)
self.assertEqual(a.CurrentBalance, 1)
def testAccountBalanceAndCurrencyNotNone(self):
model = self.Model
accounts = [
Account(model, None, "Foo"),
Account(model, None, "Bar", currency=None),
Account(model, None, "Food", balance=None),
Account(model, None, "Pub", currency=None, balance=None)
]
for account in accounts:
self.assertEqual(account.Currency, currencies.CurrencyList[0]())
self.assertEqual(account.Balance, 0.0)
self.assertTrue(type(account.Balance) is float)
if __name__ == "__main__":
unittest.main()
| 0.004196 |
import pytest
import h5py
from h5preserve import open as hp_open, H5PreserveFile
@pytest.mark.roundtrip
def test_roundtrip(tmpdir, obj_registry):
tmpfile = str(tmpdir.join("test_roundtrip.h5"))
with hp_open(tmpfile, registries=obj_registry["registries"], mode='x') as f:
f["first"] = obj_registry["dumpable_object"]
with hp_open(tmpfile, registries=obj_registry["registries"], mode='r') as f:
roundtripped = f["first"]
assert roundtripped == obj_registry["dumpable_object"]
@pytest.mark.roundtrip
def test_roundtrip_without_open(tmpdir, obj_registry):
tmpfile = str(tmpdir.join("test_roundtrip.h5"))
with H5PreserveFile(
h5py.File(tmpfile, 'x'), registries=obj_registry["registries"]
) as f:
f["first"] = obj_registry["dumpable_object"]
with H5PreserveFile(
h5py.File(tmpfile, 'r'), registries=obj_registry["registries"]
) as f:
roundtripped = f["first"]
assert roundtripped == obj_registry["dumpable_object"]
@pytest.mark.roundtrip
def test_roundtrip_with_defaults(tmpdir, obj_registry_with_defaults):
obj_registry = obj_registry_with_defaults
tmpfile = str(tmpdir.join("test_roundtrip.h5"))
with hp_open(tmpfile, registries=obj_registry["registries"], mode='x') as f:
f["first"] = obj_registry["dumpable_object"]
with hp_open(tmpfile, registries=obj_registry["registries"], mode='r') as f:
roundtripped = f["first"]
assert roundtripped == obj_registry["dumpable_object"]
@pytest.mark.roundtrip
def test_roundtrip_without_open_with_defaults(
tmpdir, obj_registry_with_defaults
):
obj_registry = obj_registry_with_defaults
tmpfile = str(tmpdir.join("test_roundtrip.h5"))
with H5PreserveFile(
h5py.File(tmpfile, mode='x'), registries=obj_registry["registries"]
) as f:
f["first"] = obj_registry["dumpable_object"]
with H5PreserveFile(
h5py.File(tmpfile, mode='r'), registries=obj_registry["registries"]
) as f:
roundtripped = f["first"]
assert roundtripped == obj_registry["dumpable_object"]
if hasattr(h5py, "Empty"):
@pytest.mark.roundtrip
def test_roundtrip_with_none(tmpdir, obj_registry_with_none):
tmpfile = str(tmpdir.join("test_roundtrip.h5"))
with hp_open(
tmpfile, registries=obj_registry_with_none["registries"], mode='x'
) as f:
f["first"] = obj_registry_with_none["dumpable_object"]
with hp_open(
tmpfile, registries=obj_registry_with_none["registries"], mode='r'
) as f:
roundtripped = f["first"]
assert roundtripped == obj_registry_with_none["dumpable_object"]
@pytest.mark.roundtrip
def test_roundtrip_without_open_with_none(tmpdir, obj_registry_with_none):
tmpfile = str(tmpdir.join("test_roundtrip.h5"))
with H5PreserveFile(
h5py.File(tmpfile, 'x'),
registries=obj_registry_with_none["registries"]
) as f:
f["first"] = obj_registry_with_none["dumpable_object"]
with H5PreserveFile(
h5py.File(tmpfile, 'r'),
registries=obj_registry_with_none["registries"]
) as f:
roundtripped = f["first"]
assert roundtripped == obj_registry_with_none["dumpable_object"]
@pytest.mark.roundtrip
def test_roundtrip_with_defaults_with_none(
tmpdir, obj_registry_with_none_with_defaults
):
obj_registry_with_none = obj_registry_with_none_with_defaults
tmpfile = str(tmpdir.join("test_roundtrip.h5"))
with hp_open(
tmpfile, registries=obj_registry_with_none["registries"], mode='x'
) as f:
f["first"] = obj_registry_with_none["dumpable_object"]
with hp_open(
tmpfile, registries=obj_registry_with_none["registries"], mode='r'
) as f:
roundtripped = f["first"]
assert roundtripped == obj_registry_with_none["dumpable_object"]
@pytest.mark.roundtrip
def test_roundtrip_without_open_with_defaults_with_none(
tmpdir, obj_registry_with_none_with_defaults
):
obj_registry_with_none = obj_registry_with_none_with_defaults
tmpfile = str(tmpdir.join("test_roundtrip.h5"))
with H5PreserveFile(
h5py.File(tmpfile, 'x'),
registries=obj_registry_with_none["registries"]
) as f:
f["first"] = obj_registry_with_none["dumpable_object"]
with H5PreserveFile(
h5py.File(tmpfile, 'r'),
registries=obj_registry_with_none["registries"]
) as f:
roundtripped = f["first"]
assert roundtripped == obj_registry_with_none["dumpable_object"]
| 0.001895 |
# -*- coding: utf-8 -*-
import importlib
import inspect
import urllib
__author__ = 'renzo'
class PathNotFound(Exception): pass
package_base = "web"
home_base = "home"
index_base = "index"
def _to_abs_package(package_slices):
if package_slices:
return package_base + "." + ".".join(package_slices)
return package_base
def _check_convention_params(args, convention_params):
convention_list = []
for a in args:
if a in convention_params:
convention_list.append(convention_params.get(a))
else:
break
return convention_list
def _check_params(params, convention_params, spec, **kwargs):
args = spec[0]
all_params = _check_convention_params(args, convention_params) + params
param_num = len(all_params)
max_args = len(args) if args else 0
defaults = spec[3]
defaults_num = len(defaults) if defaults else 0
min_args = max_args - defaults_num
kwargs_num = len(kwargs)
all_args_num = param_num + kwargs_num
if min_args <= all_args_num <= max_args: return all_params
varargs = spec[1]
method_kwargs = spec[2]
if varargs and method_kwargs: return all_params
if varargs and not kwargs and param_num >= min_args:
return all_params
if method_kwargs and param_num >= (min_args - kwargs_num):
return all_params
def _import_helper(package, module_name, fcn_name, params, convention_params, **kwargs):
try:
full_module = package + "." + module_name
module = importlib.import_module(full_module)
if hasattr(module, fcn_name):
fcn = getattr(module, fcn_name)
if fcn and inspect.isfunction(fcn):
all_params = _check_params(params, convention_params, inspect.getargspec(fcn), **kwargs)
if not (all_params is None):
return fcn, all_params
except ImportError:
pass
def _build_pack_and_slices(package, slices):
slice_number = min(len(slices), 2)
package = ".".join([package] + slices[:-slice_number])
path_slices = slices[-slice_number:]
return package, path_slices
def _search_full_path(package, path_slices, defaults=[], params=[], convention_params={}, **kwargs):
slices = path_slices + defaults
if len(slices) < 2: return
pack, slices = _build_pack_and_slices(package, slices)
result = _import_helper(pack, *slices, params=params, convention_params=convention_params, **kwargs)
if result or not path_slices:
return result
params.insert(0, path_slices.pop())
return _search_full_path(package, path_slices, defaults, params, convention_params, **kwargs)
def _maybe_import(package, path_slices, convention_params, **kwargs):
result = _search_full_path(package, path_slices[:], [], [], convention_params, **kwargs)
if result: return result
result = _search_full_path(package, path_slices[:], [index_base], [], convention_params, **kwargs)
if result: return result
result = _search_full_path(package, path_slices[:], [home_base, index_base], [], convention_params, **kwargs)
if result: return result
raise PathNotFound()
def to_handler(path, convention_params={}, **kwargs):
decoded_path = urllib.unquote(path)
path_slices = [d for d in decoded_path.split("/") if d != ""]
# Try importing package.handler.method
return _maybe_import(package_base, path_slices, convention_params, **kwargs)
def _build_params(*params):
if params:
def f(p):
if isinstance(p, basestring):
return urllib.quote(p)
return urllib.quote(str(p))
params = [f(p) for p in params]
return "/" + "/".join(params)
return ""
def to_path(handler, *params):
params = _build_params(*params)
if inspect.ismodule(handler):
name = handler.__name__
else:
name = handler.__module__ + "/" + handler.__name__
name = name.replace(package_base, "", 1)
def remove_from_end(path, suffix):
return path[:-len(suffix)-1] if path.endswith(suffix) else path
home_index = '/'.join((home_base, index_base))
name=remove_from_end(name,home_index)
name=remove_from_end(name,index_base)
if not name: return params or "/"
return name.replace(".", "/") + params
def _extract_full_module(klass):
return klass.__module__ + "/" + klass.__name__
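# --- Illustrative sketch (not executable here: the web.* handler modules it assumes
# are not part of this file). to_handler() unquotes the path and walks it
# right-to-left, treating trailing slices as parameters until it finds an importable
# package.module.function, falling back to the home/index defaults; unresolved paths
# raise PathNotFound. ---
#
# fcn, params = to_handler("/product/edit/42")   # e.g. edit() in web.product, params ["42"]
# response = fcn(*params)
#
# # to_path() is the inverse mapping, rebuilding a URL from a handler and parameters:
# # to_path(fcn, 42)   # -> "/product/edit/42"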
| 0.004776 |
"""ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from google.appengine._internal.antlr3.constants import EOF, DEFAULT_CHANNEL, INVALID_TOKEN_TYPE
############################################################################
#
# basic token interface
#
############################################################################
class Token(object):
"""@brief Abstract token baseclass."""
def getText(self):
"""@brief Get the text of the token.
Using setter/getter methods is deprecated. Use o.text instead.
"""
raise NotImplementedError
def setText(self, text):
"""@brief Set the text of the token.
Using setter/getter methods is deprecated. Use o.text instead.
"""
raise NotImplementedError
def getType(self):
"""@brief Get the type of the token.
Using setter/getter methods is deprecated. Use o.type instead."""
raise NotImplementedError
def setType(self, ttype):
"""@brief Get the type of the token.
Using setter/getter methods is deprecated. Use o.type instead."""
raise NotImplementedError
def getLine(self):
"""@brief Get the line number on which this token was matched
Lines are numbered 1..n
Using setter/getter methods is deprecated. Use o.line instead."""
raise NotImplementedError
def setLine(self, line):
"""@brief Set the line number on which this token was matched
Using setter/getter methods is deprecated. Use o.line instead."""
raise NotImplementedError
def getCharPositionInLine(self):
"""@brief Get the column of the tokens first character,
Columns are numbered 0..n-1
Using setter/getter methods is deprecated. Use o.charPositionInLine instead."""
raise NotImplementedError
def setCharPositionInLine(self, pos):
"""@brief Set the column of the tokens first character,
Using setter/getter methods is deprecated. Use o.charPositionInLine instead."""
raise NotImplementedError
def getChannel(self):
"""@brief Get the channel of the token
Using setter/getter methods is deprecated. Use o.channel instead."""
raise NotImplementedError
def setChannel(self, channel):
"""@brief Set the channel of the token
Using setter/getter methods is deprecated. Use o.channel instead."""
raise NotImplementedError
def getTokenIndex(self):
"""@brief Get the index in the input stream.
An index from 0..n-1 of the token object in the input stream.
This must be valid in order to use the ANTLRWorks debugger.
Using setter/getter methods is deprecated. Use o.index instead."""
raise NotImplementedError
def setTokenIndex(self, index):
"""@brief Set the index in the input stream.
Using setter/getter methods is deprecated. Use o.index instead."""
raise NotImplementedError
def getInputStream(self):
"""@brief From what character stream was this token created.
    You don't have to implement this, but it's nice to know where a Token
    comes from if you have include files etc. on the input."""
raise NotImplementedError
def setInputStream(self, input):
"""@brief From what character stream was this token created.
    You don't have to implement this, but it's nice to know where a Token
    comes from if you have include files etc. on the input."""
raise NotImplementedError
############################################################################
#
# token implementations
#
# Token
# +- CommonToken
# \- ClassicToken
#
############################################################################
class CommonToken(Token):
"""@brief Basic token implementation.
This implementation does not copy the text from the input stream upon
creation, but keeps start/stop pointers into the stream to avoid
unnecessary copy operations.
"""
def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None,
input=None, start=None, stop=None, oldToken=None):
Token.__init__(self)
if oldToken is not None:
self.type = oldToken.type
self.line = oldToken.line
self.charPositionInLine = oldToken.charPositionInLine
self.channel = oldToken.channel
self.index = oldToken.index
self._text = oldToken._text
if isinstance(oldToken, CommonToken):
self.input = oldToken.input
self.start = oldToken.start
self.stop = oldToken.stop
else:
self.type = type
self.input = input
self.charPositionInLine = -1 # set to invalid position
self.line = 0
self.channel = channel
#What token number is this from 0..n-1 tokens; < 0 implies invalid index
self.index = -1
# We need to be able to change the text once in a while. If
# this is non-null, then getText should return this. Note that
# start/stop are not affected by changing this.
self._text = text
# The char position into the input buffer where this token starts
self.start = start
# The char position into the input buffer where this token stops
# This is the index of the last char, *not* the index after it!
self.stop = stop
def getText(self):
if self._text is not None:
return self._text
if self.input is None:
return None
return self.input.substring(self.start, self.stop)
def setText(self, text):
"""
Override the text for this token. getText() will return this text
rather than pulling from the buffer. Note that this does not mean
that start/stop indexes are not valid. It means that that input
was converted to a new string in the token object.
"""
self._text = text
text = property(getText, setText)
def getType(self):
return self.type
def setType(self, ttype):
self.type = ttype
def getLine(self):
return self.line
def setLine(self, line):
self.line = line
def getCharPositionInLine(self):
return self.charPositionInLine
def setCharPositionInLine(self, pos):
self.charPositionInLine = pos
def getChannel(self):
return self.channel
def setChannel(self, channel):
self.channel = channel
def getTokenIndex(self):
return self.index
def setTokenIndex(self, index):
self.index = index
def getInputStream(self):
return self.input
def setInputStream(self, input):
self.input = input
def __str__(self):
if self.type == EOF:
return "<EOF>"
channelStr = ""
if self.channel > 0:
channelStr = ",channel=" + str(self.channel)
txt = self.text
if txt is not None:
txt = txt.replace("\n","\\\\n")
txt = txt.replace("\r","\\\\r")
txt = txt.replace("\t","\\\\t")
else:
txt = "<no text>"
return "[@%d,%d:%d=%r,<%d>%s,%d:%d]" % (
self.index,
self.start, self.stop,
txt,
self.type, channelStr,
self.line, self.charPositionInLine
)
class ClassicToken(Token):
"""@brief Alternative token implementation.
A Token object like we'd use in ANTLR 2.x; has an actual string created
and associated with this object. These objects are needed for imaginary
tree nodes that have payload objects. We need to create a Token object
that has a string; the tree node will point at this token. CommonToken
has indexes into a char stream and hence cannot be used to introduce
new strings.
"""
def __init__(self, type=None, text=None, channel=DEFAULT_CHANNEL,
oldToken=None
):
Token.__init__(self)
if oldToken is not None:
self.text = oldToken.text
self.type = oldToken.type
self.line = oldToken.line
self.charPositionInLine = oldToken.charPositionInLine
self.channel = oldToken.channel
self.text = text
self.type = type
self.line = None
self.charPositionInLine = None
self.channel = channel
self.index = None
def getText(self):
return self.text
def setText(self, text):
self.text = text
def getType(self):
return self.type
def setType(self, ttype):
self.type = ttype
def getLine(self):
return self.line
def setLine(self, line):
self.line = line
def getCharPositionInLine(self):
return self.charPositionInLine
def setCharPositionInLine(self, pos):
self.charPositionInLine = pos
def getChannel(self):
return self.channel
def setChannel(self, channel):
self.channel = channel
def getTokenIndex(self):
return self.index
def setTokenIndex(self, index):
self.index = index
def getInputStream(self):
return None
def setInputStream(self, input):
pass
def toString(self):
channelStr = ""
if self.channel > 0:
channelStr = ",channel=" + str(self.channel)
txt = self.text
if txt is None:
txt = "<no text>"
return "[@%r,%r,<%r>%s,%r:%r]" % (self.index,
txt,
self.type,
channelStr,
self.line,
self.charPositionInLine
)
__str__ = toString
__repr__ = toString
EOF_TOKEN = CommonToken(type=EOF)
INVALID_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
# In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR
# will avoid creating a token for this symbol and try to fetch another.
SKIP_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
| 0.003471 |
#
# Unpacker for Dean Edward's p.a.c.k.e.r, a part of javascript beautifier
# by Einar Lielmanis <[email protected]>
#
# written by Stefano Sanfilippo <[email protected]>
#
# usage:
#
# if detect(some_string):
# unpacked = unpack(some_string)
#
"""Unpacker for Dean Edward's p.a.c.k.e.r"""
import re
import string
from jsbeautifier.unpackers import UnpackingError
PRIORITY = 1
def detect(source):
"""Detects whether `source` is P.A.C.K.E.R. coded."""
return source.replace(' ', '').startswith('eval(function(p,a,c,k,e,r')
def unpack(source):
"""Unpacks P.A.C.K.E.R. packed js code."""
payload, symtab, radix, count = _filterargs(source)
if count != len(symtab):
raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')
try:
unbase = Unbaser(radix)
except TypeError:
raise UnpackingError('Unknown p.a.c.k.e.r. encoding.')
def lookup(match):
"""Look up symbols in the synthetic symtab."""
word = match.group(0)
return symtab[unbase(word)] or word
source = re.sub(r'\b\w+\b', lookup, payload)
return _replacestrings(source)
def _filterargs(source):
    """Extract from the packed source the four arguments needed by the decoder."""
argsregex = (r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\."
r"split\('\|'\), *(\d+), *(.*)\)\)")
args = re.search(argsregex, source, re.DOTALL).groups()
try:
return args[0], args[3].split('|'), int(args[1]), int(args[2])
except ValueError:
raise UnpackingError('Corrupted p.a.c.k.e.r. data.')
def _replacestrings(source):
"""Strip string lookup table (list) and replace values in source."""
match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL)
if match:
varname, strings = match.groups()
startpoint = len(match.group(0))
lookup = strings.split('","')
variable = '%s[%%d]' % varname
for index, value in enumerate(lookup):
source = source.replace(variable % index, '"%s"' % value)
return source[startpoint:]
return source
class Unbaser(object):
"""Functor for a given base. Will efficiently convert
strings to natural numbers."""
ALPHABET = {
62 : '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
95 : (' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'[\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
}
def __init__(self, base):
self.base = base
# If base can be handled by int() builtin, let it do it for us
if 2 <= base <= 36:
self.unbase = lambda string: int(string, base)
else:
# Build conversion dictionary cache
try:
self.dictionary = dict((cipher, index) for
index, cipher in enumerate(self.ALPHABET[base]))
except KeyError:
raise TypeError('Unsupported base encoding.')
self.unbase = self._dictunbaser
def __call__(self, string):
return self.unbase(string)
def _dictunbaser(self, string):
"""Decodes a value to an integer."""
ret = 0
for index, cipher in enumerate(string[::-1]):
ret += (self.base ** index) * self.dictionary[cipher]
return ret
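# --- Illustrative sketch (not part of the unpacker): how Unbaser maps packed symbols
# back to indices. Bases up to 36 delegate to int(); larger bases use the custom
# ALPHABET table above. ---
if __name__ == '__main__':
    assert Unbaser(16)('ff') == 255       # handled by int('ff', 16)
    unbase = Unbaser(62)
    assert unbase('a') == 10              # 11th symbol of the base-62 alphabet
    assert unbase('10') == 62             # '1','0' -> 1*62 + 0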
| 0.003028 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Authors: Laetitia Gangloff
# Copyright (c) 2015 Acsone SA/NV (http://www.acsone.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api, fields, models
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class BusinessProductLocation(models.Model):
_name = 'business.product.location'
name = fields.Char(string='Name', required=True)
product_ids = fields.One2many('business.product.line',
'business_product_location_id',
string='Products')
location_ids = fields.One2many('stock.location',
'business_usage_id',
string='Locations')
class BusinessProductLine(models.Model):
_name = 'business.product.line'
_rec_name = "product_id"
@api.model
def _default_product_uom_id(self):
return self.env.ref('product.product_uom_unit')
product_id = fields.Many2one('product.product', string='Product',
required=True)
product_qty = fields.Float(
'Product Quantity', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'))
product_uom_id = fields.Many2one('product.uom', 'Product Unit of Measure',
required=True,
default=_default_product_uom_id)
business_product_location_id = fields.Many2one(
'business.product.location', 'Parent business product location',
required=True)
@api.onchange('product_id')
def _onchange_product_id(self):
""" Change UoM if product_id changes
"""
if self.product_id:
self.product_uom_id = self.product_id.uom_id
@api.onchange('product_uom_id')
def _onchange_product_uom_id(self):
""" Check the selected UoM with the product UoM
"""
res = {}
if self.product_id and self.product_uom_id:
if self.product_id.uom_id.category_id.id != \
self.product_uom_id.category_id.id:
res['warning'] = {
'title': _('Warning'),
'message': _('The Product Unit of Measure you chose '
'has a different category than in the '
'product form.')}
self.product_uom_id = self.product_id.uom_id
class Product(models.Model):
_inherit = 'product.product'
business_usage_ids = fields.One2many('business.product.line', 'product_id',
'Business Usage')
class StockLocation(models.Model):
_inherit = 'stock.location'
business_usage_id = fields.Many2one('business.product.location',
'Business Usage')
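# --- Illustrative sketch (not part of the addon): creating records through the
# new-API ORM, e.g. from a test or another model method; `product` is assumed to be
# an existing product.product record. ---
#
# location = self.env['business.product.location'].create({'name': 'Workshop'})
# self.env['business.product.line'].create({
#     'product_id': product.id,
#     'product_qty': 2.0,
#     'business_product_location_id': location.id,
# })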
| 0 |
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import pyglet
from pyglet.libs.darwin import carbon, _oscheck, create_cfstring
from pyglet.libs.darwin.constants import *
from base import Device, Control, AbsoluteAxis, RelativeAxis, Button
from base import Joystick, AppleRemote
from base import DeviceExclusiveException
# non-broken c_void_p
void_p = ctypes.POINTER(ctypes.c_int)
class CFUUIDBytes(ctypes.Structure):
_fields_ = [('byte%d' % i, ctypes.c_uint8) for i in range(16)]
mach_port_t = void_p
io_iterator_t = void_p
kern_return_t = ctypes.c_int
IOReturn = ctypes.c_uint
CFDictionaryRef = void_p
CFMutableDictionaryRef = void_p
CFArrayRef = void_p
CFStringRef = void_p
CFUUIDRef = ctypes.POINTER(CFUUIDBytes)
AbsoluteTime = ctypes.c_double
HRESULT = ctypes.c_int
REFIID = CFUUIDBytes
IOHIDElementType = ctypes.c_int
kIOHIDElementTypeInput_Misc = 1
kIOHIDElementTypeInput_Button = 2
kIOHIDElementTypeInput_Axis = 3
kIOHIDElementTypeInput_ScanCodes = 4
kIOHIDElementTypeOutput = 129
kIOHIDElementTypeFeature = 257
kIOHIDElementTypeCollection = 513
IOHIDElementCookie = ctypes.c_void_p
# Full list in IOHIDUsageTables.h
kHIDPage_GenericDesktop = 0x01
kHIDUsage_GD_Joystick = 0x04
kHIDUsage_GD_GamePad = 0x05
kHIDUsage_GD_Keyboard = 0x06
kHIDUsage_GD_Keypad = 0x07
kHIDUsage_GD_MultiAxisController = 0x08
kHIDUsage_GD_SystemAppMenu = 0x86
kHIDUsage_GD_SystemMenu = 0x89
kHIDUsage_GD_SystemMenuRight = 0x8A
kHIDUsage_GD_SystemMenuLeft = 0x8B
kHIDUsage_GD_SystemMenuUp = 0x8C
kHIDUsage_GD_SystemMenuDown = 0x8D
kHIDPage_Consumer = 0x0C
kHIDUsage_Csmr_Menu = 0x40
kHIDUsage_Csmr_FastForward = 0xB3
kHIDUsage_Csmr_Rewind = 0xB4
MACH_PORT_NULL = 0
kIOHIDDeviceKey = "IOHIDDevice"
kIOServicePlane = "IOService"
kIOHIDProductIDKey = "ProductID"
kCFNumberIntType = 9
kIOHIDOptionsTypeSeizeDevice = 1
kIOReturnExclusiveAccess = 0xe00002c5
carbon.CFUUIDGetConstantUUIDWithBytes.restype = CFUUIDRef
kIOHIDDeviceUserClientTypeID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0xFA, 0x12, 0xFA, 0x38, 0x6F, 0x1A, 0x11, 0xD4,
0xBA, 0x0C, 0x00, 0x05, 0x02, 0x8F, 0x18, 0xD5)
kIOCFPlugInInterfaceID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0xC2, 0x44, 0xE8, 0x58, 0x10, 0x9C, 0x11, 0xD4,
0x91, 0xD4, 0x00, 0x50, 0xE4, 0xC6, 0x42, 0x6F)
kIOHIDDeviceInterfaceID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0x78, 0xBD, 0x42, 0x0C, 0x6F, 0x14, 0x11, 0xD4,
0x94, 0x74, 0x00, 0x05, 0x02, 0x8F, 0x18, 0xD5)
IOHIDCallbackFunction = ctypes.CFUNCTYPE(None,
void_p, IOReturn, ctypes.c_void_p, ctypes.c_void_p)
CFRunLoopSourceRef = ctypes.c_void_p
class IOHIDEventStruct(ctypes.Structure):
_fields_ = (
('type', IOHIDElementType),
('elementCookie', IOHIDElementCookie),
('value', ctypes.c_int32),
('timestamp', AbsoluteTime),
('longValueSize', ctypes.c_uint32),
('longValue', ctypes.c_void_p)
)
Self = ctypes.c_void_p
class IUnknown(ctypes.Structure):
_fields_ = (
('_reserved', ctypes.c_void_p),
('QueryInterface',
ctypes.CFUNCTYPE(HRESULT, Self, REFIID, ctypes.c_void_p)),
('AddRef',
ctypes.CFUNCTYPE(ctypes.c_ulong, Self)),
('Release',
ctypes.CFUNCTYPE(ctypes.c_ulong, Self)),
)
# Most of these function-pointer slots are left as plain c_void_p placeholders;
# only the entries that are actually called below have real CFUNCTYPE
# signatures filled in.
class IOHIDQueueInterface(ctypes.Structure):
_fields_ = IUnknown._fields_ + (
('createAsyncEventSource', ctypes.CFUNCTYPE(IOReturn,
Self, ctypes.POINTER(CFRunLoopSourceRef))),
('getAsyncEventSource', ctypes.c_void_p),
('createAsyncPort', ctypes.c_void_p),
('getAsyncPort', ctypes.c_void_p),
('create', ctypes.CFUNCTYPE(IOReturn,
Self, ctypes.c_uint32, ctypes.c_uint32)),
('dispose', ctypes.CFUNCTYPE(IOReturn,
Self)),
('addElement', ctypes.CFUNCTYPE(IOReturn,
Self, IOHIDElementCookie)),
('removeElement', ctypes.c_void_p),
('hasElement', ctypes.c_void_p),
('start', ctypes.CFUNCTYPE(IOReturn,
Self)),
('stop', ctypes.CFUNCTYPE(IOReturn,
Self)),
('getNextEvent', ctypes.CFUNCTYPE(IOReturn,
Self, ctypes.POINTER(IOHIDEventStruct), AbsoluteTime,
ctypes.c_uint32)),
('setEventCallout', ctypes.CFUNCTYPE(IOReturn,
Self, IOHIDCallbackFunction, ctypes.c_void_p, ctypes.c_void_p)),
('getEventCallout', ctypes.c_void_p),
)
class IOHIDDeviceInterface(ctypes.Structure):
_fields_ = IUnknown._fields_ + (
('createAsyncEventSource', ctypes.c_void_p),
('getAsyncEventSource', ctypes.c_void_p),
('createAsyncPort', ctypes.c_void_p),
('getAsyncPort', ctypes.c_void_p),
('open', ctypes.CFUNCTYPE(IOReturn,
Self, ctypes.c_uint32)),
('close', ctypes.CFUNCTYPE(IOReturn,
Self)),
('setRemovalCallback', ctypes.c_void_p),
('getElementValue', ctypes.CFUNCTYPE(IOReturn,
Self, IOHIDElementCookie, ctypes.POINTER(IOHIDEventStruct))),
('setElementValue', ctypes.c_void_p),
('queryElementValue', ctypes.c_void_p),
('startAllQueues', ctypes.c_void_p),
('stopAllQueues', ctypes.c_void_p),
('allocQueue', ctypes.CFUNCTYPE(
ctypes.POINTER(ctypes.POINTER(IOHIDQueueInterface)),
Self)),
('allocOutputTransaction', ctypes.c_void_p),
# 1.2.1 (10.2.3)
('setReport', ctypes.c_void_p),
('getReport', ctypes.c_void_p),
# 1.2.2 (10.3)
('copyMatchingElements', ctypes.CFUNCTYPE(IOReturn,
Self, CFDictionaryRef, ctypes.POINTER(CFArrayRef))),
('setInterruptReportHandlerCallback', ctypes.c_void_p),
)
def get_master_port():
master_port = mach_port_t()
_oscheck(
carbon.IOMasterPort(MACH_PORT_NULL, ctypes.byref(master_port))
)
return master_port
def get_matching_dictionary():
carbon.IOServiceMatching.restype = CFMutableDictionaryRef
matching_dictionary = carbon.IOServiceMatching(kIOHIDDeviceKey)
return matching_dictionary
def get_matching_services(master_port, matching_dictionary):
# Consumes reference to matching_dictionary
iterator = io_iterator_t()
_oscheck(
carbon.IOServiceGetMatchingServices(master_port,
matching_dictionary,
ctypes.byref(iterator))
)
services = []
while carbon.IOIteratorIsValid(iterator):
service = carbon.IOIteratorNext(iterator)
if not service:
break
services.append(service)
carbon.IOObjectRelease(iterator)
return services
def cfstring_to_string(value_string):
value_length = carbon.CFStringGetLength(value_string)
buffer_length = carbon.CFStringGetMaximumSizeForEncoding(
value_length, kCFStringEncodingUTF8)
buffer = ctypes.c_buffer(buffer_length + 1)
result = carbon.CFStringGetCString(value_string,
buffer,
len(buffer),
kCFStringEncodingUTF8)
if not result:
return
return buffer.value
def cfnumber_to_int(value):
result = ctypes.c_int()
carbon.CFNumberGetValue(value, kCFNumberIntType, ctypes.byref(result))
return result.value
def cfboolean_to_bool(value):
return bool(carbon.CFBooleanGetValue(value))
def cfvalue_to_value(value):
if not value:
return None
value_type = carbon.CFGetTypeID(value)
if value_type == carbon.CFStringGetTypeID():
return cfstring_to_string(value)
elif value_type == carbon.CFNumberGetTypeID():
return cfnumber_to_int(value)
elif value_type == carbon.CFBooleanGetTypeID():
return cfboolean_to_bool(value)
else:
return None
def get_property_value(properties, key):
key_string = create_cfstring(key)
value = ctypes.c_void_p()
present = carbon.CFDictionaryGetValueIfPresent(properties,
key_string,
ctypes.byref(value))
carbon.CFRelease(key_string)
if not present:
return None
return value
def get_property(properties, key):
return cfvalue_to_value(get_property_value(properties, key))
def dump_properties(properties):
def func(key, value, context):
print '%s = %s' % (cfstring_to_string(key), cfvalue_to_value(value))
CFDictionaryApplierFunction = ctypes.CFUNCTYPE(None,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
carbon.CFDictionaryApplyFunction(properties,
CFDictionaryApplierFunction(func), None)
class DarwinHIDDevice(Device):
'''
:IVariables:
`name` : str
`manufacturer` : str
'''
def __init__(self, display, generic_device):
super(DarwinHIDDevice, self).__init__(display, name=None)
self._device = self._get_device_interface(generic_device)
properties = CFMutableDictionaryRef()
_oscheck(
carbon.IORegistryEntryCreateCFProperties(generic_device,
ctypes.byref(properties),
None, 0)
)
self.name = get_property(properties, "Product")
self.manufacturer = get_property(properties, "Manufacturer")
self.usage_page = get_property(properties, 'PrimaryUsagePage')
self.usage = get_property(properties, 'PrimaryUsage')
carbon.CFRelease(properties)
self._controls = self._init_controls()
self._open = False
self._queue = None
self._queue_depth = 8 # Number of events queue can buffer
def _get_device_interface(self, generic_device):
plug_in_interface = \
ctypes.POINTER(ctypes.POINTER(IUnknown))()
score = ctypes.c_int32()
_oscheck(
carbon.IOCreatePlugInInterfaceForService(
generic_device,
kIOHIDDeviceUserClientTypeID,
kIOCFPlugInInterfaceID,
ctypes.byref(plug_in_interface),
ctypes.byref(score))
)
carbon.CFUUIDGetUUIDBytes.restype = CFUUIDBytes
hid_device_interface = \
ctypes.POINTER(ctypes.POINTER(IOHIDDeviceInterface))()
_oscheck(
plug_in_interface.contents.contents.QueryInterface(
plug_in_interface,
carbon.CFUUIDGetUUIDBytes(kIOHIDDeviceInterfaceID),
ctypes.byref(hid_device_interface))
)
plug_in_interface.contents.contents.Release(plug_in_interface)
return hid_device_interface
def _init_controls(self):
elements_array = CFArrayRef()
_oscheck(
self._device.contents.contents.copyMatchingElements(self._device,
None, ctypes.byref(elements_array))
)
self._control_cookies = {}
controls = []
n_elements = carbon.CFArrayGetCount(elements_array)
for i in range(n_elements):
properties = carbon.CFArrayGetValueAtIndex(elements_array, i)
control = _create_control(properties)
if control:
controls.append(control)
self._control_cookies[control._cookie] = control
carbon.CFRelease(elements_array)
return controls
def open(self, window=None, exclusive=False):
super(DarwinHIDDevice, self).open(window, exclusive)
flags = 0
if exclusive:
flags |= kIOHIDOptionsTypeSeizeDevice
result = self._device.contents.contents.open(self._device, flags)
if result == 0:
self._open = True
elif result == kIOReturnExclusiveAccess:
raise DeviceExclusiveException()
# Create event queue
self._queue = self._device.contents.contents.allocQueue(self._device)
_oscheck(
self._queue.contents.contents.create(self._queue,
0, self._queue_depth)
)
# Add all controls into queue
for control in self._controls:
r = self._queue.contents.contents.addElement(self._queue,
control._cookie, 0)
if r != 0:
print 'error adding %r' % control
self._event_source = CFRunLoopSourceRef()
self._queue_callback_func = IOHIDCallbackFunction(self._queue_callback)
_oscheck(
self._queue.contents.contents.createAsyncEventSource(self._queue,
ctypes.byref(self._event_source))
)
_oscheck(
self._queue.contents.contents.setEventCallout(self._queue,
self._queue_callback_func, None, None)
)
event_loop = pyglet.app.platform_event_loop._event_loop
carbon.GetCFRunLoopFromEventLoop.restype = void_p
run_loop = carbon.GetCFRunLoopFromEventLoop(event_loop)
kCFRunLoopDefaultMode = \
CFStringRef.in_dll(carbon, 'kCFRunLoopDefaultMode')
carbon.CFRunLoopAddSource(run_loop,
self._event_source,
kCFRunLoopDefaultMode)
_oscheck(
self._queue.contents.contents.start(self._queue)
)
def close(self):
super(DarwinHIDDevice, self).close()
if not self._open:
return
_oscheck(
self._queue.contents.contents.stop(self._queue)
)
_oscheck(
self._queue.contents.contents.dispose(self._queue)
)
self._queue.contents.contents.Release(self._queue)
self._queue = None
_oscheck(
self._device.contents.contents.close(self._device)
)
self._open = False
def get_controls(self):
return self._controls
def _queue_callback(self, target, result, refcon, sender):
if not self._open:
return
event = IOHIDEventStruct()
r = self._queue.contents.contents.getNextEvent(self._queue,
ctypes.byref(event), 0, 0)
while r == 0:
try:
control = self._control_cookies[event.elementCookie]
control._set_value(event.value)
except KeyError:
pass
r = self._queue.contents.contents.getNextEvent(self._queue,
ctypes.byref(event), 0, 0)
_axis_names = {
(0x01, 0x30): 'x',
(0x01, 0x31): 'y',
(0x01, 0x32): 'z',
(0x01, 0x33): 'rx',
(0x01, 0x34): 'ry',
(0x01, 0x35): 'rz',
(0x01, 0x38): 'wheel',
(0x01, 0x39): 'hat',
}
_button_names = {
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemAppMenu): 'menu',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenu): 'select',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenuRight): 'right',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenuLeft): 'left',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenuUp): 'up',
(kHIDPage_GenericDesktop, kHIDUsage_GD_SystemMenuDown): 'down',
(kHIDPage_Consumer, kHIDUsage_Csmr_FastForward): 'right_hold',
(kHIDPage_Consumer, kHIDUsage_Csmr_Rewind): 'left_hold',
(kHIDPage_Consumer, kHIDUsage_Csmr_Menu): 'menu_hold',
(0xff01, 0x23): 'select_hold',
}
def _create_control(properties):
type = get_property(properties, 'Type')
if type not in (kIOHIDElementTypeInput_Misc,
kIOHIDElementTypeInput_Axis,
kIOHIDElementTypeInput_Button):
return
cookie = get_property(properties, 'ElementCookie')
usage_page = get_property(properties, 'UsagePage')
usage = get_property(properties, 'Usage')
raw_name = get_property(properties, 'Name')
if not raw_name:
raw_name = '%d:%d' % (usage_page, usage)
if type in (kIOHIDElementTypeInput_Misc, kIOHIDElementTypeInput_Axis):
name = _axis_names.get((usage_page, usage))
relative = get_property(properties, 'IsRelative')
if relative:
control = RelativeAxis(name, raw_name)
else:
min = get_property(properties, 'Min')
max = get_property(properties, 'Max')
control = AbsoluteAxis(name, min, max, raw_name)
elif type == kIOHIDElementTypeInput_Button:
name = _button_names.get((usage_page, usage))
control = Button(name, raw_name)
else:
return
control._cookie = cookie
return control
def _create_joystick(device):
    # Ignore desktop devices that are not joysticks, gamepads or multi-axis controllers.
if device.usage_page == kHIDPage_GenericDesktop and \
device.usage not in (kHIDUsage_GD_Joystick,
kHIDUsage_GD_GamePad,
kHIDUsage_GD_MultiAxisController):
return
# Anything else is interesting enough to be a joystick?
return Joystick(device)
def get_devices(display=None):
services = get_matching_services(get_master_port(),
get_matching_dictionary())
return [DarwinHIDDevice(display, service) for service in services]
def get_joysticks(display=None):
return filter(None,
[_create_joystick(device) for device in get_devices(display)])
def get_apple_remote(display=None):
for device in get_devices(display):
if device.name == 'Apple IR':
return AppleRemote(device)
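# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of enumerating HID devices with the helpers above.
# It relies only on names defined in this file (get_devices and the
# DarwinHIDDevice attributes) and is meant for Mac OS X with this legacy
# Carbon backend.
def demo_list_devices():
    for device in get_devices():
        print '%s (%s), usage page 0x%x' % (device.name, device.manufacturer,
                                            device.usage_page or 0)
        for control in device.get_controls():
            print '    control:', control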
| 0.005295 |
'''
PyMySQL: A pure-Python drop-in replacement for MySQLdb.
Copyright (c) 2010 PyMySQL contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
VERSION = (0, 5, None)
from constants import FIELD_TYPE
from converters import escape_dict, escape_sequence, escape_string
from err import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError, MySQLError
from times import Date, Time, Timestamp, \
DateFromTicks, TimeFromTicks, TimestampFromTicks
import sys
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
try:
from sets import BaseSet as set
except ImportError:
from sets import Set as set
threadsafety = 1
apilevel = "2.0"
paramstyle = "format"
class DBAPISet(frozenset):
def __ne__(self, other):
if isinstance(other, set):
            return frozenset.__ne__(self, other)
else:
return other not in self
def __eq__(self, other):
if isinstance(other, frozenset):
return frozenset.__eq__(self, other)
else:
return other in self
def __hash__(self):
return frozenset.__hash__(self)
STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING,
FIELD_TYPE.VAR_STRING])
BINARY = DBAPISet([FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB,
FIELD_TYPE.MEDIUM_BLOB, FIELD_TYPE.TINY_BLOB])
NUMBER = DBAPISet([FIELD_TYPE.DECIMAL, FIELD_TYPE.DOUBLE, FIELD_TYPE.FLOAT,
FIELD_TYPE.INT24, FIELD_TYPE.LONG, FIELD_TYPE.LONGLONG,
FIELD_TYPE.TINY, FIELD_TYPE.YEAR])
DATE = DBAPISet([FIELD_TYPE.DATE, FIELD_TYPE.NEWDATE])
TIME = DBAPISet([FIELD_TYPE.TIME])
TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME])
DATETIME = TIMESTAMP
ROWID = DBAPISet()
def Binary(x):
"""Return x as a binary type."""
return str(x)
def Connect(*args, **kwargs):
"""
Connect to the database; see connections.Connection.__init__() for
more information.
"""
from connections import Connection
return Connection(*args, **kwargs)
from pymysql import connections as _orig_conn
Connect.__doc__ = _orig_conn.Connection.__init__.__doc__ + """\nSee connections.Connection.__init__() for
information about defaults."""
del _orig_conn
def get_client_info(): # for MySQLdb compatibility
return '%s.%s.%s' % VERSION
connect = Connection = Connect
# we include a doctored version_info here for MySQLdb compatibility
version_info = (1,2,2,"final",0)
NULL = "NULL"
__version__ = get_client_info()
def thread_safe():
return True # match MySQLdb.thread_safe()
def install_as_MySQLdb():
"""
After this function is called, any application that imports MySQLdb or
    _mysql will unwittingly actually use pymysql.
"""
sys.modules["MySQLdb"] = sys.modules["_mysql"] = sys.modules["pymysql"]
__all__ = [
'BINARY', 'Binary', 'Connect', 'Connection', 'DATE', 'Date',
'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks',
'DataError', 'DatabaseError', 'Error', 'FIELD_TYPE', 'IntegrityError',
'InterfaceError', 'InternalError', 'MySQLError', 'NULL', 'NUMBER',
'NotSupportedError', 'DBAPISet', 'OperationalError', 'ProgrammingError',
'ROWID', 'STRING', 'TIME', 'TIMESTAMP', 'Warning', 'apilevel', 'connect',
'connections', 'constants', 'converters', 'cursors',
'escape_dict', 'escape_sequence', 'escape_string', 'get_client_info',
'paramstyle', 'threadsafety', 'version_info',
"install_as_MySQLdb",
"NULL","__version__",
]
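# --- Usage sketch (not part of the original module) ---
# Typical DB-API 2.0 style usage of this package. The keyword arguments to
# connect() are assumptions here, since connections.Connection.__init__ is
# defined elsewhere in the package.
def _demo_query():
    conn = connect(host='localhost', user='root', passwd='', db='test')
    try:
        cur = conn.cursor()
        cur.execute("SELECT VERSION()")
        print cur.fetchone()
    finally:
        conn.close()
# Legacy applications written against MySQLdb can be redirected to this
# package by calling install_as_MySQLdb() before they import MySQLdb.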
| 0.007299 |
#!/usr/bin/env python
# coding: utf8
from __future__ import print_function
from collections import defaultdict
from datetime import datetime
import argparse
import base64
import eviltransform
import json
import logging
import os
import pickle
import time
import urllib2
# Initialize logging.
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] %(levelname)s: %(message)s')
# Names starting with '.' are calculated in get_hotel_field() below.
HOTEL_FIELDS = ('hotel_id', '.lat', '.lon', 'name', 'address', 'class', '.rate', 'ranking', 'review_score', 'url', 'hoteltype_id', '.trans')
class BookingApi:
def __init__(self, login, password):
self.login = login
self.password = password
self.baseConfig = {
"headers": {
"Content-Type": "application/json",
"Authorization": "Basic " + base64.encodestring(
"{login}:{password}".format(login=self.login, password=self.password)).replace('\n', '')
},
"url": 'https://distribution-xml.booking.com/json/bookings'}
self.checkMinute = 0
self.requestPerMinute = 0
        self.requestLimit = 15  # requests per minute
def call(self, function, params=None):
self.requestPerMinute += 1
now = datetime.utcnow()
if self.requestPerMinute >= self.requestLimit:
waittime = 60 - now.second
logging.warning("Limit for request per minute exceeded. Waiting for: {0} sec.".format(waittime))
time.sleep(waittime)
now = datetime.utcnow()
if self.checkMinute != now.minute:
self.requestPerMinute = 0
self.checkMinute = now.minute
payload = ''
try:
p = "" if not params else '?' + "&".join(
["{key}={value}".format(key=k, value=v) for (k, v) in params.iteritems()])
url = "{base}.{func}{params}".format(base=self.baseConfig["url"], func=function, params=p)
logging.debug("{0} {1} API call:{2}".format(self.checkMinute, self.requestPerMinute, url))
request = urllib2.Request(url, None, self.baseConfig["headers"])
stream = urllib2.urlopen(request)
payload = stream.read()
data = json.loads(payload)
if isinstance(data, dict) and 'message' in data and 'code' in data:
logging.error('Api call failed with error: {0} Code: {1}'.format(data['message'], data['code']))
return None
return data
except Exception as e:
logging.error('Error: {0} Context: {1}'.format(e, payload))
return None
def download(user, password, path):
'''
    Downloads all hotels from booking.com and stores them as one .pkl file per country.
'''
api = BookingApi(user, password)
maxrows = 1000
countries = api.call("getCountries", dict(languagecodes='en'))
for country in countries:
countrycode = country['countrycode']
logging.info(u'Download[{0}]: {1}'.format(countrycode, country['name']))
allhotels = {}
while True:
hotels = api.call('getHotels',
dict(new_hotel_type=1, offset=len(allhotels), rows=maxrows, countrycodes=countrycode))
# Check for error.
if hotels is None:
logging.critical('No hotels downloaded for country {0}'.format(country['name']))
break
for h in hotels:
allhotels[h['hotel_id']] = h
            # If the answer contains fewer hotels than maxrows, we have reached the end of the data.
if len(hotels) < maxrows:
break
if not hotels:
continue
# Now the same for hotel translations
offset = 0
while True:
hotels = api.call('getHotelTranslations', dict(offset=offset, rows=maxrows, countrycodes=countrycode))
if hotels is None:
exit(1)
# Add translations for each hotel
for h in hotels:
if h['hotel_id'] in allhotels:
if 'translations' not in allhotels[h['hotel_id']]:
allhotels[h['hotel_id']]['translations'] = {}
allhotels[h['hotel_id']]['translations'][h['languagecode']] = {'name': h['name'], 'address': h['address']}
offset += len(hotels)
if len(hotels) < maxrows:
break
logging.info('Num of hotels: {0}, translations: {1}'.format(len(allhotels), offset))
filename = os.path.join(path,
'{0} - {1}.pkl'.format(country['area'].encode('utf8'), country['name'].encode('utf8')))
with open(filename, 'wb') as fd:
pickle.dump(allhotels.values(), fd, pickle.HIGHEST_PROTOCOL)
def translate(source, output):
'''
    Reads the downloaded *.pkl files and produces a single tab-separated list of hotels.
'''
files = [os.path.join(source, filename)
for filename in os.listdir(source) if filename.endswith('.pkl')]
data = []
for filename in sorted(files):
logging.info('Processing {0}'.format(filename))
with open(filename, 'rb') as fd:
data += pickle.load(fd)
    # Fix Chinese coordinates (convert GCJ-02 to WGS-84).
for hotel in data:
if hotel['countrycode'] == 'cn' and 'location' in hotel:
try:
hotel['location']['latitude'], hotel['location']['longitude'] = eviltransform.gcj2wgs_exact(
float(hotel['location']['latitude']), float(hotel['location']['longitude']))
except ValueError:
# We don't care if there were errors converting coordinates to float
pass
# Dict of dicts city_id -> { currency -> [prices] }
cities = defaultdict(lambda: defaultdict(list))
def valid(hotel):
return 'city_id' in hotel and 'currencycode' in hotel and 'minrate' in hotel and hotel['minrate'] is not None
# Collect prices
for hotel in data:
if valid(hotel):
cities[hotel['city_id']][hotel['currencycode']].append(float(hotel['minrate']))
    # Replace each city's list of prices with its median price.
for city in cities:
for cur in cities[city]:
cities[city][cur] = sorted(cities[city][cur])[len(cities[city][cur]) / 2]
# Price rate ranges, relative to the median price for a city
rates = (0.7, 1.3)
def get_hotel_field(hotel, field, rate):
if field == '.lat':
return hotel['location']['latitude']
elif field == '.lon':
return hotel['location']['longitude']
elif field == '.rate':
return rate
elif field == '.trans':
# Translations are packed into a single column: lang1|name1|address1|lang2|name2|address2|...
if 'translations' in hotel:
tr_list = []
for tr_lang, tr_values in hotel['translations'].items():
tr_list.append(tr_lang)
tr_list.extend([tr_values[e] for e in ('name', 'address')])
return '|'.join([s.replace('|', ';') for s in tr_list])
else:
return ''
elif field in hotel:
return hotel[field]
elif field == 'ranking':
# This field is not used yet, and booking.com sometimes blocks it.
return ''
logging.error('Unknown hotel field: {0}, URL: {1}'.format(field, hotel['url']))
return ''
with open(output, 'w') as fd:
for hotel in data:
rate = 0
if valid(hotel):
avg = cities[hotel['city_id']][hotel['currencycode']]
price = float(hotel['minrate'])
rate = 1
# Find a range that contains the price
while rate <= len(rates) and price > avg * rates[rate - 1]:
rate += 1
l = [get_hotel_field(hotel, e, rate) for e in HOTEL_FIELDS]
print('\t'.join([unicode(f).encode('utf8').replace('\t', ' ').replace('\n', ' ').replace('\r', '') for f in l]), file=fd)
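# --- Worked example (not part of the original script) ---
# The two phases are driven from the command line roughly as follows; the
# script name, credentials and paths are placeholders:
#   python booking_hotels.py --download --user USER --password PASS --path ./cache
#   python booking_hotels.py --translate --path ./cache --output hotels.tsv
# The helper below mirrors the ".rate" bucketing used in translate() so the
# behaviour is easy to check in isolation: with rates = (0.7, 1.3) a price is
# compared against the city's median price scaled by each factor in turn.
def _rate_bucket(price, median, rates=(0.7, 1.3)):
    rate = 1
    while rate <= len(rates) and price > median * rates[rate - 1]:
        rate += 1
    return rate
assert _rate_bucket(100.0, 90.0) == 2   # 100 > 90*0.7 but 100 <= 90*1.3
assert _rate_bucket(50.0, 90.0) == 1    # below the lower band
assert _rate_bucket(200.0, 90.0) == 3   # above both bands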
def process_options():
parser = argparse.ArgumentParser(description='Download and process booking hotels.')
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose")
parser.add_argument("-q", "--quiet", action="store_false", dest="verbose")
parser.add_argument("--password", dest="password", help="Booking.com account password")
parser.add_argument("--user", dest="user", help="Booking.com account user name")
parser.add_argument("--path", dest="path", help="Path to data files")
parser.add_argument("--output", dest="output", help="Name and destination for output file")
parser.add_argument("--download", action="store_true", dest="download", default=False)
parser.add_argument("--translate", action="store_true", dest="translate", default=False)
options = parser.parse_args()
if not options.download and not options.translate:
parser.print_help()
    # TODO(mgsergio): implement it with argparse facilities.
if options.translate and not options.output:
print("--output isn't set")
parser.print_help()
exit()
return options
def main():
options = process_options()
if options.download:
download(options.user, options.password, options.path)
if options.translate:
translate(options.path, options.output)
if __name__ == "__main__":
main()
| 0.003462 |
# These functions are part of the python-colorama module
# They have been adjusted slightly for LinkChecker
#
# Copyright: (C) 2010 Jonathan Hartley <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name(s) of the copyright holders nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# from winbase.h
STDOUT = -11
STDERR = -12
from ctypes import (windll, byref, Structure, c_char, c_short, c_uint32,
c_ushort, ArgumentError, WinError)
handles = {
STDOUT: windll.kernel32.GetStdHandle(STDOUT),
STDERR: windll.kernel32.GetStdHandle(STDERR),
}
SHORT = c_short
WORD = c_ushort
DWORD = c_uint32
TCHAR = c_char
class COORD(Structure):
"""struct in wincon.h"""
_fields_ = [
('X', SHORT),
('Y', SHORT),
]
class SMALL_RECT(Structure):
"""struct in wincon.h."""
_fields_ = [
("Left", SHORT),
("Top", SHORT),
("Right", SHORT),
("Bottom", SHORT),
]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
def __str__(self):
"""Get string representation of console screen buffer info."""
return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
self.dwSize.Y, self.dwSize.X
, self.dwCursorPosition.Y, self.dwCursorPosition.X
, self.wAttributes
, self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
, self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
)
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
"""Get console screen buffer info object."""
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = windll.kernel32.GetConsoleScreenBufferInfo(
handle, byref(csbi))
if not success:
raise WinError()
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
"""Set a console text attribute."""
handle = handles[stream_id]
return windll.kernel32.SetConsoleTextAttribute(handle, attrs)
# from wincon.h
BLACK = 0
BLUE = 1
GREEN = 2
CYAN = 3
RED = 4
MAGENTA = 5
YELLOW = 6
GREY = 7
# from wincon.h
NORMAL = 0x00 # dim text, dim background
BRIGHT = 0x08 # bright text, dim background
_default_foreground = None
_default_background = None
_default_style = None
def init():
"""Initialize foreground and background attributes."""
global _default_foreground, _default_background, _default_style
try:
attrs = GetConsoleScreenBufferInfo().wAttributes
except (ArgumentError, WindowsError):
_default_foreground = GREY
_default_background = BLACK
_default_style = NORMAL
else:
_default_foreground = attrs & 7
_default_background = (attrs >> 4) & 7
_default_style = attrs & BRIGHT
def get_attrs(foreground, background, style):
"""Get foreground and background attributes."""
return foreground + (background << 4) + style
def set_console(stream=STDOUT, foreground=None, background=None, style=None):
"""Set console foreground and background attributes."""
if foreground is None:
foreground = _default_foreground
if background is None:
background = _default_background
if style is None:
style = _default_style
attrs = get_attrs(foreground, background, style)
SetConsoleTextAttribute(stream, attrs)
def reset_console(stream=STDOUT):
"""Reset the console."""
set_console(stream=stream)
def get_console_size():
"""Get the console size."""
return GetConsoleScreenBufferInfo().dwSize
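# --- Usage sketch (not part of the original module) ---
# A minimal illustration of driving the helpers above; it uses only names
# defined in this file and assumes stdout is attached to a real Windows console.
def demo_colored_line(text):
    init()                                    # capture the current console attributes
    set_console(foreground=RED, style=BRIGHT)
    print text
    reset_console()                           # restore the attributes captured by init()
# get_console_size().X and .Y give the current screen buffer dimensions.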
| 0.004512 |