Dataset Viewer

text (string, lengths 820–1M) | score (float64, 0–0.24)
---|---|
from django.conf import settings
from django.core.mail import send_mail
from django.shortcuts import render
from .forms import ContactForm, SignUpForm
from .models import SignUp
# Create your views here.
def home(request):
title = 'Sign Up Now'
form = SignUpForm(request.POST or None)
context = {
"title": title,
"form": form
}
if form.is_valid():
#form.save()
#print request.POST['email'] #not recommended
instance = form.save(commit=False)
full_name = form.cleaned_data.get("full_name")
if not full_name:
full_name = "New full name"
instance.full_name = full_name
# if not instance.full_name:
# instance.full_name = "Justin"
instance.save()
context = {
"title": "Thank you"
}
if request.user.is_authenticated() and request.user.is_staff:
#print(SignUp.objects.all())
# i = 1
# for instance in SignUp.objects.all():
# print(i)
# print(instance.full_name)
# i += 1
queryset = SignUp.objects.all().order_by('-timestamp') #.filter(full_name__iexact="Justin")
#print(SignUp.objects.all().order_by('-timestamp').filter(full_name__iexact="Justin").count())
context = {
"queryset": queryset
}
return render(request, "home.html", context)
def contact(request):
title = 'Contact Us'
title_align_center = True
form = ContactForm(request.POST or None)
if form.is_valid():
# for key, value in form.cleaned_data.iteritems():
# print key, value
# #print form.cleaned_data.get(key)
form_email = form.cleaned_data.get("email")
form_message = form.cleaned_data.get("message")
form_full_name = form.cleaned_data.get("full_name")
# print email, message, full_name
subject = 'Site contact form'
from_email = settings.EMAIL_HOST_USER
to_email = [from_email, '[email protected]']
contact_message = "%s: %s via %s"%(
form_full_name,
form_message,
form_email)
some_html_message = """
<h1>hello</h1>
"""
send_mail(subject,
contact_message,
from_email,
to_email,
html_message=some_html_message,
fail_silently=True)
context = {
"form": form,
"title": title,
"title_align_center": title_align_center,
}
return render(request, "forms.html", context)
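# Hedged sketch of the forms imported above; the real ContactForm and
# SignUpForm live in .forms and may differ. Field names mirror the
# cleaned_data keys used in these views (full_name, email, message).
#
#   from django import forms
#   from .models import SignUp
#
#   class ContactForm(forms.Form):
#       full_name = forms.CharField(required=False)
#       email = forms.EmailField()
#       message = forms.CharField(widget=forms.Textarea)
#
#   class SignUpForm(forms.ModelForm):
#       class Meta:
#           model = SignUp
#           fields = ['full_name', 'email']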
| 0.040835 |
import oebakery
from oebakery import die, err, warn, info, debug
import os
import operator
import bb
# Handle all the architecture related variables.
# To be able to reuse definitions for both build, machine and sdk
# architectures, the usual bitbake variables are not used, but a more
# hierarchical setup using a number of Python dictionaries.
gccspecs = {}
cpuspecs = {
'm68k' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
'elf' : 'ELF 32-bit MSB .*, foobar',
},
'mcf51' : {
'mcpu' : '51',
},
'mcf51ac' : {
'mcpu' : '51ac',
},
'mcf51cn' : {
'mcpu' : '51cn',
},
'mcf51em' : {
'mcpu' : '51em',
},
'mcf51qe' : {
'mcpu' : '51qe',
},
'mcf5206' : {
'mcpu' : '5206',
},
'mcf5206e' : {
'mcpu' : '5206e',
},
'mcf5208' : {
'mcpu' : '5208',
},
'mcf52277' : {
'mcpu' : '52277',
},
},
'powerpc' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
'elf' : 'ELF 32-bit MSB .*, PowerPC or cisco 4500',
},
'603e' : {
'mcpu' : '603e',
'float' : 'hard',
},
'e300c1' : {
'mcpu' : 'e300c1',
'float' : 'hard',
},
'e300c2' : {
'mcpu' : 'e300c2',
},
'e300c3' : {
'mcpu' : 'e300c3',
'float' : 'hard',
},
'e300c4' : {
'mcpu' : 'e300c4',
'float' : 'hard',
},
},
'powerpc64' : {
'DEFAULT' : {
'wordsize' : '64',
'endian' : 'b',
},
},
'arm' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, ARM',
'abi flags' : [
['arm abi', 'eabi', {
'eabi' : {
'os' : 'eabi',
},
# Currently, OE-lite only supports EABI for
# ARM. When/if OABI is added, os should be kept as
# linux-gnu for OABI
}
],
]
},
'920t' : {
'mcpu' : 'arm920t',
'mtune' : 'arm920t',
},
'926ejs' : {
'march' : 'armv5te',
'mcpu' : 'arm926ej-s',
'mtune' : 'arm926ej-s',
},
'1176jzfs' : {
'march' : 'armv6',
'mcpu' : 'arm1176jzf-s',
'mtune' : 'arm1176jzf-s',
'abi flags' : [
['float abi', 'hard', {
'hard' : {
'float' : 'hard',
'fpu' : 'vfp',
},
'softfp' : {
'float' : 'softfp',
'fpu' : 'vfp',
},
'soft' : {
'float' : 'soft',
},
}
]
]
},
'cortexa7' : {
'mcpu' : 'cortex-a7',
'mtune' : 'cortex-a7',
'abi flags' : [
['float abi', 'softfp', {
'hard' : {
'float' : 'hard',
'fpu' : 'neon-vfpv4',
'vendor' : 'hf',
},
'softfp' : {
'float' : 'softfp',
'fpu' : 'neon-vfpv4',
'vendor' : '',
},
'soft' : {
'float' : 'soft',
'vendor' : 'soft',
},
}
],
['instruction set', 'thumb', {
'arm' : { },
'thumb' : {
'thumb' : '1',
'vendor' : 't',
},
}
],
]
},
'cortexa8' : {
'mcpu' : 'cortex-a8',
'mtune' : 'cortex-a8',
'abi flags' : [
['float abi', 'hard', {
'hard' : {
'float' : 'hard',
'fpu' : 'neon',
'vendor' : 'neon',
},
'softfp' : {
'float' : 'softfp',
'fpu' : 'neon',
'vendor' : 'neonsfp',
},
'soft' : {
'float' : 'soft',
'vendor' : 'sfp',
},
}
],
['instruction set', 'thumb', {
'arm' : {
'mode' : 'arm',
},
'thumb' : {
'mode' : 'thumb',
'vendor' : 't',
},
}
],
]
},
'cortexa9' : {
'mcpu' : 'cortex-a9',
'mtune' : 'cortex-a9',
'abi flags' : [
['float abi', 'hard', {
'hard' : {
'float' : 'hard',
'fpu' : 'neon',
'vendor' : 'neon',
},
'softfp' : {
'float' : 'softfp',
'fpu' : 'neon',
'vendor' : 'neonsfp',
},
'soft' : {
'float' : 'soft',
'vendor' : 'sfp',
},
}
],
['instruction set', 'thumb', {
'arm' : {
'mode' : 'arm',
},
'thumb' : {
'mode' : 'thumb',
'vendor' : 't',
},
}
],
]
},
},
'armeb' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
},
},
'avr32' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
},
},
'mips' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
},
},
'mipsel' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
},
},
'sparc' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
},
},
'bfin' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
},
},
'sh3' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
},
},
'sh4' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
},
},
'i386' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'i386',
'fpu' : '387',
'float' : 'hard',
},
},
'i486' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'i486',
'fpu' : '387',
'float' : 'hard',
},
'winchipc6' : {
'march' : 'winchip-c6',
},
'winchip2' : {
'march' : 'winchip2',
},
},
'i586' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'i586',
'fpu' : '387',
'float' : 'hard',
},
'mmx' : {
'march' : 'pentium-mmx',
},
'k6' : {
'march' : 'k6',
},
'k62' : {
'march' : 'k6-2',
},
'geode' : {
'march' : 'geode',
},
'c3' : {
'march' : 'c3',
},
'c32' : {
'march' : 'c3-2',
},
},
'i686' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'i686',
'fpu' : '387',
'float' : 'hard',
},
'mmx' : {
'march' : 'pentium2',
},
'sse' : {
'march' : 'pentium3',
'fpu' : 'sse',
},
'sse2' : {
'march' : 'pentium-m',
'fpu' : 'sse',
},
'athlon' : {
'march' : 'athlon',
},
'athlon4' : {
'march' : 'athlon-4',
'fpu' : 'sse',
},
},
'i786' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'pentium4',
'fpu' : 'sse',
'float' : 'hard',
},
'sse3' : {
'march' : 'prescott',
},
},
'x86_64' : {
'DEFAULT' : {
'wordsize' : '64',
'endian' : 'l',
'elf' : 'ELF 64-bit LSB .*, x86-64',
'march' : 'opteron',
'fpu' : 'sse',
'float' : 'hard',
},
'sse3' : {
'march' : 'k8-sse3',
},
'nocona' : {
'march' : 'nocona',
},
'core2' : {
'march' : 'core2',
},
'atom' : {
'march' : 'atom',
},
'amdfam10' : {
'march' : 'amdfam10',
},
},
'ia64' : {
'DEFAULT' : {
'wordsize' : '64',
'endian' : 'l',
},
},
}
cpumap = {
'powerpc' : {
'mpc5121e' : 'e300c4',
'mpc5125' : 'e300c4',
'mpc8313' : 'e300c3',
'mpc8313e' : 'e300c3',
'mpc8360' : 'e300c1',
'mpc8270' : 'g2le',
},
'arm' : {
'at91rm9200' : '920t',
'at91sam9260' : '926ejs',
'omap3520' : ('cortexa8', ('omap3', 'omap')),
'omap3530' : ('cortexa8', ('omap3', 'omap')),
'omap4430' : ('cortexa9neon', ('omap4', 'omap')),
'omap4440' : ('cortexa9neon', ('omap4', 'omap')),
'imx21' : ('926ejs', 'imx'),
'imx23' : ('926ejs', 'mxs'),
'imx25' : ('926ejs', 'imx'),
'imx27' : ('926ejs', 'imx'),
'imx28' : ('926ejs', 'mxs'),
'imx280' : ('926ejs', ('imx28', 'mxs')),
'imx281' : ('926ejs', ('imx28', 'mxs')),
'imx283' : ('926ejs', ('imx28', 'mxs')),
'imx285' : ('926ejs', ('imx28', 'mxs')),
'imx286' : ('926ejs', ('imx28', 'mxs')),
'imx287' : ('926ejs', ('imx28', 'mxs')),
'imx31' : ('1136jfs', 'imx'),
'imx35' : ('1136jfs', 'imx'),
'imx51' : ('cortexa8', 'imx'),
'imx512' : ('cortexa8', ('imx51', 'imx')),
'imx513' : ('cortexa8', ('imx51', 'imx')),
'imx514' : ('cortexa8', ('imx51', 'imx')),
'imx515' : ('cortexa8', ('imx51', 'imx')),
'imx516' : ('cortexa8', ('imx51', 'imx')),
'imx53' : ('cortexa8', 'imx'),
'imx534' : ('cortexa8', ('imx53', 'imx')),
'imx535' : ('cortexa8', ('imx53', 'imx')),
'imx536' : ('cortexa8', ('imx53', 'imx')),
'imx537' : ('cortexa8', ('imx53', 'imx')),
'imx538' : ('cortexa8', ('imx53', 'imx')),
'imx6' : ('cortexa9', 'imx'),
'ls1021a' : ('cortexa7', ('ls102x', 'ls1', 'layerscape')),
'imx6sl' : ('cortexa9', ('imx6', 'imx')),
'imx6dl' : ('cortexa9', ('imx6', 'imx')),
'imx6q' : ('cortexa9', ('imx6', 'imx')),
},
'x86' : {
'celeronm575' : (('i686', 'sse2'),),
},
}
osspecs = {
'mingw32' : {
'exeext' : '.exe',
'elf' : 'PE32 .* for MS Windows .* Intel 80386 32-bit',
},
}
def init(d):
sanity(d)
gcc_version = d.get('GCC_VERSION')
arch_set_build_arch(d, gcc_version)
arch_set_cross_arch(d, 'MACHINE', gcc_version)
arch_set_cross_arch(d, 'SDK', gcc_version)
return
def sanity(d):
import bb
fail = False
sdk_cpu = d.get("SDK_CPU")
if not sdk_cpu:
bb.error("SDK_CPU not set")
fail = True
sdk_os = d.get("SDK_OS")
if not sdk_os:
bb.error("SDK_OS not set")
fail = True
machine = d.get("MACHINE")
machine_cpu = d.get("MACHINE_CPU")
machine_os = d.get("MACHINE_OS")
if machine:
pass
elif machine_cpu and machine_os:
pass
elif machine_cpu:
bb.error("MACHINE_CPU set, but not MACHINE_OS")
fail = True
elif machine_os:
bb.error("MACHINE_OS set, but not MACHINE_CPU")
fail = True
else:
bb.error("MACHINE or MACHINE_CPU and MACHINE_OS must be set")
fail = True
if fail:
bb.fatal("Invalid MACHINE and/or SDK specification\n"
"Check your conf/local.conf file and/or machine and distro config files.")
return
def update(d):
gcc_version = d.get('GCC_VERSION')
arch_update(d, 'BUILD', gcc_version)
arch_update(d, 'HOST', gcc_version)
arch_update(d, 'TARGET', gcc_version)
return
def arch_set_build_arch(d, gcc_version):
try:
guess = globals()['config_guess_cache']
except KeyError:
#bb.debug("config.guess")
script = arch_find_script(d, 'config.guess')
try:
guess = arch_split(os.popen(script).readline().strip())
except OSError, e:
#bb.fatal('config.guess failed: '+e)
return None
config_guess_cache = guess
globals()['config_guess_cache'] = config_guess_cache
# Replace the silly 'pc' vendor with 'unknown' to yield a result
# comparable with arch_cross().
if guess[1] == 'pc':
guess[1] = 'unknown'
guess[1] = "build_" + guess[1]
d.set('BUILD_ARCH', '-'.join(guess))
return
def arch_set_cross_arch(d, prefix, gcc_version):
cross_arch = '%s-%s'%(d.get(prefix+'_CPU', True),
d.get(prefix+'_OS', True))
cross_arch = arch_config_sub(d, cross_arch)
abis = (d.get(prefix+'_ABI', True) or "").split()
if prefix == "MACHINE":
vendor_prefix = None
else:
vendor_prefix = prefix.lower() + "_"
cross_arch = arch_fixup(cross_arch, gcc_version, abis, vendor_prefix)
d[prefix+'_ARCH'] = cross_arch[0]
if cross_arch[1]:
d[prefix+'_CPU_FAMILIES'] = " ".join(cross_arch[1])
return
def arch_update(d, prefix, gcc_version):
arch = d.get(prefix+'_ARCH', True)
gccspec = arch_gccspec(arch, gcc_version)
(cpu, vendor, os) = arch_split(arch)
d[prefix+'_CPU'] = cpu
d[prefix+'_VENDOR'] = vendor
d[prefix+'_OS'] = os
ost = os.split('-',1)
if len(ost) > 1:
d[prefix+'_BASEOS'] = ost[0]
else:
d[prefix+'_BASEOS'] = ""
for spec in gccspec:
if spec == "abi flags":
continue
d[prefix+'_'+spec.upper()] = gccspec[spec]
return
def arch_fixup(arch, gcc, abis, vendor_prefix=None):
import re
gccv = [int(x) for x in re.search(r'(\d+)[.](\d+)[.]?', gcc).groups()]  # major/minor as ints for the numeric version checks below
(cpu, vendor, os) = arch_split(arch)
if vendor == 'pc':
vendor = 'unknown'
families = []
if cpu in cpumap and vendor in cpumap[cpu]:
mapto = cpumap[cpu][vendor]
families = [vendor]
if isinstance(mapto, basestring):
vendor = mapto
else:
assert isinstance(mapto, tuple) and len(mapto) in (1, 2)
if isinstance(mapto[0], basestring):
vendor = mapto[0]
else:
assert isinstance(mapto[0], tuple) and len(mapto[0]) == 2
cpu = mapto[0][0]
vendor = mapto[0][1]
if len(mapto) > 1:
if isinstance(mapto[1], basestring):
families.append(mapto[1])
else:
assert isinstance(mapto[1], tuple)
families.extend(mapto[1])
families.append(vendor)
if cpu == "powerpc":
if vendor in ('e300c1', 'e300c4'):
vendor = '603e'
if vendor in ('e300c2', 'e300c3'):
if gccv[0] < 4 or (gccv[0] == 4 and gccv[1] < 4):
vendor = '603e'
if cpu in cpuspecs and vendor in cpuspecs[cpu]:
pass
elif vendor == 'unknown':
pass
else:
bb.fatal("unknown cpu vendor: %s"%vendor)
vendor = 'unknown'
# Merge DEFAULT and vendor abi_flags, keeping DEFAULT flags first
abi_flags = []
if "DEFAULT" in cpuspecs[cpu] and 'abi flags' in cpuspecs[cpu]["DEFAULT"]:
abi_flags += cpuspecs[cpu]["DEFAULT"]["abi flags"]
if vendor in cpuspecs[cpu] and 'abi flags' in cpuspecs[cpu][vendor]:
for abi_flag in cpuspecs[cpu][vendor]['abi flags']:
try:
flag_index = map(operator.itemgetter(0), abi_flags).index(
abi_flag[0])
abi_flags[flag_index][1] = abi_flag[1]
for flag_value in abi_flag[2].items():
abi_flags[flag_index][2][flag_value[0]] = flag_value[1]
except ValueError:
abi_flags.append(abi_flag)
if abi_flags:
cpuspec = cpuspecs[cpu][vendor]
extra_vendor = []
extra_os = []
for abi_flag in abi_flags:
diff = set(abis).intersection(set(abi_flag[2]))
if len(diff) > 1:
bb.fatal("ABI with %s is invalid, only one of %s should be given"
% (', '.join(diff), ', '.join(abi_flag[2].keys())))
if len(diff) == 1:
abi_select = diff.pop()
abis.remove(abi_select)
else:
abi_select = abi_flag[1]
if 'vendor' in abi_flag[2][abi_select]:
extra_vendor.append(abi_flag[2][abi_select].pop('vendor'))
if 'os' in abi_flag[2][abi_select]:
extra_os.append(abi_flag[2][abi_select].pop('os'))
cpuspec.update(abi_flag[2][abi_select])
vendor = vendor + ''.join(extra_vendor)
os = os + ''.join(extra_os)
cpuspecs[cpu].update({vendor : cpuspec})
if len(abis) > 0:
bb.fatal("ABI %s not valid for arch %s-%s-%s" %(', '.join(abis), cpu,vendor,os))
if vendor_prefix:
vendor = vendor_prefix + vendor
return ('-'.join((cpu, vendor, os)), families)
def arch_gccspec(arch, gcc):
import re
if gcc in gccspecs:
if arch in gccspecs[gcc]:
return gccspecs[gcc][arch]
else:
gccspecs[gcc] = {}
gccv = [int(x) for x in re.search(r'(\d+)[.](\d+)[.]?', gcc).groups()]  # major/minor as ints for the numeric version checks below
(cpu, vendor, os) = arch_split(arch)
gccspec = {}
if cpu in cpuspecs:
gccspec.update(cpuspecs[cpu]['DEFAULT'])
if cpu in cpuspecs and vendor in cpuspecs[cpu]:
gccspec.update(cpuspecs[cpu][vendor])
if os in osspecs:
gccspec.update(osspecs[os])
try:
if gccspec['mcpu'] in ('e300c1', 'e300c4'):
gccspec['mcpu'] = '603e'
if gccspec['mtune'] in ('e300c1', 'e300c4'):
gccspec['mtune'] = '603e'
if gccspec['mcpu'] in ('e300c2', 'e300c3'):
if gccv[0] < 4 or (gccv[0] == 4 and gccv[1] < 4):
gccspec['mcpu'] = '603e'
if gccspec['mtune'] in ('e300c2', 'e300c3'):
if gccv[0] < 4 or (gccv[0] == 4 and gccv[1] < 4):
gccspec['mtune'] = '603e'
except KeyError, e:
#bb.debug("KeyError in arch_gccspec: ")
pass
gccspecs[gcc][arch] = gccspec
return gccspec
def arch_config_sub(d, arch):
try:
config_sub_cache = globals()['config_sub_cache']
except KeyError:
config_sub_cache = {}
globals()['config_sub_cache'] = config_sub_cache
try:
canonical_arch = config_sub_cache[arch]
except KeyError:
script = arch_find_script(d, 'config.sub')
try:
bb.debug("%s %s"%(script, arch))
canonical_arch = os.popen("%s %s"%(script, arch)).readline().strip()
config_sub_cache[arch] = canonical_arch
except OSError, e:
bb.error("config.sub(%s) failed: %s"%(arch, e))
return arch
return canonical_arch
def arch_split(arch):
archtuple = arch.split('-', 2)
if len(archtuple) == 3:
return archtuple
else:
bb.error('invalid arch string: '+arch)
return None
def arch_find_script(d, filename):
try:
scripts = globals()['arch_scripts']
except KeyError:
scripts = {}
globals()['arch_scripts'] = scripts
if not filename in scripts:
for oepath in d.get('OEPATH', 1).split(':'):
filepath = os.path.join(oepath, 'scripts', filename)
if os.path.isfile(filepath):
#bb.debug("found %s: %s"%(filename, filepath))
scripts[filename] = filepath
break
if not filename in scripts:
bb.error('could not find script: %s'%filename)
return scripts[filename]
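# Hedged usage sketch (assumption: oebakery and bb are importable, i.e. this
# runs inside an OE-lite checkout; no bitbake metadata dictionary 'd' is
# needed for these two helpers). It only illustrates how a DEFAULT entry and
# a vendor entry from cpuspecs are merged.
if __name__ == "__main__":
    example = 'arm-926ejs-linux-gnueabi'
    print arch_split(example)              # ['arm', '926ejs', 'linux-gnueabi']
    spec = arch_gccspec(example, '4.7.2')
    print spec['wordsize'], spec['mcpu']   # 32 arm926ej-s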
| 0.014907 |
# -*- coding: utf-8 -*-
#
# testiaf.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
IAF Neuron example
------------------
A DC current is injected into the neuron using a current generator
device. The membrane potential as well as the spiking activity are
recorded by corresponding devices.
It can be observed how the current charges the membrane, a spike
is emitted, the neuron enters its absolute refractory period, and finally
starts to recover.
'''
'''
First, we import all necessary modules for simulation and plotting
'''
import nest
import pylab
'''
Second, the function build_network is defined to build the network and
return the handles of the spike detector and the voltmeter
'''
def build_network(dt):
nest.ResetKernel()
nest.SetKernelStatus({"local_num_threads" : 1, "resolution" : dt})
neuron = nest.Create('iaf_neuron')
nest.SetStatus(neuron, "I_e", 376.0)
vm = nest.Create('voltmeter')
nest.SetStatus(vm, "withtime", True)
sd = nest.Create('spike_detector')
nest.Connect(vm, neuron)
nest.Connect(neuron, sd)
return vm, sd
'''
The function build_network takes the resolution as argument.
First the kernel is reset, the number of threads is set to one, and
the resolution is set to the specified value dt. The iaf_neuron is
created and the handle is stored in the variable neuron. The status of
the neuron is changed so it receives an external current. Next the
voltmeter is created and the handle stored in vm, and the option
'withtime' is set, so times are given in the times vector in
events. Now the spike_detector is created and its handle is stored in
sd.
Voltmeter and spikedetector are then connected to the neuron. The
connect function takes the handles as input. The Voltmeter is
connected to the neuron and the neuron to the spikedetector because
the neuron sends spikes to the detector and the voltmeter 'observes'
the neuron.
'''
'''
The neuron is simulated for three different resolutions and then
the voltage trace is plotted
'''
for dt in [0.1, 0.5, 1.0]:
print("Running simulation with dt=%.2f" % dt)
vm, sd = build_network(dt)
'''
First, using build_network, the network is built and the handles of
the voltmeter and the spike detector are stored in vm and sd
'''
nest.Simulate(1000.0)
'''
The network is simulated using `Simulate`, which takes the desired
simulation time in milliseconds and advances the network state by
this amount of time. During simulation, the `spike_detector`
counts the spikes of the target neuron and the total number is
read out at the end of the simulation period.
'''
potentials = nest.GetStatus(vm, "events")[0]["V_m"]
times = nest.GetStatus(vm, "events")[0]["times"]
'''
The values of the voltage recorded by the voltmeter are read out
and the values for the membrane potential are stored in potentials
and the corresponding times in the times array
'''
pylab.plot(times, potentials, label="dt=%.2f" % dt)
print(" Number of spikes: {0}".format(nest.GetStatus(sd, "n_events")[0]))
'''
Using the pylab library the voltage trace is plotted over time
'''
pylab.legend(loc=3)
pylab.xlabel("time (ms)")
pylab.ylabel("V_m (mV)")
'''
Finally the axes are labelled and a legend is generated
'''
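'''
When the script is run stand-alone (assumption: an interactive matplotlib
backend is available), a final call keeps the figure window open
'''
pylab.show()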
| 0.00374 |
#!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Build tool setup for optimized environments.
This module is a SCons tool which sets up environments for optimized builds.
It is used as follows:
optimized_env = env.Clone(tools = ['target_optimized'])
"""
def generate(env):
# NOTE: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool."""
# Add in general options.
env['TARGET_DEBUG'] = False
env.Append(
CPPDEFINES=['NDEBUG'] + env.get('CPPDEFINES_OPTIMIZED', []),
CCFLAGS=env.get('CCFLAGS_OPTIMIZED', []),
LINKFLAGS=env.get('LINKFLAGS_OPTIMIZED', []),
)
| 0.002342 |
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class P2PMempoolTests(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def run_test(self):
# Add a p2p connection
self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
self.nodes[0].p2p.wait_for_verack()
#request mempool
self.nodes[0].p2p.send_message(msg_mempool())
self.nodes[0].p2p.wait_for_disconnect()
#mininode must be disconnected at this point
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
| 0.004288 |
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class Peer(object):
def __init__(self, address, jobs, rel_perf, pubkey):
self.address = address # string: IP address
self.jobs = jobs # integer: number of CPUs
self.relative_performance = rel_perf
self.pubkey = pubkey # string: pubkey's fingerprint
self.shells = set() # set of strings
self.needed_work = 0
self.assigned_work = 0
self.tests = [] # list of TestCase objects
self.trusting_me = False # This peer trusts my public key.
self.trusted = False # I trust this peer's public key.
def __str__(self):
return ("Peer at %s, jobs: %d, performance: %.2f, trust I/O: %s/%s" %
(self.address, self.jobs, self.relative_performance,
self.trusting_me, self.trusted))
def AddTests(self, shell):
"""Adds tests from |shell| to this peer.
Stops when self.needed_work reaches zero, or when all of shell's tests
are assigned."""
assert self.needed_work > 0
if shell.shell not in self.shells:
self.shells.add(shell.shell)
while len(shell.tests) > 0 and self.needed_work > 0:
t = shell.tests.pop()
self.needed_work -= t.duration
self.assigned_work += t.duration
shell.total_duration -= t.duration
self.tests.append(t)
def ForceAddOneTest(self, test, shell):
"""Forcibly adds another test to this peer, disregarding needed_work."""
if shell.shell not in self.shells:
self.shells.add(shell.shell)
self.needed_work -= test.duration
self.assigned_work += test.duration
shell.total_duration -= test.duration
self.tests.append(test)
def Pack(self):
"""Creates a JSON serializable representation of this Peer."""
return [self.address, self.jobs, self.relative_performance]
@staticmethod
def Unpack(packed):
"""Creates a Peer object built from a packed representation."""
pubkey_dummy = "" # Callers of this don't care (only the server does).
return Peer(packed[0], packed[1], packed[2], pubkey_dummy)
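# Hedged round-trip sketch (all values are made up for illustration):
# Pack() yields a JSON-serializable list and Unpack() rebuilds an
# equivalent Peer, minus the pubkey, which only the server cares about.
if __name__ == "__main__":
  import json
  packed = Peer("192.168.0.10", 8, 1.0, "dummy-fingerprint").Pack()
  restored = Peer.Unpack(json.loads(json.dumps(packed)))
  print(restored)  # Peer at 192.168.0.10, jobs: 8, performance: 1.00, ...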
| 0.004493 |
import os
import random
import sys
class CPU(object):
"""
The Chip-8 has 4KB of RAM from 0x000 to 0xFFF. The original interpreter is stored in memory
from 0x000 to 0x1FF so most programs will start at 0x200. The Chip-8 has 16 8-bit registers
and a 16-bit register that stores memory addresses. There are also 2 8-bit registers that
are the delay and sound timers. The stack can hold 16 16-bit values. The Chip-8 has a 16-key
hexadecimal keypad with keys 0~9 and A~F.
"""
def __init__(self, display):
"""
Initializes all the needed components of the Chip-8 CPU to their proper values
"""
self.memory = [0] * 4096
self.registers = [0] * 16
self.address = [0] * 16
self.stack = [0] * 16
self.keys = [0] * 16
self.display_pixels = [[0 for _ in range(64)] for _ in range(32)]
self.pc = 0x200
self.sp = 0
self.register_I = 0
self.delay_timer = 0
self.sound_timer = 0
self.display = display
self.draw = False
self.test = True
self.font_set = [
0xF0, 0x90, 0x90, 0x90, 0xF0, # 0
0x20, 0x60, 0x20, 0x20, 0x70, # 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, # 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, # 3
0x90, 0x90, 0xF0, 0x10, 0x10, # 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, # 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, # 6
0xF0, 0x10, 0x20, 0x40, 0x40, # 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, # 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, # 9
0xF0, 0x90, 0xF0, 0x90, 0x90, # A
0xE0, 0x90, 0xE0, 0x90, 0xE0, # B
0xF0, 0x80, 0x80, 0x80, 0xF0, # C
0xE0, 0x90, 0x90, 0x90, 0xE0, # D
0xF0, 0x80, 0xF0, 0x80, 0xF0, # E
0xF0, 0x80, 0xF0, 0x80, 0x80 # F
]
for x in range(0, len(self.font_set)):
self.memory[x] = self.font_set[x]
def testing(self):
for num in range (0, len(self.registers)):
print("V" + str(num) + ": " + str(self.registers[num]))
print("I: " + str(self.register_I))
print("pc: " + str(self.pc))
print("sp: " + str(self.sp))
print("dt: " + str(self.delay_timer))
print("st: " + str(self.sound_timer))
def load_rom(self, rom_name):
"""
Checks if the user entered rom name exists in the proper directory. If the rom exists
and is a valid Chip-8 rom, it is stored into the proper addresses in the CPU memory.
"""
print("Loading %s..." % (rom_name))
os.chdir('..')
os.chdir('roms')
try:
rom = open(rom_name, "rb")
except IOError:
print("Rom does not exist, please enter a valid rom file.")
sys.exit()
else:
rom_bytes = rom.read()
# First 512 bytes are used by the Chip-8 font set.
if len(rom_bytes) > (4096 - 512):
print("Rom file is too large, please choose a valid rom file.")
sys.exit()
#print(len(rom_string))
# Loads rom into memory starting from the address after the first 512 addresses
for byte in range(0, len(rom_bytes)):
self.memory[byte + self.pc] = rom_bytes[byte]
print("Done loading %s!" %(rom_name))
rom.close()
def timer_decrement(self):
if self.delay_timer != 0:
self.delay_timer -= 1
if self.sound_timer != 0:
self.sound_timer -= 1
def get_opcode(self):
"""
Combines bytes in adjacent memory addresses to create a 2 byte long opcode. Left shifts the
first byte by 8 bits and performs a bitwise OR operation to change the created mask into
the values of the second byte.
"""
first_byte = self.memory[self.pc]
second_byte = self.memory[self.pc + 1]
opcode = (first_byte << 8 | second_byte)
return opcode
def perform_opcode(self, opcode):
"""
Decodes the given opcode by identifying the first hexidecimal value. If required, the last
hexidecimal value is also identified and the decoded opcode is performed. The pc is then
advanced based on the opcode.
"""
# Identify first hex to determine which opcode nibble to perform
first_hex = opcode & 0xF000
if first_hex == 0x0000:
last_hex = opcode & 0x000F
# Opcode 00E0: clear screen
if last_hex == 0x0000:
self.display.clear_display()
self.draw = True
self.pc += 2
# Opcode 00EE: returns from subroutine
elif last_hex == 0x000E:
self.sp -= 1
self.pc = self.stack[self.sp]
#self.sp -= 1
self.pc += 2
# Opcode 1NNN: Jump to address NNN
elif first_hex == 0x1000:
# Get last 3 hex values
address = opcode & 0x0FFF
self.pc = address
# Opcode 2NNN: Call subroutine at NNN
# Adds current pc to stack and increments sp
elif first_hex == 0x2000:
address = opcode & 0x0FFF
self.stack[self.sp] = self.pc
self.sp += 1
self.pc = address
# Opcode 3XKK: Skips next instruction if value stored in register X = KK
elif first_hex == 0x3000:
if (self.registers[(opcode & 0x0F00) >> 8] == (opcode & 0x00FF)):
self.pc += 4
else:
self.pc += 2
# Opcode 4XKK: Skips next instruction if value stored in register X != KK
elif first_hex == 0x4000:
if (self.registers[(opcode & 0x0F00) >> 8] != (opcode & 0x00FF)):
self.pc += 4
else:
self.pc += 2
# Opcode 5XY0: Skips next instruction if value stored in register X = value in register Y
elif first_hex == 0x5000:
if (self.registers[(opcode & 0x0F00) >> 8] == self.registers[(opcode & 0x00F0) >> 4]):
self.pc += 4
else:
self.pc += 2
# Opcode 6XKK: Load KK into register X
elif first_hex == 0x6000:
value = opcode & 0x00FF
self.registers[(opcode & 0x0F00) >> 8] = value
self.pc += 2
# Opcode 7XKK: Adds KK to the value in register X and stores it in register X
elif first_hex == 0x7000:
self.registers[(opcode & 0x0F00) >> 8] += (opcode & 0x00FF)
self.pc += 2
elif first_hex == 0x8000:
last_hex = opcode & 0x000F
# Opcode 8XY0: Set value of register X to the value of register Y
if last_hex == 0x000:
self.registers[(opcode & 0x0F00) >> 8] = self.registers[(opcode & 0x00F0) >> 4]
self.pc += 2
# Opcode 8XY1: Set value of register X to (value of register X OR value of register Y)
elif last_hex == 0x001:
self.registers[(opcode & 0x0F00) >> 8] |= self.registers[(opcode & 0x00F0) >> 4]
self.pc += 2
# Opcode 8XY2: Set value of register X to (value of register X AND value of register Y)
elif last_hex == 0x002:
self.registers[(opcode & 0x0F00) >> 8] &= self.registers[(opcode & 0x00F0) >> 4]
self.pc += 2
# Opcode 8XY3: Set value of register X to (value of register X XOR value of register Y)
elif last_hex == 0x003:
self.registers[(opcode & 0x0F00) >> 8] ^= self.registers[(opcode & 0x00F0) >> 4]
self.pc += 2
# Opcode 8XY4: Set value of register X to (value of register X ADD value of register Y) and set carry
elif last_hex == 0x004:
value_sum = self.registers[(opcode & 0x0F00) >> 8] + self.registers[(opcode & 0x00F0) >> 4]
# Only keeps the lowest 8 bits if the sum is greater than 0xFF and sets the carry register to 1
if value_sum > 0xFF:
self.registers[0xF] = 1
self.registers[(opcode & 0x0F00) >> 8] = (value_sum & 0x00FF)
else:
self.registers[0xF] = 0
self.registers[(opcode & 0x0F00) >> 8] = value_sum
self.pc += 2
# Opcode 8XY5: Set value of register X to (value of register X SUB value of register Y)
elif last_hex == 0x005:
# Sets carry register to 0 if there is a borrow else set to 1
if (self.registers[(opcode & 0x0F00) >> 8] > self.registers[(opcode & 0x00F0) >> 4]):
self.registers[0xF] = 1
else:
self.registers[0xF] = 0
self.registers[(opcode & 0x0F00) >> 8] -= self.registers[(opcode & 0x00F0) >> 4]
self.pc += 2
# Opcode 8XY6: Right shift the value of register X by 1
elif last_hex == 0x006:
# Keeps the least significant bit of the value of register X in register F
self.registers[0xF] = (self.registers[(opcode & 0x0F00) >> 8] & 0x0001)
self.registers[(opcode & 0x0F00) >> 8] = (self.registers[(opcode & 0x0F00) >> 8] >> 1)
self.pc += 2
# Opcode 8XY7: Set value of register X to (value of register Y SUB value of register X)
elif last_hex == 0x007:
# Sets carry register to 0 if there is a borrow else set to 1
if (self.registers[(opcode & 0x0F00) >> 8] < self.registers[(opcode & 0x00F0) >> 4]):
self.registers[0xF] = 1
else:
self.registers[0xF] = 0
self.registers[(opcode & 0x0F00) >> 8] = self.registers[(opcode & 0x00F0) >> 4] - self.registers[(opcode & 0x0F00) >> 8]
self.pc += 2
# Opcode 8XYE: Left shift the value of register X by 1
elif last_hex == 0x00E:
# Keeps the most significant bit of the value of register X in register F
self.registers[0xF] = (self.registers[(opcode & 0x0F00) >> 8] >> 7)
self.registers[(opcode & 0x0F00) >> 8] = (self.registers[(opcode & 0x0F00) >> 8] << 1)
self.pc += 2
# Opcode 9XY0: Skip next instruction if value of register X != value of register Y
elif first_hex == 0x9000:
if self.registers[(opcode & 0x0F00) >> 8] != self.registers[(opcode & 0x00F0) >> 4]:
self.pc += 4
else:
self.pc += 2
# Opcode ANNN: Set value of register I to NNN
elif first_hex == 0xA000:
self.register_I = (opcode & 0x0FFF)
self.pc += 2
# Opcode BNNN: Jump to location NNN + value of register 0
elif first_hex == 0xB000:
self.pc = (opcode & 0x0FFF) + self.registers[0]
# Opcode CXKK: Sets the value of register X to (random byte AND KK)
elif first_hex == 0xC000:
random_byte = random.randint(0, 255)
self.registers[(opcode & 0x0F00) >> 8] = (random_byte & (opcode & 0x00FF))
self.pc += 2
# Opcode DXYN: Display an N-byte sprite starting at memory location I at (value of register X, value of register Y)
# If the pixel of the sprite would go past the edge of the screen, wrap it around instead. Sprites are N pixels tall
# and 8 pixels wide on the standard CHIP-8. Drawing works by performing an XOR on a pixel on the screen with a given
# bit. Set value of register F to 1 if collision else set it to 0
elif first_hex == 0xD000:
height = opcode & 0x000F
x_coord = self.registers[(opcode & 0x0F00) >> 8]
y_coord = self.registers[(opcode & 0x00F0) >> 4]
location = self.register_I
self.registers[0xF] = 0
sprite_list = []
print(str(height))
for offset in range(0, height):
sprite_bits = []
sprite_byte = self.memory[location + offset]
sprite_byte = (bin(sprite_byte)[2:]).zfill(8)
for bit in sprite_byte:
sprite_bits.append(bit)
sprite_list.append(sprite_bits)
"""
for sprite in sprite_list:
print(str(sprite))
"""
for sprite in range(len(sprite_list)):
increment = 0
for pixel in sprite_list[sprite]:
screen_pixel = self.display.check_pixel((x_coord + increment) % self.display.width, (y_coord + sprite) % self.display.height)
pixel_color = int(pixel)
if pixel_color == 1 and screen_pixel == 1:
self.registers[0xF] = 1
pixel_color = 0
elif pixel_color == 0 and screen_pixel == 1:
pixel_color = 1
self.display.set_pixel((x_coord + increment) % self.display.width, (y_coord + sprite) % self.display.height, pixel_color)
increment += 1
self.draw = True
self.pc += 2
elif first_hex == 0xE000:
last_hex = opcode & 0x000F
# TODO implement pygame keys
# Opcode EX9E: Skips the next instruction if key with the value of register X is pressed
if last_hex == 0x000E:
if self.keys[self.registers[(opcode & 0x0F00) >> 8]] != 0:
self.pc += 4
else:
self.pc += 2
# Opcode EXA1: Skips the next instruction if key with the value of register X is not pressed
if last_hex == 0x0001:
if self.keys[self.registers[(opcode & 0x0F00) >> 8]] == 0:
self.pc += 4
else:
self.pc += 2
elif first_hex == 0xF000:
last_hex = opcode & 0x000F
# Opcode FX07: Set the value of register X to the value of the delay timer
if last_hex == 0x0007:
self.registers[(opcode & 0x0F00) >> 8] = self.delay_timer
self.pc += 2
# TODO implement pygame keys
# Opcode FX0A: Wait for a key press and stores the value of the pressed key into register X
if last_hex == 0x000A:
key_was_pressed = False
while not key_was_pressed:
for key in range(0, len(self.keys)):
if self.keys[key] != 0:
self.registers[(opcode & 0x0F00) >> 8] = key
key_was_pressed = True
self.pc += 2
# Opcode FX15: Set the value of the delay timer to the value of register X
if (opcode & 0x00FF) == 0x0015:
self.delay_timer = self.registers[(opcode & 0x0F00) >> 8]
self.pc += 2
# Opcode FX18: Set the value of the sound timer to the value of register X
if last_hex == 0x0008:
self.sound_timer = self.registers[(opcode & 0x0F00) >> 8]
self.pc += 2
# Opcode FX1E: Set the value of register I to (value of register I + value of register X)
if last_hex == 0x000E:
self.register_I += self.registers[(opcode & 0x0F00) >> 8]
self.pc += 2
# Opcode FX29: Set value of register I to the location of sprite for the digit of the value of register X
# Sprites are 5 bytes long so the value of register X must be multiplied by 5
if last_hex == 0x0009:
self.register_I = self.registers[(opcode & 0x0F00) >> 8] * 0x5
self.pc += 2
# Opcode FX33: Store the binary-coded decimal representation of the value of register X in memory locations I, I+1, and I+2
if last_hex == 0x0003:
value = self.registers[(opcode & 0x0F00) >> 8]
difference = 2
while difference >= 0:
self.memory[self.register_I + difference] = value % 10
value = value // 10
difference -= 1
self.pc += 2
# Opcode Fx55: Store the values of register 0 through X in memory starting in location of the value of register I
if (opcode & 0x00FF) == 0x0055:
location = 0
end = (opcode & 0x0F00) >> 8
while location <= end:
self.memory[self.register_I + location] = self.registers[location]
location += 1
self.pc += 2
# Opcode FX65: Load the registers 0 through X with values starting from the address of the value of register I
if (opcode & 0x00FF) == 0x0065:
location = 0
end = (opcode & 0x0F00) >> 8
while location <= end:
self.registers[location] = self.memory[self.register_I + location]
location += 1
self.pc += 2
else:
print("Invalid opcode, chippy will now quit")
quit()
def perform_cycle(self):
current_opcode = self.get_opcode()
print(hex(current_opcode))
if self.test == True:
self.testing()
self.perform_opcode(current_opcode)
self.timer_decrement()
if self.draw == True:
self.display.update_display()
self.draw = False
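# Hedged driver sketch (assumption: the Display object normally comes from a
# separate module, e.g. a pygame-backed class exposing width/height,
# clear_display(), check_pixel(), set_pixel() and update_display()). A
# minimal headless stub stands in here, and the rom name is hypothetical.
class _StubDisplay(object):
    width, height = 64, 32

    def __init__(self):
        self.pixels = [[0] * self.width for _ in range(self.height)]

    def clear_display(self):
        self.pixels = [[0] * self.width for _ in range(self.height)]

    def check_pixel(self, x, y):
        return self.pixels[y][x]

    def set_pixel(self, x, y, color):
        self.pixels[y][x] = color

    def update_display(self):
        pass  # a real display would redraw the window here


if __name__ == "__main__":
    cpu = CPU(_StubDisplay())
    cpu.load_rom("PONG")     # hypothetical rom file in the ../roms directory
    for _ in range(20):      # run a handful of cycles as a smoke test
        cpu.perform_cycle()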
| 0.007022 |
"""
For a given aws account, go through all un-attached volumes and tag them.
"""
import boto
import boto.utils
import argparse
import logging
import subprocess
import time
import os
from os.path import join, exists, isdir, islink, realpath, basename, dirname
import yaml
# needs to be pip installed
import netaddr
LOG_FORMAT = "%(asctime)s %(levelname)s - %(filename)s:%(lineno)s - %(message)s"
TIMEOUT = 300
log_level = logging.INFO
def tags_for_hostname(hostname, mapping):
logging.debug("Hostname is {}".format(hostname))
if not hostname.startswith('ip-'):
return {}
octets = hostname.lstrip('ip-').split('-')
tags = {}
# Update with env and deployment info
tags.update(mapping['CIDR_SECOND_OCTET'][octets[1]])
ip_addr = netaddr.IPAddress(".".join(octets))
for key, value in mapping['CIDR_REST'].items():
cidr = ".".join([
mapping['CIDR_FIRST_OCTET'],
octets[1],
key])
cidrset = netaddr.IPSet([cidr])
if ip_addr in cidrset:
tags.update(value)
return tags
def potential_devices(root_device):
device_dir = dirname(root_device)
relevant_devices = lambda x: x.startswith(basename(root_device))
all_devices = os.listdir(device_dir)
all_devices = filter(relevant_devices, all_devices)
logging.info("Potential devices on {}: {}".format(root_device, all_devices))
if len(all_devices) > 1:
all_devices.remove(basename(root_device))
return map(lambda x: join(device_dir, x), all_devices)
def get_tags_for_disk(mountpoint):
tag_data = {}
# Look at some files on it to determine:
# - hostname
# - environment
# - deployment
# - cluster
# - instance-id
# - date created
hostname_file = join(mountpoint, "etc", "hostname")
edx_dir = join(mountpoint, 'edx', 'app')
if exists(hostname_file):
# This means this was a root volume.
with open(hostname_file, 'r') as f:
hostname = f.readline().strip()
tag_data['hostname'] = hostname
if exists(edx_dir) and isdir(edx_dir):
# This is an ansible related ami, we'll try to map
# the hostname to a knows deployment and cluster.
cluster_tags = tags_for_hostname(hostname, mappings)
tag_data.update(cluster_tags)
else:
# Not an ansible created root volume.
tag_data['cluster'] = 'unknown'
else:
# Not a root volume
tag_data['cluster'] = "unknown"
instance_file = join(mountpoint, "var", "lib", "cloud", "instance")
if exists(instance_file) and islink(instance_file):
resolved_path = realpath(instance_file)
old_instance_id = basename(resolved_path)
tag_data['instance-id'] = old_instance_id
return tag_data
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Tag unattached ebs volumes.")
parser.add_argument("--profile", '-p',
help="AWS Profile to use with boto.")
parser.add_argument("--noop", "-n", action="store_true",
help="Don't actually tag anything.")
parser.add_argument("--verbose", "-v", action="store_true",
help="More verbose output.")
parser.add_argument("--device", "-d", default="/dev/xvdf",
help="The /dev/??? where the volume should be mounted.")
parser.add_argument("--mountpoint", "-m", default="/mnt",
help="Location to mount the new device.")
parser.add_argument("--config", "-c", required=True,
help="Configuration to map hostnames to tags.")
# The config should specify what tags to associate with the second
# and third octets of the hostname, which should be the ip address.
# example:
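# (hypothetical values, shown only to illustrate the structure consumed
#  by tags_for_hostname above)
#
#   CIDR_FIRST_OCTET: "10"
#   CIDR_SECOND_OCTET:
#     "0": {environment: prod, deployment: edx}
#     "1": {environment: stage, deployment: edx}
#   CIDR_REST:
#     "0.0/20": {cluster: edxapp}
#     "16.0/20": {cluster: commoncluster}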
args = parser.parse_args()
mappings = yaml.safe_load(open(args.config,'r'))
# Setup Logging
if args.verbose:
log_level = logging.DEBUG
logging.basicConfig(format=LOG_FORMAT, level=log_level)
# setup boto
ec2 = boto.connect_ec2(profile_name=args.profile)
# get mounting args
id_info = boto.utils.get_instance_identity()['document']
instance_id = id_info['instanceId']
az = id_info['availabilityZone']
root_device = args.device
mountpoint = args.mountpoint
# Find all unattached volumes
filters = { "status": "available", "availability-zone": az }
potential_volumes = ec2.get_all_volumes(filters=filters)
logging.debug("Found {} unattached volumes in {}".format(len(potential_volumes), az))
for vol in potential_volumes:
if "cluster" in vol.tags:
continue
# Attach volume to the instance running this process
logging.debug("Trying to attach {} to {} at {}".format(
vol.id, instance_id, root_device))
try:
ec2.attach_volume(vol.id, instance_id, root_device)
# Wait for the volume to finish attaching.
waiting_msg = "Waiting for {} to be available at {}"
timeout = TIMEOUT
while not exists(root_device):
time.sleep(2)
logging.debug(waiting_msg.format(vol.id, root_device))
timeout -= 2
if timeout <= 0:
logging.critical("Timed out while attaching {}.".format(vol.id))
exit(1)
# Because a volume might have multiple mount points
devices_on_volume = potential_devices(root_device)
if len(devices_on_volume) != 1:
vol.add_tag("devices_on_volume", str(devices_on_volume))
# Don't tag in this case because the different devices
# may have conflicting tags.
logging.info("Skipping {} because it has multiple mountpoints.".format(vol.id))
logging.info("{} has mountpoints {}".format(vol.id, str(devices_on_volume)))
else:
device = devices_on_volume[0]
try:
# Mount the volume
subprocess.check_call(["sudo", "mount", device, mountpoint])
# Learn all tags we can know from content on disk.
tag_data = get_tags_for_disk(mountpoint)
tag_data['created'] = vol.create_time
# If they are found tag the instance with them
if args.noop:
logging.info("Would have tagged {} with: \n{}".format(vol.id, str(tag_data)))
else:
logging.info("Tagging {} with: \n{}".format(vol.id, str(tag_data)))
vol.add_tags(tag_data)
finally:
# Un-mount the volume
subprocess.check_call(['sudo', 'umount', mountpoint])
finally:
# Need this to be a function so we always re-check the API for status.
is_attached = lambda vol_id: ec2.get_all_volumes(vol_id)[0].status != "available"
timeout = TIMEOUT
while exists(root_device) or is_attached(vol.id):
if is_attached(vol.id):
try:
# detach the volume
ec2.detach_volume(vol.id)
except boto.exception.EC2ResponseError as e:
logging.warning("Failed to detach volume. Will try again in a bit.")
time.sleep(2)
timeout -= 2
if timeout <= 0:
logging.critical("Timed out while detaching {}.".format(vol.id))
exit(1)
logging.debug("Waiting for {} to be detached.".format(vol.id))
| 0.003777 |
from collections import defaultdict
from itertools import product, chain
from math import sqrt, floor, ceil
from PyQt4.QtCore import Qt, QSize
from PyQt4.QtGui import (QGraphicsScene, QGraphicsView, QColor, QPen, QBrush,
QDialog, QApplication, QSizePolicy)
import Orange
from Orange.data import Table, filter
from Orange.data.sql.table import SqlTable, LARGE_TABLE, DEFAULT_SAMPLE_TIME
from Orange.statistics.contingency import get_contingency
from Orange.widgets import gui
from Orange.widgets.settings import DomainContextHandler, ContextSetting
from Orange.widgets.utils import getHtmlCompatibleString
from Orange.widgets.utils.itemmodels import VariableListModel
from Orange.widgets.visualize.owmosaic import (OWCanvasText, OWCanvasRectangle,
OWCanvasLine)
from Orange.widgets.widget import OWWidget, Default, AttributeList
class _ViewWithPress(QGraphicsView):
def __init__(self, *args, **kwargs):
self.handler = kwargs.pop("handler")
super().__init__(*args, **kwargs)
def mousePressEvent(self, ev):
super().mousePressEvent(ev)
if not ev.isAccepted():
self.handler()
class OWSieveDiagram(OWWidget):
name = "Sieve Diagram"
description = "A two-way contingency table providing information in " \
"relation to expected frequency of combination of feature " \
"values under independence."
icon = "icons/SieveDiagram.svg"
priority = 4200
inputs = [("Data", Table, "set_data", Default),
("Features", AttributeList, "set_input_features")]
outputs = [("Selection", Table)]
graph_name = "canvas"
want_control_area = False
settingsHandler = DomainContextHandler()
attrX = ContextSetting("")
attrY = ContextSetting("")
selection = ContextSetting(set())
def __init__(self):
super().__init__()
self.data = None
self.input_features = None
self.attrs = []
self.attr_box = gui.hBox(self.mainArea)
model = VariableListModel()
model.wrap(self.attrs)
self.attrXCombo = gui.comboBox(
self.attr_box, self, value="attrX", contentsLength=12,
callback=self.change_attr, sendSelectedValue=True, valueType=str)
self.attrXCombo.setModel(model)
gui.widgetLabel(self.attr_box, "\u2715").\
setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.attrYCombo = gui.comboBox(
self.attr_box, self, value="attrY", contentsLength=12,
callback=self.change_attr, sendSelectedValue=True, valueType=str)
self.attrYCombo.setModel(model)
self.canvas = QGraphicsScene()
self.canvasView = _ViewWithPress(self.canvas, self.mainArea,
handler=self.reset_selection)
self.mainArea.layout().addWidget(self.canvasView)
self.canvasView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.canvasView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
box = gui.hBox(self.mainArea)
gui.button(box, None, "&Save Graph",
callback=self.save_graph, autoDefault=False)
gui.button(box, None, "&Report",
callback=self.show_report, autoDefault=False)
def sizeHint(self):
return QSize(450, 550)
def set_data(self, data):
if type(data) == SqlTable and data.approx_len() > LARGE_TABLE:
data = data.sample_time(DEFAULT_SAMPLE_TIME)
self.closeContext()
self.data = data
self.areas = []
if self.data is None:
self.attrs[:] = []
else:
self.attrs[:] = [
var for var in chain(self.data.domain.attributes,
self.data.domain.metas)
if var.is_discrete
]
if self.attrs:
self.attrX = self.attrs[0].name
self.attrY = self.attrs[len(self.attrs) > 1].name
else:
self.attrX = self.attrY = None
self.openContext(self.data)
self.information(0, "")
if data and any(attr.is_continuous for attr in data.domain):
self.information(0, "Data contains continuous variables. "
"Discretize the data to use them.")
self.resolve_shown_attributes()
self.update_selection()
def change_attr(self):
self.selection = set()
self.updateGraph()
self.update_selection()
def set_input_features(self, attrList):
self.input_features = attrList
self.resolve_shown_attributes()
self.update_selection()
def resolve_shown_attributes(self):
self.warning(1)
self.attr_box.setEnabled(True)
if self.input_features: # non-None and non-empty!
features = [f for f in self.input_features if f in self.attrs]
if not features:
self.warning(1, "Features from the input signal "
"are not present in the data")
else:
old_attrs = self.attrX, self.attrY
self.attrX, self.attrY = [f.name for f in (features * 2)[:2]]
self.attr_box.setEnabled(False)
if (self.attrX, self.attrY) != old_attrs:
self.selection = set()
# else: do nothing; keep current features, even if input with the
# features just changed to None
self.updateGraph()
def resizeEvent(self, e):
OWWidget.resizeEvent(self,e)
self.updateGraph()
def showEvent(self, ev):
OWWidget.showEvent(self, ev)
self.updateGraph()
def reset_selection(self):
self.selection = set()
self.update_selection()
def select_area(self, area, ev):
if ev.button() != Qt.LeftButton:
return
index = self.areas.index(area)
if ev.modifiers() & Qt.ControlModifier:
self.selection ^= {index}
else:
self.selection = {index}
self.update_selection()
def update_selection(self):
if self.areas is None or not self.selection:
self.send("Selection", None)
return
filters = []
for i, area in enumerate(self.areas):
if i in self.selection:
width = 4
val_x, val_y = area.value_pair
filters.append(
filter.Values([
filter.FilterDiscrete(self.attrX, [val_x]),
filter.FilterDiscrete(self.attrY, [val_y])
]))
else:
width = 1
pen = area.pen()
pen.setWidth(width)
area.setPen(pen)
if len(filters) == 1:
filters = filters[0]
else:
filters = filter.Values(filters, conjunction=False)
self.send("Selection", filters(self.data))
# -----------------------------------------------------------------------
# Everything from here on is ancient and has been changed only according
# to what has been changed above. Some clean-up may be in order some day
#
def updateGraph(self, *args):
for item in self.canvas.items():
self.canvas.removeItem(item)
if self.data is None or len(self.data) == 0 or \
self.attrX is None or self.attrY is None:
return
data = self.data[:, [self.attrX, self.attrY]]
valsX = []
valsY = []
contX = get_contingency(data, self.attrX, self.attrX)
contY = get_contingency(data, self.attrY, self.attrY)
# compute contingency of x and y attributes
for entry in contX:
sum_ = 0
try:
for val in entry: sum_ += val
except: pass
valsX.append(sum_)
for entry in contY:
sum_ = 0
try:
for val in entry: sum_ += val
except: pass
valsY.append(sum_)
contXY = self.getConditionalDistributions(data, [data.domain[self.attrX], data.domain[self.attrY]])
# compute probabilities
probs = {}
for i in range(len(valsX)):
valx = valsX[i]
for j in range(len(valsY)):
valy = valsY[j]
actualProb = 0
try:
actualProb = contXY['%s-%s' %(data.domain[self.attrX].values[i], data.domain[self.attrY].values[j])]
# for val in contXY['%s-%s' %(i, j)]: actualProb += val
except:
actualProb = 0
probs['%s-%s' %(data.domain[self.attrX].values[i], data.domain[self.attrY].values[j])] = ((data.domain[self.attrX].values[i], valx), (data.domain[self.attrY].values[j], valy), actualProb, len(data))
#get text width of Y labels
max_ylabel_w = 0
for j in range(len(valsY)):
xl = OWCanvasText(self.canvas, "", 0, 0, htmlText = getHtmlCompatibleString(data.domain[self.attrY].values[j]), show=False)
max_ylabel_w = max(int(xl.boundingRect().width()), max_ylabel_w)
max_ylabel_w = min(max_ylabel_w, 200) #upper limit for label widths
# get text width of Y attribute name
text = OWCanvasText(self.canvas, data.domain[self.attrY].name, x = 0, y = 0, bold = 1, show = 0, vertical=True)
xOff = int(text.boundingRect().height() + max_ylabel_w)
yOff = 55
sqareSize = min(self.canvasView.width() - xOff - 35, self.canvasView.height() - yOff - 50)
sqareSize = max(sqareSize, 10)
self.canvasView.setSceneRect(0, 0, self.canvasView.width(), self.canvasView.height())
# print graph name
name = "<b>P(%s, %s) ≠ P(%s)×P(%s)</b>" %(self.attrX, self.attrY, self.attrX, self.attrY)
OWCanvasText(self.canvas, "" , xOff+ sqareSize/2, 20, Qt.AlignCenter, htmlText = name)
OWCanvasText(self.canvas, "N = " + str(len(data)), xOff+ sqareSize/2, 38, Qt.AlignCenter, bold = 0)
######################
# compute chi-square
chisquare = 0.0
for i in range(len(valsX)):
for j in range(len(valsY)):
((xAttr, xVal), (yAttr, yVal), actual, sum_) = probs['%s-%s' %(data.domain[self.attrX].values[i], data.domain[self.attrY].values[j])]
expected = float(xVal*yVal)/float(sum_)
if expected == 0: continue
pearson2 = (actual - expected)*(actual - expected) / expected
chisquare += pearson2
######################
# draw rectangles
currX = xOff
max_xlabel_h = 0
normX, normY = sum(valsX), sum(valsY)
self.areas = []
for i in range(len(valsX)):
if valsX[i] == 0: continue
currY = yOff
width = int(float(sqareSize * valsX[i])/float(normX))
for j in range(len(valsY)-1, -1, -1): # this way we sort y values correctly
((xAttr, xVal), (yAttr, yVal), actual, sum_) = probs['%s-%s' %(data.domain[self.attrX].values[i], data.domain[self.attrY].values[j])]
if valsY[j] == 0: continue
height = int(float(sqareSize * valsY[j])/float(normY))
# create rectangle
selected = len(self.areas) in self.selection
rect = OWCanvasRectangle(
self.canvas, currX+2, currY+2, width-4, height-4, z = -10,
onclick=self.select_area)
rect.value_pair = i, j
self.areas.append(rect)
self.addRectIndependencePearson(rect, currX+2, currY+2, width-4, height-4, (xAttr, xVal), (yAttr, yVal), actual, sum_,
width=1 + 3 * selected, # Ugly! This is needed since
# resize redraws the graph! When this is handled by resizing
# just the viewer, update_selection will take care of this
)
expected = float(xVal*yVal)/float(sum_)
pearson = (actual - expected) / sqrt(expected)
tooltipText = """<b>X Attribute: %s</b><br>Value: <b>%s</b><br>Number of instances (p(x)): <b>%d (%.2f%%)</b><hr>
<b>Y Attribute: %s</b><br>Value: <b>%s</b><br>Number of instances (p(y)): <b>%d (%.2f%%)</b><hr>
<b>Number Of Instances (Probabilities):</b><br>Expected (p(x)p(y)): <b>%.1f (%.2f%%)</b><br>Actual (p(x,y)): <b>%d (%.2f%%)</b>
<hr><b>Statistics:</b><br>Chi-square: <b>%.2f</b><br>Standardized Pearson residual: <b>%.2f</b>""" %(self.attrX, getHtmlCompatibleString(xAttr), xVal, 100.0*float(xVal)/float(sum_), self.attrY, getHtmlCompatibleString(yAttr), yVal, 100.0*float(yVal)/float(sum_), expected, 100.0*float(xVal*yVal)/float(sum_*sum_), actual, 100.0*float(actual)/float(sum_), chisquare, pearson )
rect.setToolTip(tooltipText)
currY += height
if currX == xOff:
OWCanvasText(self.canvas, "", xOff, currY - height/2, Qt.AlignRight | Qt.AlignVCenter, htmlText = getHtmlCompatibleString(data.domain[self.attrY].values[j]))
xl = OWCanvasText(self.canvas, "", currX + width/2, yOff + sqareSize, Qt.AlignHCenter | Qt.AlignTop, htmlText = getHtmlCompatibleString(data.domain[self.attrX].values[i]))
max_xlabel_h = max(int(xl.boundingRect().height()), max_xlabel_h)
currX += width
# show attribute names
OWCanvasText(self.canvas, self.attrY, 0, yOff + sqareSize/2, Qt.AlignLeft | Qt.AlignVCenter, bold = 1, vertical=True)
OWCanvasText(self.canvas, self.attrX, xOff + sqareSize/2, yOff + sqareSize + max_xlabel_h, Qt.AlignHCenter | Qt.AlignTop, bold = 1)
# create a dictionary with all possible pairs of "combination-of-attr-values" : count
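    # Keys are the attribute values joined with '-' (e.g. "val1-val2"); values are instance counts.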
def getConditionalDistributions(self, data, attrs):
cond_dist = defaultdict(int)
all_attrs = [data.domain[a] for a in attrs]
if data.domain.class_var is not None:
all_attrs.append(data.domain.class_var)
for i in range(1, len(all_attrs) + 1):
attr = all_attrs[:i]
if type(data) == SqlTable:
# make all possible pairs of attributes + class_var
attr = [a.to_sql() for a in attr]
fields = attr + ["COUNT(*)"]
query = data._sql_query(fields, group_by=attr)
with data._execute_sql_query(query) as cur:
res = cur.fetchall()
for r in res:
str_values =[a.repr_val(a.to_val(x)) for a, x in zip(all_attrs, r[:-1])]
str_values = [x if x != '?' else 'None' for x in str_values]
cond_dist['-'.join(str_values)] = r[-1]
else:
for indices in product(*(range(len(a.values)) for a in attr)):
vals = []
conditions = []
for k, ind in enumerate(indices):
vals.append(attr[k].values[ind])
fd = Orange.data.filter.FilterDiscrete(column=attr[k], values=[attr[k].values[ind]])
conditions.append(fd)
filt = Orange.data.filter.Values(conditions)
filtdata = filt(data)
cond_dist['-'.join(vals)] = len(filtdata)
return cond_dist
######################################################################
## show deviations from attribute independence with standardized pearson residuals
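    # residual = (observed - expected) / sqrt(expected); cells with more instances than
    # independence predicts get a blue outline, cells with fewer get a red one.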
def addRectIndependencePearson(self, rect, x, y, w, h, xAttr_xVal, yAttr_yVal, actual, sum, width):
xAttr, xVal = xAttr_xVal
yAttr, yVal = yAttr_yVal
expected = float(xVal*yVal)/float(sum)
pearson = (actual - expected) / sqrt(expected)
if pearson > 0: # if there are more examples that we would expect under the null hypothesis
intPearson = floor(pearson)
pen = QPen(QColor(0,0,255), width); rect.setPen(pen)
b = 255
r = g = 255 - intPearson*20
r = g = max(r, 55) #
elif pearson < 0:
intPearson = ceil(pearson)
pen = QPen(QColor(255,0,0), width)
rect.setPen(pen)
r = 255
b = g = 255 + intPearson*20
b = g = max(b, 55)
else:
pen = QPen(QColor(255,255,255), width)
r = g = b = 255 # white
color = QColor(r,g,b)
brush = QBrush(color); rect.setBrush(brush)
if pearson > 0:
pearson = min(pearson, 10)
kvoc = 1 - 0.08 * pearson # if pearson in [0..10] --> kvoc in [1..0.2]
else:
pearson = max(pearson, -10)
kvoc = 1 - 0.4*pearson
self.addLines(x,y,w,h, kvoc, pen)
##################################################
# add lines
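    # The hatch-line spacing is dist * diff, where diff (kvoc) shrinks for large positive
    # residuals (denser blue hatching) and grows for large negative residuals (sparser red hatching).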
def addLines(self, x,y,w,h, diff, pen):
if w == 0 or h == 0: return
# create lines
dist = 20 # original distance between two lines in pixels
dist = dist * diff
temp = dist
while (temp < w):
OWCanvasLine(self.canvas, temp+x, y, temp+x, y+h, 1, pen.color())
temp += dist
temp = dist
while (temp < h):
OWCanvasLine(self.canvas, x, y+temp, x+w, y+temp, 1, pen.color())
temp += dist
def closeEvent(self, ce):
QDialog.closeEvent(self, ce)
def get_widget_name_extension(self):
if self.data is not None:
return "{} vs {}".format(self.attrX, self.attrY)
def send_report(self):
self.report_plot()
# test widget appearance
if __name__=="__main__":
import sys
a=QApplication(sys.argv)
ow=OWSieveDiagram()
ow.show()
data = Table(r"zoo.tab")
ow.set_data(data)
a.exec_()
ow.saveSettings()
| 0.006135 |
"""
Views related to course tabs
"""
from access import has_course_access
from util.json_request import expect_json, JsonResponse
from django.http import HttpResponseNotFound
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_http_methods
from edxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.django import loc_mapper
from xmodule.modulestore.locator import BlockUsageLocator
from xmodule.tabs import CourseTabList, StaticTab, CourseTab, InvalidTabsException
from ..utils import get_modulestore, get_lms_link_for_item
__all__ = ['tabs_handler']
@expect_json
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
def tabs_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
The restful handler for static tabs.
GET
html: return page for editing static tabs
json: not supported
PUT or POST
json: update the tab order. It is expected that the request body contains a JSON-encoded dict with entry "tabs".
The value for "tabs" is an array of tab locators, indicating the desired order of the tabs.
Creating a tab, deleting a tab, or changing its contents is not supported through this method.
Instead use the general xblock URL (see item.xblock_handler).
"""
locator = BlockUsageLocator(package_id=package_id, branch=branch, version_guid=version_guid, block_id=block)
if not has_course_access(request.user, locator):
raise PermissionDenied()
old_location = loc_mapper().translate_locator_to_location(locator)
store = get_modulestore(old_location)
course_item = store.get_item(old_location)
if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
if request.method == 'GET':
raise NotImplementedError('coming soon')
else:
if 'tabs' in request.json:
return reorder_tabs_handler(course_item, request)
elif 'tab_id_locator' in request.json:
return edit_tab_handler(course_item, request)
else:
raise NotImplementedError('Creating or changing tab content is not supported.')
elif request.method == 'GET': # assume html
# get all tabs from the tabs list: static tabs (a.k.a. user-created tabs) and built-in tabs
# present in the same order they are displayed in LMS
tabs_to_render = []
for tab in CourseTabList.iterate_displayable_cms(
course_item,
settings,
):
if isinstance(tab, StaticTab):
# static tab needs its locator information to render itself as an xmodule
static_tab_loc = old_location.replace(category='static_tab', name=tab.url_slug)
tab.locator = loc_mapper().translate_location(
course_item.location.course_id, static_tab_loc, False, True
)
tabs_to_render.append(tab)
return render_to_response('edit-tabs.html', {
'context_course': course_item,
'tabs_to_render': tabs_to_render,
'course_locator': locator,
'lms_link': get_lms_link_for_item(course_item.location),
})
else:
return HttpResponseNotFound()
def reorder_tabs_handler(course_item, request):
"""
Helper function for handling reorder of tabs request
"""
# Tabs are identified by tab_id or locators.
# The locators are used to identify static tabs since they are xmodules.
# Although all tabs have tab_ids, newly created static tabs do not know
# their tab_ids since the xmodule editor uses only locators to identify new objects.
requested_tab_id_locators = request.json['tabs']
# original tab list in original order
old_tab_list = course_item.tabs
# create a new list in the new order
new_tab_list = []
for tab_id_locator in requested_tab_id_locators:
tab = get_tab_by_tab_id_locator(old_tab_list, tab_id_locator)
if tab is None:
return JsonResponse(
{"error": "Tab with id_locator '{0}' does not exist.".format(tab_id_locator)}, status=400
)
new_tab_list.append(tab)
# the old_tab_list may contain additional tabs that were not rendered in the UI because of
# global or course settings. so add those to the end of the list.
non_displayed_tabs = set(old_tab_list) - set(new_tab_list)
new_tab_list.extend(non_displayed_tabs)
# validate the tabs to make sure everything is Ok (e.g., did the client try to reorder unmovable tabs?)
try:
CourseTabList.validate_tabs(new_tab_list)
except InvalidTabsException, exception:
return JsonResponse(
{"error": "New list of tabs is not valid: {0}.".format(str(exception))}, status=400
)
# persist the new order of the tabs
course_item.tabs = new_tab_list
modulestore('direct').update_item(course_item, request.user.id)
return JsonResponse()
def edit_tab_handler(course_item, request):
"""
Helper function for handling requests to edit settings of a single tab
"""
# Tabs are identified by tab_id or locator
tab_id_locator = request.json['tab_id_locator']
# Find the given tab in the course
tab = get_tab_by_tab_id_locator(course_item.tabs, tab_id_locator)
if tab is None:
return JsonResponse(
{"error": "Tab with id_locator '{0}' does not exist.".format(tab_id_locator)}, status=400
)
if 'is_hidden' in request.json:
# set the is_hidden attribute on the requested tab
tab.is_hidden = request.json['is_hidden']
modulestore('direct').update_item(course_item, request.user.id)
else:
raise NotImplementedError('Unsupported request to edit tab: {0}'.format(request.json))
return JsonResponse()
def get_tab_by_tab_id_locator(tab_list, tab_id_locator):
"""
Look for a tab with the specified tab_id or locator. Returns the first matching tab.
"""
    if 'tab_id' in tab_id_locator:
        tab = CourseTabList.get_tab_by_id(tab_list, tab_id_locator['tab_id'])
    elif 'tab_locator' in tab_id_locator:
        tab = get_tab_by_locator(tab_list, tab_id_locator['tab_locator'])
    else:
        tab = None
    return tab
def get_tab_by_locator(tab_list, tab_locator):
"""
Look for a tab with the specified locator. Returns the first matching tab.
"""
tab_location = loc_mapper().translate_locator_to_location(BlockUsageLocator(tab_locator))
item = modulestore('direct').get_item(tab_location)
static_tab = StaticTab(
name=item.display_name,
url_slug=item.location.name,
)
return CourseTabList.get_tab_by_id(tab_list, static_tab.tab_id)
# "primitive" tab edit functions driven by the command line.
# These should be replaced/deleted by a more capable GUI someday.
# Note that the command line UI identifies the tabs with 1-based
# indexing, but this implementation code is standard 0-based.
def validate_args(num, tab_type):
"Throws for the disallowed cases."
if num <= 1:
raise ValueError('Tabs 1 and 2 cannot be edited')
if tab_type == 'static_tab':
raise ValueError('Tabs of type static_tab cannot be edited here (use Studio)')
def primitive_delete(course, num):
"Deletes the given tab number (0 based)."
tabs = course.tabs
validate_args(num, tabs[num].get('type', ''))
del tabs[num]
# Note for future implementations: if you delete a static_tab, then Chris Dodge
# points out that there's other stuff to delete beyond this element.
# This code happens to not delete static_tab so it doesn't come up.
modulestore('direct').update_item(course, '**replace_user**')
def primitive_insert(course, num, tab_type, name):
"Inserts a new tab at the given number (0 based)."
validate_args(num, tab_type)
new_tab = CourseTab.from_json({u'type': unicode(tab_type), u'name': unicode(name)})
tabs = course.tabs
tabs.insert(num, new_tab)
modulestore('direct').update_item(course, '**replace_user**')
| 0.002995 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkRulesOperations(object):
"""VirtualNetworkRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.mariadb.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
server_name, # type: str
virtual_network_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkRule"
"""Gets a virtual network rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param virtual_network_rule_name: The name of the virtual network rule.
:type virtual_network_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkRule, or the result of cls(response)
:rtype: ~azure.mgmt.rdbms.mariadb.models.VirtualNetworkRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'virtualNetworkRuleName': self._serialize.url("virtual_network_rule_name", virtual_network_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMariaDB/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
server_name, # type: str
virtual_network_rule_name, # type: str
parameters, # type: "_models.VirtualNetworkRule"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.VirtualNetworkRule"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualNetworkRule"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'virtualNetworkRuleName': self._serialize.url("virtual_network_rule_name", virtual_network_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetworkRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMariaDB/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
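    # Illustrative usage of begin_create_or_update (the client and model names below are
    # assumptions, not defined in this file); poller.result() blocks until the rule reaches
    # a terminal provisioning state:
    #     rule = client.virtual_network_rules.begin_create_or_update(
    #         "my-rg", "my-server", "my-rule",
    #         VirtualNetworkRule(virtual_network_subnet_id=subnet_id)).result()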
def begin_create_or_update(
self,
resource_group_name, # type: str
server_name, # type: str
virtual_network_rule_name, # type: str
parameters, # type: "_models.VirtualNetworkRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetworkRule"]
"""Creates or updates an existing virtual network rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param virtual_network_rule_name: The name of the virtual network rule.
:type virtual_network_rule_name: str
:param parameters: The requested virtual Network Rule Resource state.
:type parameters: ~azure.mgmt.rdbms.mariadb.models.VirtualNetworkRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetworkRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.mariadb.models.VirtualNetworkRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
server_name=server_name,
virtual_network_rule_name=virtual_network_rule_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'virtualNetworkRuleName': self._serialize.url("virtual_network_rule_name", virtual_network_rule_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMariaDB/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
server_name, # type: str
virtual_network_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'virtualNetworkRuleName': self._serialize.url("virtual_network_rule_name", virtual_network_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMariaDB/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
server_name, # type: str
virtual_network_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the virtual network rule with the given name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param virtual_network_rule_name: The name of the virtual network rule.
:type virtual_network_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
server_name=server_name,
virtual_network_rule_name=virtual_network_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'virtualNetworkRuleName': self._serialize.url("virtual_network_rule_name", virtual_network_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMariaDB/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName}'} # type: ignore
def list_by_server(
self,
resource_group_name, # type: str
server_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkRuleListResult"]
"""Gets a list of virtual network rules in a server.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.rdbms.mariadb.models.VirtualNetworkRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_server.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBForMariaDB/servers/{serverName}/virtualNetworkRules'} # type: ignore
| 0.004838 |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from glance.common import crypt
from glance.common import exception
import glance.context
import glance.db
from glance.openstack.common import uuidutils
import glance.tests.unit.utils as unit_test_utils
import glance.tests.utils as test_utils
CONF = cfg.CONF
CONF.import_opt('metadata_encryption_key', 'glance.common.config')
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc'
UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7'
UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81'
TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8'
TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4'
USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf'
UUID1_LOCATION = 'file:///path/to/image'
UUID1_LOCATION_METADATA = {'key': 'value'}
UUID3_LOCATION = 'http://somehost.com/place'
CHECKSUM = '93264c3edf5972c9f1cb309543d38a5c'
CHCKSUM1 = '43264c3edf4972c9f1cb309543d38a55'
def _db_fixture(id, **kwargs):
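    # Build a minimal image record for the fake DB; keyword arguments override the defaults.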
obj = {
'id': id,
'name': None,
'is_public': False,
'properties': {},
'checksum': None,
'owner': None,
'status': 'queued',
'tags': [],
'size': None,
'locations': [],
'protected': False,
'disk_format': None,
'container_format': None,
'deleted': False,
'min_ram': None,
'min_disk': None,
}
obj.update(kwargs)
return obj
def _db_image_member_fixture(image_id, member_id, **kwargs):
obj = {
'image_id': image_id,
'member': member_id,
}
obj.update(kwargs)
return obj
class TestImageRepo(test_utils.BaseTestCase):
def setUp(self):
super(TestImageRepo, self).setUp()
self.db = unit_test_utils.FakeDB()
self.db.reset()
self.context = glance.context.RequestContext(
user=USER1, tenant=TENANT1)
self.image_repo = glance.db.ImageRepo(self.context, self.db)
self.image_factory = glance.domain.ImageFactory()
self._create_images()
self._create_image_members()
def _create_images(self):
self.db.reset()
self.images = [
_db_fixture(UUID1, owner=TENANT1, checksum=CHECKSUM,
name='1', size=256,
is_public=True, status='active',
locations=[{'url': UUID1_LOCATION,
'metadata': UUID1_LOCATION_METADATA}]),
_db_fixture(UUID2, owner=TENANT1, checksum=CHCKSUM1,
name='2', size=512, is_public=False),
_db_fixture(UUID3, owner=TENANT3, checksum=CHCKSUM1,
name='3', size=1024, is_public=True,
locations=[{'url': UUID3_LOCATION,
'metadata': {}}]),
_db_fixture(UUID4, owner=TENANT4, name='4', size=2048),
]
[self.db.image_create(None, image) for image in self.images]
self.db.image_tag_set_all(None, UUID1, ['ping', 'pong'])
def _create_image_members(self):
self.image_members = [
_db_image_member_fixture(UUID2, TENANT2),
_db_image_member_fixture(UUID2, TENANT3, status='accepted'),
]
[self.db.image_member_create(None, image_member)
for image_member in self.image_members]
def test_get(self):
image = self.image_repo.get(UUID1)
self.assertEquals(image.image_id, UUID1)
self.assertEquals(image.name, '1')
self.assertEquals(image.tags, set(['ping', 'pong']))
self.assertEquals(image.visibility, 'public')
self.assertEquals(image.status, 'active')
self.assertEquals(image.size, 256)
self.assertEquals(image.owner, TENANT1)
def test_location_value(self):
image = self.image_repo.get(UUID3)
self.assertEqual(image.locations[0]['url'], UUID3_LOCATION)
def test_location_data_value(self):
image = self.image_repo.get(UUID1)
self.assertEqual(image.locations[0]['url'], UUID1_LOCATION)
self.assertEqual(image.locations[0]['metadata'],
UUID1_LOCATION_METADATA)
def test_location_data_exists(self):
image = self.image_repo.get(UUID2)
self.assertEqual(image.locations, [])
def test_get_not_found(self):
self.assertRaises(exception.NotFound, self.image_repo.get,
uuidutils.generate_uuid())
def test_get_forbidden(self):
self.assertRaises(exception.NotFound, self.image_repo.get, UUID4)
def test_list(self):
images = self.image_repo.list()
image_ids = set([i.image_id for i in images])
self.assertEqual(set([UUID1, UUID2, UUID3]), image_ids)
def _do_test_list_status(self, status, expected):
self.context = glance.context.RequestContext(
user=USER1, tenant=TENANT3)
self.image_repo = glance.db.ImageRepo(self.context, self.db)
images = self.image_repo.list(member_status=status)
self.assertEqual(expected, len(images))
def test_list_status(self):
self._do_test_list_status(None, 3)
def test_list_status_pending(self):
self._do_test_list_status('pending', 2)
def test_list_status_rejected(self):
self._do_test_list_status('rejected', 2)
def test_list_status_all(self):
self._do_test_list_status('all', 3)
def test_list_with_marker(self):
full_images = self.image_repo.list()
full_ids = [i.image_id for i in full_images]
marked_images = self.image_repo.list(marker=full_ids[0])
actual_ids = [i.image_id for i in marked_images]
self.assertEqual(actual_ids, full_ids[1:])
def test_list_with_last_marker(self):
images = self.image_repo.list()
marked_images = self.image_repo.list(marker=images[-1].image_id)
self.assertEqual(len(marked_images), 0)
def test_limited_list(self):
limited_images = self.image_repo.list(limit=2)
self.assertEqual(len(limited_images), 2)
def test_list_with_marker_and_limit(self):
full_images = self.image_repo.list()
full_ids = [i.image_id for i in full_images]
marked_images = self.image_repo.list(marker=full_ids[0], limit=1)
actual_ids = [i.image_id for i in marked_images]
self.assertEqual(actual_ids, full_ids[1:2])
def test_list_private_images(self):
filters = {'visibility': 'private'}
images = self.image_repo.list(filters=filters)
image_ids = set([i.image_id for i in images])
self.assertEqual(set([UUID2]), image_ids)
def test_list_with_checksum_filter_single_image(self):
filters = {'checksum': CHECKSUM}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEquals(1, len(image_ids))
self.assertEqual([UUID1], image_ids)
def test_list_with_checksum_filter_multiple_images(self):
filters = {'checksum': CHCKSUM1}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEquals(2, len(image_ids))
self.assertEqual([UUID3, UUID2], image_ids)
def test_list_with_wrong_checksum(self):
WRONG_CHKSUM = 'd2fd42f979e1ed1aafadc7eb9354bff839c858cd'
filters = {'checksum': WRONG_CHKSUM}
images = self.image_repo.list(filters=filters)
self.assertEquals(0, len(images))
def test_list_with_tags_filter_single_tag(self):
filters = {'tags': ['ping']}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEquals(1, len(image_ids))
self.assertEqual([UUID1], image_ids)
def test_list_with_tags_filter_multiple_tags(self):
filters = {'tags': ['ping', 'pong']}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEquals(1, len(image_ids))
self.assertEqual([UUID1], image_ids)
def test_list_with_tags_filter_multiple_tags_and_nonexistent(self):
filters = {'tags': ['ping', 'fake']}
images = self.image_repo.list(filters=filters)
image_ids = list([i.image_id for i in images])
self.assertEquals(0, len(image_ids))
def test_list_with_wrong_tags(self):
filters = {'tags': ['fake']}
images = self.image_repo.list(filters=filters)
self.assertEquals(0, len(images))
def test_list_public_images(self):
filters = {'visibility': 'public'}
images = self.image_repo.list(filters=filters)
image_ids = set([i.image_id for i in images])
self.assertEqual(set([UUID1, UUID3]), image_ids)
def test_sorted_list(self):
images = self.image_repo.list(sort_key='size', sort_dir='asc')
image_ids = [i.image_id for i in images]
self.assertEqual([UUID1, UUID2, UUID3], image_ids)
def test_add_image(self):
image = self.image_factory.new_image(name='added image')
self.assertEqual(image.updated_at, image.created_at)
self.image_repo.add(image)
retreived_image = self.image_repo.get(image.image_id)
self.assertEqual(retreived_image.name, 'added image')
self.assertEqual(retreived_image.updated_at, image.updated_at)
def test_save_image(self):
image = self.image_repo.get(UUID1)
original_update_time = image.updated_at
image.name = 'foo'
image.tags = ['king', 'kong']
self.image_repo.save(image)
current_update_time = image.updated_at
self.assertTrue(current_update_time > original_update_time)
image = self.image_repo.get(UUID1)
self.assertEqual(image.name, 'foo')
self.assertEqual(image.tags, set(['king', 'kong']))
self.assertEqual(image.updated_at, current_update_time)
def test_remove_image(self):
image = self.image_repo.get(UUID1)
previous_update_time = image.updated_at
self.image_repo.remove(image)
self.assertTrue(image.updated_at > previous_update_time)
self.assertRaises(exception.NotFound, self.image_repo.get, UUID1)
class TestEncryptedLocations(test_utils.BaseTestCase):
def setUp(self):
super(TestEncryptedLocations, self).setUp()
self.db = unit_test_utils.FakeDB()
self.db.reset()
self.context = glance.context.RequestContext(
user=USER1, tenant=TENANT1)
self.image_repo = glance.db.ImageRepo(self.context, self.db)
self.image_factory = glance.domain.ImageFactory()
self.crypt_key = '0123456789abcdef'
self.config(metadata_encryption_key=self.crypt_key)
self.foo_bar_location = [{'url': 'foo', 'metadata': {}},
{'url': 'bar', 'metadata': {}}]
def test_encrypt_locations_on_add(self):
image = self.image_factory.new_image(UUID1)
image.locations = self.foo_bar_location
self.image_repo.add(image)
db_data = self.db.image_get(self.context, UUID1)
self.assertNotEqual(db_data['locations'], ['foo', 'bar'])
decrypted_locations = [crypt.urlsafe_decrypt(self.crypt_key, l['url'])
for l in db_data['locations']]
self.assertEqual(decrypted_locations,
[l['url'] for l in self.foo_bar_location])
def test_encrypt_locations_on_save(self):
image = self.image_factory.new_image(UUID1)
self.image_repo.add(image)
image.locations = self.foo_bar_location
self.image_repo.save(image)
db_data = self.db.image_get(self.context, UUID1)
self.assertNotEqual(db_data['locations'], ['foo', 'bar'])
decrypted_locations = [crypt.urlsafe_decrypt(self.crypt_key, l['url'])
for l in db_data['locations']]
self.assertEqual(decrypted_locations,
[l['url'] for l in self.foo_bar_location])
def test_decrypt_locations_on_get(self):
url_loc = ['ping', 'pong']
orig_locations = [{'url': l, 'metadata': {}} for l in url_loc]
encrypted_locs = [crypt.urlsafe_encrypt(self.crypt_key, l)
for l in url_loc]
encrypted_locations = [{'url': l, 'metadata': {}}
for l in encrypted_locs]
self.assertNotEqual(encrypted_locations, orig_locations)
db_data = _db_fixture(UUID1, owner=TENANT1,
locations=encrypted_locations)
self.db.image_create(None, db_data)
image = self.image_repo.get(UUID1)
self.assertEqual(image.locations, orig_locations)
def test_decrypt_locations_on_list(self):
url_loc = ['ping', 'pong']
orig_locations = [{'url': l, 'metadata': {}} for l in url_loc]
encrypted_locs = [crypt.urlsafe_encrypt(self.crypt_key, l)
for l in url_loc]
encrypted_locations = [{'url': l, 'metadata': {}}
for l in encrypted_locs]
self.assertNotEqual(encrypted_locations, orig_locations)
db_data = _db_fixture(UUID1, owner=TENANT1,
locations=encrypted_locations)
self.db.image_create(None, db_data)
image = self.image_repo.list()[0]
self.assertEqual(image.locations, orig_locations)
class TestImageMemberRepo(test_utils.BaseTestCase):
def setUp(self):
super(TestImageMemberRepo, self).setUp()
self.db = unit_test_utils.FakeDB()
self.db.reset()
self.context = glance.context.RequestContext(
user=USER1, tenant=TENANT1)
self.image_repo = glance.db.ImageRepo(self.context, self.db)
self.image_member_factory = glance.domain.ImageMemberFactory()
self._create_images()
self._create_image_members()
image = self.image_repo.get(UUID1)
self.image_member_repo = glance.db.ImageMemberRepo(self.context,
self.db, image)
def _create_images(self):
self.images = [
_db_fixture(UUID1, owner=TENANT1, name='1', size=256,
status='active'),
_db_fixture(UUID2, owner=TENANT1, name='2',
size=512, is_public=False),
]
[self.db.image_create(None, image) for image in self.images]
self.db.image_tag_set_all(None, UUID1, ['ping', 'pong'])
def _create_image_members(self):
self.image_members = [
_db_image_member_fixture(UUID1, TENANT2),
_db_image_member_fixture(UUID1, TENANT3),
]
[self.db.image_member_create(None, image_member)
for image_member in self.image_members]
def test_list(self):
image_members = self.image_member_repo.list()
image_member_ids = set([i.member_id for i in image_members])
self.assertEqual(set([TENANT2, TENANT3]), image_member_ids)
def test_list_no_members(self):
image = self.image_repo.get(UUID2)
self.image_member_repo_uuid2 = glance.db.ImageMemberRepo(
self.context, self.db, image)
image_members = self.image_member_repo_uuid2.list()
image_member_ids = set([i.member_id for i in image_members])
self.assertEqual(set([]), image_member_ids)
def test_save_image_member(self):
image_member = self.image_member_repo.get(TENANT2)
image_member.status = 'accepted'
image_member_updated = self.image_member_repo.save(image_member)
        self.assertEqual(image_member.id, image_member_updated.id)
self.assertEqual(image_member_updated.status, 'accepted')
def test_add_image_member(self):
image = self.image_repo.get(UUID1)
image_member = self.image_member_factory.new_image_member(image,
TENANT4)
self.assertTrue(image_member.id is None)
retreived_image_member = self.image_member_repo.add(image_member)
self.assertEqual(retreived_image_member.id, image_member.id)
self.assertEqual(retreived_image_member.image_id,
image_member.image_id)
self.assertEqual(retreived_image_member.member_id,
image_member.member_id)
self.assertEqual(retreived_image_member.status,
'pending')
def test_remove_image_member(self):
image_member = self.image_member_repo.get(TENANT2)
self.image_member_repo.remove(image_member)
self.assertRaises(exception.NotFound, self.image_member_repo.get,
TENANT2)
def test_remove_image_member_does_not_exist(self):
image = self.image_repo.get(UUID2)
fake_member = glance.domain.ImageMemberFactory()\
.new_image_member(image, TENANT4)
self.assertRaises(exception.NotFound, self.image_member_repo.remove,
fake_member)
| 0.000672 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_partner_balance(osv.osv_memory):
"""
This wizard will provide the partner balance report by periods, between any two dates.
"""
_inherit = 'account.common.partner.report'
_name = 'account.partner.balance'
_description = 'Print Account Partner Balance'
_columns = {
        'display_partner': fields.selection(
            [('non-zero_balance', 'With balance is not equal to 0'), ('all', 'All Partners')],
            'Display Partners'),
'journal_ids': fields.many2many('account.journal', 'account_partner_balance_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
_defaults = {
# 'initial_balance': True,
'display_partner': 'non-zero_balance',
}
def _print_report(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
data = self.pre_print_report(cr, uid, ids, data, context=context)
data['form'].update(self.read(cr, uid, ids, ['display_partner'])[0])
return {
'type': 'ir.actions.report.xml',
'report_name': 'account.partner.balance',
'datas': data,
}
account_partner_balance()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 0.004371 |
import hashlib
import hmac
import logging
import requests
from datetime import timedelta
from django.utils import timezone
from allauth.socialaccount import app_settings, providers
from allauth.socialaccount.helpers import (
complete_social_login,
render_authentication_error,
)
from allauth.socialaccount.models import SocialLogin, SocialToken
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .forms import FacebookConnectForm
from .provider import GRAPH_API_URL, GRAPH_API_VERSION, FacebookProvider
logger = logging.getLogger(__name__)
def compute_appsecret_proof(app, token):
# Generate an appsecret_proof parameter to secure the Graph API call
# see https://developers.facebook.com/docs/graph-api/securing-requests
msg = token.token.encode('utf-8')
key = app.secret.encode('utf-8')
appsecret_proof = hmac.new(
key,
msg,
digestmod=hashlib.sha256).hexdigest()
return appsecret_proof
def fb_complete_login(request, app, token):
provider = providers.registry.by_id(FacebookProvider.id, request)
resp = requests.get(
GRAPH_API_URL + '/me',
params={
'fields': ','.join(provider.get_fields()),
'access_token': token.token,
'appsecret_proof': compute_appsecret_proof(app, token)
})
resp.raise_for_status()
extra_data = resp.json()
login = provider.sociallogin_from_response(request, extra_data)
return login
class FacebookOAuth2Adapter(OAuth2Adapter):
provider_id = FacebookProvider.id
provider_default_auth_url = (
'https://www.facebook.com/{}/dialog/oauth'.format(
GRAPH_API_VERSION))
settings = app_settings.PROVIDERS.get(provider_id, {})
authorize_url = settings.get('AUTHORIZE_URL', provider_default_auth_url)
access_token_url = GRAPH_API_URL + '/oauth/access_token'
expires_in_key = 'expires_in'
def complete_login(self, request, app, access_token, **kwargs):
return fb_complete_login(request, app, access_token)
oauth2_login = OAuth2LoginView.adapter_view(FacebookOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(FacebookOAuth2Adapter)
def login_by_token(request):
ret = None
auth_exception = None
if request.method == 'POST':
form = FacebookConnectForm(request.POST)
if form.is_valid():
try:
provider = providers.registry.by_id(
FacebookProvider.id, request)
login_options = provider.get_fb_login_options(request)
app = provider.get_app(request)
access_token = form.cleaned_data['access_token']
expires_at = None
if login_options.get('auth_type') == 'reauthenticate':
info = requests.get(
GRAPH_API_URL + '/oauth/access_token_info',
params={'client_id': app.client_id,
'access_token': access_token}).json()
nonce = provider.get_nonce(request, pop=True)
ok = nonce and nonce == info.get('auth_nonce')
else:
ok = True
if ok and provider.get_settings().get('EXCHANGE_TOKEN'):
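                    # Exchange the short-lived client-side token for a long-lived
                    # server-side token via the fb_exchange_token grant.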
resp = requests.get(
GRAPH_API_URL + '/oauth/access_token',
params={'grant_type': 'fb_exchange_token',
'client_id': app.client_id,
'client_secret': app.secret,
'fb_exchange_token': access_token}).json()
access_token = resp['access_token']
expires_in = resp.get('expires_in')
if expires_in:
expires_at = timezone.now() + timedelta(
seconds=int(expires_in))
if ok:
token = SocialToken(app=app,
token=access_token,
expires_at=expires_at)
login = fb_complete_login(request, app, token)
login.token = token
login.state = SocialLogin.state_from_request(request)
ret = complete_social_login(request, login)
except requests.RequestException as e:
logger.exception('Error accessing FB user profile')
auth_exception = e
if not ret:
ret = render_authentication_error(request,
FacebookProvider.id,
exception=auth_exception)
return ret
| 0 |
import os
import sys
bdLibPath = os.path.abspath(sys.argv[0] + "..")
if not bdLibPath in sys.path: sys.path.append(bdLibPath)
from _lib import *
import unittest
class SmokeTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_001_GoogleSearch(self):
LaunchBrowser("chrome", "www.google.com")
type(GoogleMap.google_search_input, "Telerik academy")
wait(GoogleMap.google_telerik_link, 10)
assert exists(GoogleMap.google_telerik_link)
def test_002_DragAndDrop(self):
LaunchBrowser("chrome", "http://www.dhtmlgoodies.com/scripts/drag-drop-custom/demo-drag-drop-3.html")
dragDrop(CapitalsMap.oslo, CapitalsMap.norway)
dragDrop(CapitalsMap.stockholm, CapitalsMap.sweden)
dragDrop(CapitalsMap.washington, CapitalsMap.us)
dragDrop(CapitalsMap.copenhagen, CapitalsMap.denmark)
dragDrop(CapitalsMap.seoul, CapitalsMap.southKorea)
dragDrop(CapitalsMap.rome, CapitalsMap.italy)
dragDrop(CapitalsMap.madrid, CapitalsMap.spain)
assert exists(CapitalsMap.correctRome)
assert exists(CapitalsMap.correctMadrid)
assert exists(CapitalsMap.correctOslo)
assert exists(CapitalsMap.correctCopenhagen)
assert exists(CapitalsMap.correctSeoul)
assert exists(CapitalsMap.correctStockholm)
assert exists(CapitalsMap.correctWashington)
def test_003_CalculatorFunctionsCorrectly(self):
LaunchCalculator();
click(CalculatorMap.two)
click(CalculatorMap.subtract)
click(CalculatorMap.four)
click(CalculatorMap.equals)
assert exists(CalculatorMap.subtractionResult)
click(CalculatorMap.multiply)
click(CalculatorMap.three)
click(CalculatorMap.equals)
assert exists(CalculatorMap.multiplyResult)
click(CalculatorMap.add)
click(CalculatorMap.one)
click(CalculatorMap.one)
click(CalculatorMap.equals)
assert exists (CalculatorMap.additionResult)
click(CalculatorMap.divide)
click(CalculatorMap.two)
click(CalculatorMap.equals)
        assert exists(CalculatorMap.divisionResult)
click(CalculatorMap.divide)
click(CalculatorMap.zero)
assert exists(CalculatorMap.divisionByZeroMessage)
class Tests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_003_CalculatorFunctionsCorrectly(self):
LaunchCalculator();
click(CalculatorMap.two)
click(CalculatorMap.subtract)
click(CalculatorMap.four)
click(CalculatorMap.equals)
assert exists(CalculatorMap.subtractionResult)
click(CalculatorMap.multiply)
click(CalculatorMap.three)
click(CalculatorMap.equals)
assert exists(CalculatorMap.multiplyResult)
click(CalculatorMap.add)
click(CalculatorMap.one)
click(CalculatorMap.zero)
click(CalculatorMap.equals)
assert exists (CalculatorMap.additionResult)
click(CalculatorMap.divide)
click(CalculatorMap.two)
click(CalculatorMap.equals)
assert exists(CalculatorMap.divisionResult)
click(CalculatorMap.divide)
click(CalculatorMap.zero)
click(CalculatorMap.equals)
assert exists(CalculatorMap.divisionByZeroMessage)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(Tests)
outfile = open("report.html", "w")
runner = HTMLTestRunner.HTMLTestRunner(stream=outfile, title='SmokeTests Report')
runner.run(suite)
outfile.close()
| 0.007339 |
# -*- coding: utf-8 -*-
# This file is part of the Horus Project
__author__ = 'Jesús Arroyo Torrens <[email protected]>'
__copyright__ = 'Copyright (C) 2014-2016 Mundo Reader S.L.'
__license__ = 'GNU General Public License v2 http://www.gnu.org/licenses/gpl2.html'
import numpy as np
from horus import Singleton
@Singleton
class Pattern(object):
def __init__(self):
self._rows = 0
self._columns = 0
self._square_width = 0
self.origin_distance = 0
@property
def rows(self):
return self._rows
@rows.setter
def rows(self, value):
value = self.to_int(value)
if self._rows != value:
self._rows = value
self._generate_object_points()
def set_rows(self, value):
self.rows = value
@property
def columns(self):
return self._columns
@columns.setter
def columns(self, value):
value = self.to_int(value)
if self._columns != value:
self._columns = value
self._generate_object_points()
def set_columns(self, value):
self.columns = value
@property
def square_width(self):
return self._square_width
@square_width.setter
def square_width(self, value):
value = self.to_float(value)
if self._square_width != value:
self._square_width = value
self._generate_object_points()
def set_square_width(self, value):
self.square_width = value
def _generate_object_points(self):
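        # Build the (rows * columns, 3) grid of chessboard corner coordinates in pattern
        # units, scaled by the square width; Z stays 0 because the pattern is planar.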
objp = np.zeros((self.rows * self.columns, 3), np.float32)
objp[:, :2] = np.mgrid[0:self.columns, 0:self.rows].T.reshape(-1, 2)
objp = np.multiply(objp, self.square_width)
self.object_points = objp
def set_origin_distance(self, value):
self.origin_distance = self.to_float(value)
def to_int(self, value):
try:
value = int(value)
if value > 0:
return value
else:
return 0
except:
return 0
def to_float(self, value):
try:
value = float(value)
if value > 0.0:
return value
else:
return 0.0
except:
return 0.0
| 0.001311 |
'''
Image
=====
The :class:`Image` widget is used to display an image::
wimg = Image(source='mylogo.png')
Asynchronous Loading
--------------------
To load an image asynchronously (for example from an external webserver), use
the :class:`AsyncImage` subclass::
aimg = AsyncImage(source='http://mywebsite.com/logo.png')
This can be useful as it prevents your application from waiting until the image
is loaded. If you want to display large images or retrieve them from URLs,
using :class:`AsyncImage` will allow these resources to be retrieved on a
background thread without blocking your application.
Alignment
---------
By default, the image is centered and fits inside the widget bounding box.
If you don't want that, you can set `allow_stretch` to True and `keep_ratio`
to False.
You can also inherit from Image and create your own style.
For example, if you want your image to be greater than the size of your widget,
you could do::
class FullImage(Image):
pass
And in your kivy language file::
<-FullImage>:
canvas:
Color:
rgb: (1, 1, 1)
Rectangle:
texture: self.texture
size: self.width + 20, self.height + 20
pos: self.x - 10, self.y - 10
'''
__all__ = ('Image', 'AsyncImage')
from kivy.uix.widget import Widget
from kivy.core.image import Image as CoreImage
from kivy.resources import resource_find
from kivy.properties import StringProperty, ObjectProperty, ListProperty, \
AliasProperty, BooleanProperty, NumericProperty
from kivy.logger import Logger
# delayed imports
Loader = None
class Image(Widget):
'''Image class, see module documentation for more information.
'''
source = StringProperty(None)
'''Filename / source of your image.
:attr:`source` is a :class:`~kivy.properties.StringProperty` and
defaults to None.
'''
texture = ObjectProperty(None, allownone=True)
'''Texture object of the image. The texture represents the original, loaded
image texture. It is stretched and positioned during rendering according to
the :attr:`allow_stretch` and :attr:`keep_ratio` properties.
Depending of the texture creation, the value will be a
:class:`~kivy.graphics.texture.Texture` or a
:class:`~kivy.graphics.texture.TextureRegion` object.
:attr:`texture` is a :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
texture_size = ListProperty([0, 0])
'''Texture size of the image. This represents the original, loaded image
texture size.
.. warning::
The texture size is set after the texture property. So if you listen to
the change on :attr:`texture`, the property texture_size will not be
up-to-date. Use self.texture.size instead.
'''
def get_image_ratio(self):
if self.texture:
return self.texture.width / float(self.texture.height)
return 1.
mipmap = BooleanProperty(False)
'''Indicate if you want OpenGL mipmapping to be applied to the texture.
Read :ref:`mipmap` for more information.
.. versionadded:: 1.0.7
:attr:`mipmap` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
image_ratio = AliasProperty(get_image_ratio, None, bind=('texture', ))
    '''Ratio of the image (width / float(height)).
:attr:`image_ratio` is a :class:`~kivy.properties.AliasProperty` and is
read-only.
'''
color = ListProperty([1, 1, 1, 1])
'''Image color, in the format (r, g, b, a). This attribute can be used to
'tint' an image. Be careful: if the source image is not gray/white, the
color will not really work as expected.
.. versionadded:: 1.0.6
:attr:`color` is a :class:`~kivy.properties.ListProperty` and defaults to
[1, 1, 1, 1].
'''
allow_stretch = BooleanProperty(False)
'''If True, the normalized image size will be maximized to fit in the image
box. Otherwise, if the box is too tall, the image will not be
stretched more than 1:1 pixels.
.. versionadded:: 1.0.7
:attr:`allow_stretch` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
keep_ratio = BooleanProperty(True)
    '''If False, and allow_stretch is True, the normalized image size will be
    maximized to fit in the image box, ignoring the aspect ratio of the image.
Otherwise, if the box is too tall, the image will not be stretched more
than 1:1 pixels.
.. versionadded:: 1.0.8
:attr:`keep_ratio` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
keep_data = BooleanProperty(False)
    '''If True, the underlying _coreimage will store the raw image data.
This is useful when performing pixel based collision detection.
.. versionadded:: 1.3.0
:attr:`keep_data` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
anim_delay = NumericProperty(.25)
'''Delay the animation if the image is sequenced (like an animated gif).
If anim_delay is set to -1, the animation will be stopped.
.. versionadded:: 1.0.8
:attr:`anim_delay` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.25 (4 FPS).
'''
anim_loop = NumericProperty(0)
'''Number of loops to play then stop animating. 0 means keep animating.
.. versionadded:: 1.9.0
:attr:`anim_loop` is a :class:`~kivy.properties.NumericProperty` defaults
to 0.
'''
nocache = BooleanProperty(False)
'''If this property is set True, the image will not be added to the
internal cache. The cache will simply ignore any calls trying to
append the core image.
.. versionadded:: 1.6.0
:attr:`nocache` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
def get_norm_image_size(self):
if not self.texture:
return self.size
ratio = self.image_ratio
w, h = self.size
tw, th = self.texture.size
        # ensure that the width is always maximized to the container width
if self.allow_stretch:
if not self.keep_ratio:
return w, h
iw = w
else:
iw = min(w, tw)
# calculate the appropriate height
ih = iw / ratio
        # if the height is too large, take the height of the container
# and calculate appropriate width. no need to test further. :)
if ih > h:
if self.allow_stretch:
ih = h
else:
ih = min(h, th)
iw = ih * ratio
return iw, ih
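    # Worked example (illustrative): a 100x50 texture inside a 200x200 widget
    # gives (100, 50) with the defaults (allow_stretch=False, keep_ratio=True),
    # (200, 100) with allow_stretch=True, and the full widget size (200, 200)
    # when keep_ratio is also set to False.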
norm_image_size = AliasProperty(get_norm_image_size, None, bind=(
'texture', 'size', 'image_ratio', 'allow_stretch'))
'''Normalized image size within the widget box.
This size will always fit the widget size and will preserve the image
ratio.
:attr:`norm_image_size` is a :class:`~kivy.properties.AliasProperty` and is
read-only.
'''
def __init__(self, **kwargs):
self._coreimage = None
self._loops = 0
super(Image, self).__init__(**kwargs)
fbind = self.fbind
update = self.texture_update
fbind('source', update)
fbind('mipmap', update)
if self.source:
update()
self.on_anim_delay(self, kwargs.get('anim_delay', .25))
def texture_update(self, *largs):
if not self.source:
self.texture = None
else:
filename = resource_find(self.source)
self._loops = 0
if filename is None:
return Logger.error('Image: Error reading file {filename}'.
format(filename=self.source))
mipmap = self.mipmap
if self._coreimage is not None:
self._coreimage.unbind(on_texture=self._on_tex_change)
try:
self._coreimage = ci = CoreImage(filename, mipmap=mipmap,
anim_delay=self.anim_delay,
keep_data=self.keep_data,
nocache=self.nocache)
except:
self._coreimage = ci = None
if ci:
ci.bind(on_texture=self._on_tex_change)
self.texture = ci.texture
def on_anim_delay(self, instance, value):
        self._loops = 0
if self._coreimage is None:
return
self._coreimage.anim_delay = value
if value < 0:
self._coreimage.anim_reset(False)
def on_texture(self, instance, value):
if value is not None:
self.texture_size = list(value.size)
def _on_tex_change(self, *largs):
# update texture from core image
self.texture = self._coreimage.texture
ci = self._coreimage
if self.anim_loop and ci._anim_index == len(ci._image.textures) - 1:
self._loops += 1
if self.anim_loop == self._loops:
ci.anim_reset(False)
self._loops = 0
def reload(self):
'''Reload image from disk. This facilitates re-loading of
images from disk in case the image content changes.
.. versionadded:: 1.3.0
Usage::
im = Image(source = '1.jpg')
# -- do something --
im.reload()
# image will be re-loaded from disk
'''
try:
self._coreimage.remove_from_cache()
except AttributeError:
pass
        oldsource = self.source
        self.source = ''
        self.source = oldsource
def on_nocache(self, *args):
if self.nocache and self._coreimage:
self._coreimage.remove_from_cache()
self._coreimage._nocache = True
class AsyncImage(Image):
'''Asynchronous Image class. See the module documentation for more
information.
.. note::
The AsyncImage is a specialized form of the Image class. You may
want to refer to the :mod:`~kivy.loader` documentation and in
particular, the :class:`~kivy.loader.ProxyImage` for more detail
on how to handle events around asynchronous image loading.
'''
def __init__(self, **kwargs):
self._coreimage = None
super(AsyncImage, self).__init__(**kwargs)
global Loader
if not Loader:
from kivy.loader import Loader
self.fbind('source', self._load_source)
if self.source:
self._load_source()
self.on_anim_delay(self, kwargs.get('anim_delay', .25))
def _load_source(self, *args):
source = self.source
if not source:
if self._coreimage is not None:
self._coreimage.unbind(on_texture=self._on_tex_change)
self.texture = None
self._coreimage = None
else:
if not self.is_uri(source):
source = resource_find(source)
self._coreimage = image = Loader.image(source,
nocache=self.nocache, mipmap=self.mipmap,
anim_delay=self.anim_delay)
image.bind(on_load=self._on_source_load)
image.bind(on_texture=self._on_tex_change)
self.texture = image.texture
def _on_source_load(self, value):
image = self._coreimage.image
if not image:
return
self.texture = image.texture
def is_uri(self, filename):
proto = filename.split('://', 1)[0]
return proto in ('http', 'https', 'ftp', 'smb')
def _on_tex_change(self, *largs):
if self._coreimage:
self.texture = self._coreimage.texture
def texture_update(self, *largs):
pass
| 0.000337 |
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: elasticache_facts
short_description: Retrieve facts for AWS Elasticache clusters
description:
- Retrieve facts from AWS Elasticache clusters
version_added: "2.5"
options:
name:
description:
- The name of an Elasticache cluster
author:
- Will Thames (@willthames)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: obtain all Elasticache facts
elasticache_facts:
- name: obtain all facts for a single Elasticache cluster
elasticache_facts:
name: test_elasticache
'''
RETURN = '''
elasticache_clusters:
description: List of elasticache clusters
returned: always
type: complex
contains:
auto_minor_version_upgrade:
description: Whether to automatically upgrade to minor versions
returned: always
type: bool
sample: true
cache_cluster_create_time:
description: Date and time cluster was created
returned: always
type: string
sample: '2017-09-15T05:43:46.038000+00:00'
cache_cluster_id:
description: ID of the cache cluster
returned: always
type: string
sample: abcd-1234-001
cache_cluster_status:
description: Status of Elasticache cluster
returned: always
type: string
sample: available
cache_node_type:
description: Instance type of Elasticache nodes
returned: always
type: string
sample: cache.t2.micro
cache_nodes:
description: List of Elasticache nodes in the cluster
returned: always
type: complex
contains:
cache_node_create_time:
description: Date and time node was created
returned: always
type: string
sample: '2017-09-15T05:43:46.038000+00:00'
cache_node_id:
description: ID of the cache node
returned: always
type: string
sample: '0001'
cache_node_status:
description: Status of the cache node
returned: always
type: string
sample: available
customer_availability_zone:
description: Availability Zone in which the cache node was created
returned: always
type: string
sample: ap-southeast-2b
endpoint:
description: Connection details for the cache node
returned: always
type: complex
contains:
address:
description: URL of the cache node endpoint
returned: always
type: string
sample: abcd-1234-001.bgiz2p.0001.apse2.cache.amazonaws.com
port:
description: Port of the cache node endpoint
returned: always
type: int
sample: 6379
        parameter_group_status:
description: Status of the Cache Parameter Group
returned: always
type: string
sample: in-sync
cache_parameter_group:
      description: Contents of the Cache Parameter Group
returned: always
type: complex
contains:
cache_node_ids_to_reboot:
description: Cache nodes which need to be rebooted for parameter changes to be applied
returned: always
type: list
sample: []
cache_parameter_group_name:
description: Name of the cache parameter group
returned: always
type: string
sample: default.redis3.2
parameter_apply_status:
description: Status of parameter updates
returned: always
type: string
sample: in-sync
cache_security_groups:
description: Security Groups used by the cache
returned: always
type: list
sample:
- 'sg-abcd1234'
cache_subnet_group_name:
description: Elasticache Subnet Group used by the cache
returned: always
type: string
sample: abcd-subnet-group
client_download_landing_page:
description: URL of client download web page
returned: always
type: string
sample: 'https://console.aws.amazon.com/elasticache/home#client-download:'
engine:
description: Engine used by elasticache
returned: always
type: string
sample: redis
engine_version:
description: Version of elasticache engine
returned: always
type: string
sample: 3.2.4
notification_configuration:
description: Configuration of notifications
returned: if notifications are enabled
type: complex
contains:
topic_arn:
description: ARN of notification destination topic
returned: if notifications are enabled
type: string
sample: arn:aws:sns:*:123456789012:my_topic
topic_name:
description: Name of notification destination topic
returned: if notifications are enabled
type: string
sample: MyTopic
num_cache_nodes:
description: Number of Cache Nodes
returned: always
type: int
sample: 1
pending_modified_values:
description: Values that are pending modification
returned: always
type: complex
contains: {}
preferred_availability_zone:
description: Preferred Availability Zone
returned: always
type: string
sample: ap-southeast-2b
preferred_maintenance_window:
description: Time slot for preferred maintenance window
returned: always
type: string
sample: sat:12:00-sat:13:00
replication_group_id:
description: Replication Group Id
returned: always
type: string
sample: replication-001
security_groups:
description: List of Security Groups associated with Elasticache
returned: always
type: complex
contains:
security_group_id:
description: Security Group ID
returned: always
type: string
sample: sg-abcd1234
status:
description: Status of Security Group
returned: always
type: string
sample: active
tags:
description: Tags applied to the elasticache cluster
returned: always
type: complex
sample:
Application: web
Environment: test
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
@AWSRetry.exponential_backoff()
def describe_cache_clusters_with_backoff(client, cluster_id=None):
paginator = client.get_paginator('describe_cache_clusters')
params = dict(ShowCacheNodeInfo=True)
if cluster_id:
params['CacheClusterId'] = cluster_id
try:
response = paginator.paginate(**params).build_full_result()
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'CacheClusterNotFound':
return []
raise
except botocore.exceptions.BotoCoreError:
raise
return response['CacheClusters']
@AWSRetry.exponential_backoff()
def get_elasticache_tags_with_backoff(client, cluster_id):
return client.list_tags_for_resource(ResourceName=cluster_id)['TagList']
def get_aws_account_id(module):
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='sts',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Can't authorize connection")
try:
return client.get_caller_identity()['Account']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain AWS account id")
def get_elasticache_clusters(client, module, region):
try:
clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get('name'))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain cache cluster info")
account_id = get_aws_account_id(module)
results = []
for cluster in clusters:
cluster = camel_dict_to_snake_dict(cluster)
arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster['cache_cluster_id'])
try:
tags = get_elasticache_tags_with_backoff(client, arn)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't get tags for cluster %s")
cluster['tags'] = boto3_tag_list_to_ansible_dict(tags)
results.append(cluster)
return results
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=False),
)
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='elasticache',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
module.exit_json(elasticache_clusters=get_elasticache_clusters(client, module, region))
if __name__ == '__main__':
main()
| 0.001913 |
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
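# Example (illustrative): any pure function of hashable arguments can be
# memoized so repeated calls with the same arguments reuse the cached result.
#   @memoize
#   def Fibonacci(n):
#     return n if n < 2 else Fibonacci(n - 1) + Fibonacci(n - 2)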
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
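# Examples (illustrative; the build file path is hypothetical):
#   ParseQualifiedTarget('chrome/app.gyp:chrome#host')
#     => ['chrome/app.gyp', 'chrome', 'host']
#   ParseQualifiedTarget('chrome')
#     => [None, 'chrome', None]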
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd)
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
@memoize
def RelativePath(path, relative_to):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# Convert to absolute (and therefore normalized paths).
path = os.path.abspath(path)
relative_to = os.path.abspath(relative_to)
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
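# Examples (illustrative), assuming both arguments are relative to the current
# directory:
#   RelativePath('out/Debug/obj', 'out')    => 'Debug/obj'
#   RelativePath('src/foo.c', 'out/Debug')  => '../../src/foo.c'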
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
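# Examples (illustrative):
#   EncodePOSIXShellArgument('hello world')  => '"hello world"'
#   EncodePOSIXShellArgument('$HOME')        => '"$HOME"'  (still expands)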
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer:
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
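# Illustrative usage sketch (the file name is hypothetical): regenerate a file
# without disturbing its mtime when the contents are unchanged.
#   out = WriteOnDiff('build.ninja')
#   out.write('generated contents\n')
#   out.close()  # replaces build.ninja only if the new contents differ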
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
'sunos5': 'solaris',
'freebsd7': 'freebsd',
'freebsd8': 'freebsd',
'freebsd9': 'freebsd',
}
flavor = flavors.get(sys.platform, 'linux')
return params.get('flavor', flavor)
def CopyTool(flavor, out_path):
"""Finds (mac|sun|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
prefix = { 'solaris': 'sun', 'mac': 'mac', 'win': 'win' }.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
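# Example (illustrative): uniquer([3, 1, 3, 2, 1]) => [3, 1, 2]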
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
      return re.findall(r'\$\(([^)]*)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
    ['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
| 0.011239 |
#!/usr/bin/env python3
from ev3dev.ev3 import *
from time import sleep
from PIL import Image
import ev3dev.ev3 as ev3
#connect infrared and check it's connected.
ir = InfraredSensor()
assert ir.connected, "Connect a single infrared sensor to port"
#put the infrared sensor into proximity mode.
ir.mode = 'IR-PROX'
#connect color sensor and check it's connected.
cl = ColorSensor()
assert cl.connected, "Connect a single color sensor to any sensor port"
#put the color sensor into color mode
cl.mode= 'COL-COLOR'
# TODO: wrap the greeting logic in a while loop to serve multiple visitors
dest = None
distance = ir.value()
if distance < 500:
Leds.set_color(Leds.LEFT, Leds.GREEN)
lcd = Screen()
logo = Image.open('chase.png')
lcd.image.paste(logo, (0,0))
lcd.update()
ev3.Sound.speak('Welcome to JP Morgan Chase. Who are you looking for?').wait()
dest = cl.value()
else:
Leds.all_off()
sleep(2)
#create motor objects
lm = LargeMotor('outB')
rm = LargeMotor('outC')
# desk number -> (first leg, second leg) motor positions in degrees
destinations = {1: (100, 200), 2: (100, 100), 5: (300, 500)}
# stop here if nobody was detected or the detected colour has no mapped desk
if dest is None or dest not in destinations:
    raise SystemExit('No known destination detected')
desk_speech = 'Taking you to desk number {}'.format(dest)
ev3.Sound.speak(desk_speech).wait()
#go straight for 3 feet (in degrees)
lm.run_to_rel_pos(position_sp=destinations[dest][0], speed_sp=300, stop_action="brake")
rm.run_to_rel_pos(position_sp=destinations[dest][0], speed_sp=300, stop_action="brake")
lm.wait_while('running')
rm.wait_while('running')
#verify the motor is no longer running
#Sound.beep()
#turn right
lm.run_to_rel_pos(position_sp=300, speed_sp=360, stop_action="brake")
rm.run_to_rel_pos(position_sp=-300, speed_sp=360, stop_action="brake")
#go straight 2 feet (in degrees)
lm.wait_while('running')
rm.wait_while('running')
lm.run_to_rel_pos(position_sp=destinations[dest][1], speed_sp=900, stop_action="brake")
rm.run_to_rel_pos(position_sp=destinations[dest][1], speed_sp=900, stop_action="brake")
| 0.018676 |
import angr
from cle.backends.externs.simdata.io_file import io_file_data_for_arch
######################################
# fdopen
#
# Reference for implementation:
# glibc-2.25/libio/iofdopen.c
######################################
def mode_to_flag(mode):
# TODO improve this: handle mode = strings
    if mode.endswith(b'b'):  # the binary flag has no effect on POSIX; drop it
        mode = mode[:-1]
all_modes = {
b"r" : angr.storage.file.Flags.O_RDONLY,
b"r+" : angr.storage.file.Flags.O_RDWR,
b"w" : angr.storage.file.Flags.O_WRONLY | angr.storage.file.Flags.O_CREAT,
b"w+" : angr.storage.file.Flags.O_RDWR | angr.storage.file.Flags.O_CREAT,
b"a" : angr.storage.file.Flags.O_WRONLY | angr.storage.file.Flags.O_CREAT | angr.storage.file.Flags.O_APPEND,
b"a+" : angr.storage.file.Flags.O_RDWR | angr.storage.file.Flags.O_CREAT | angr.storage.file.Flags.O_APPEND
}
if mode not in all_modes:
raise angr.SimProcedureError('unsupported file open mode %s' % mode)
return all_modes[mode]
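# For example, mode_to_flag(b'r+') returns Flags.O_RDWR, and mode_to_flag(b'rb')
# is treated as b'r' because the trailing binary flag is dropped inside the
# function.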
class fdopen(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, fd_int, m_addr):
#pylint:disable=unused-variable
strlen = angr.SIM_PROCEDURES['libc']['strlen']
m_strlen = self.inline_call(strlen, m_addr)
m_expr = self.state.memory.load(m_addr, m_strlen.max_null_index, endness='Iend_BE')
mode = self.state.solver.eval(m_expr, cast_to=bytes)
# TODO: handle append and other mode subtleties
fd = self.state.solver.eval(fd_int)
if fd not in self.state.posix.fd:
# if file descriptor not found return NULL
return 0
else:
# Allocate a FILE struct in heap
malloc = angr.SIM_PROCEDURES['libc']['malloc']
io_file_data = io_file_data_for_arch(self.state.arch)
file_struct_ptr = self.inline_call(malloc, io_file_data['size']).ret_expr
# Write the fd
fd_bvv = self.state.solver.BVV(fd, 4 * 8) # int
self.state.memory.store(file_struct_ptr + io_file_data['fd'],
fd_bvv,
endness=self.state.arch.memory_endness)
return file_struct_ptr
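# Illustrative usage sketch (the binary path is an assumption): hook the libc
# fdopen symbol in an angr project so that calls are emulated by this procedure.
#   proj = angr.Project('/bin/cat', auto_load_libs=False)
#   proj.hook_symbol('fdopen', fdopen())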
| 0.007515 |