#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import curses
import time
import os
import re
import sys
def terminate(main_screen):
    """Restore the terminal to its normal state and leave curses mode."""
    curses.nocbreak()
    main_screen.keypad(False)
    curses.echo()
    curses.endwin()
def init():
    """Initialise curses, enable cbreak/keypad mode, hide the cursor and
    return the main screen."""
    main_screen = curses.initscr()
    curses.noecho()
    curses.cbreak()
    main_screen.keypad(True)
    curses.curs_set(0)
    return main_screen
def regexize(SEARCH):
    """Build a fuzzy-match regular expression from a search string: every
    character of SEARCH must appear in order, with anything in between."""
    REGEX_SEARCH = ""
    for ch in SEARCH:
        REGEX_SEARCH += ".*" + ch
    return REGEX_SEARCH
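# A quick sanity check of regexize (illustrative only, not part of the original
# script): "gc" expands to ".*g.*c", which matches any line containing a "g"
# followed later by a "c".
assert re.match(regexize("gc"), "git commit -m msg", re.IGNORECASE)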
def print_debug(main_screen, text):
    """Print a debug message on the bottom line of the screen."""
    main_screen.addstr(max_height - 1, 0, "Debug: " + text)
# Read the candidate lines from stdin, then reattach stdin to the terminal so
# curses can still read keystrokes interactively.
lineBuffer = sys.stdin.readlines()
sys.stdin = open("/dev/tty", "r")
main_screen = init() # main window
max_height = int(main_screen.getmaxyx()[0])
buffer_height = max_height - 2
PROMPT="Pick:"
main_screen.addstr(0, 0, PROMPT, curses.A_REVERSE)
main_screen.refresh()
SEARCH=""
# keys
ESC = 27
ENTER = 10
Ctrl_P = 16
Ctrl_N = 14
DOWN = curses.KEY_DOWN
UP = curses.KEY_UP
selected_match=1
matches=[]
i=0
print_debug(main_screen,"%s"%i)
try:
    while True:
        i += 1
        c = main_screen.getch()
# clean every time
for j in range(len(matches)):
main_screen.addstr(j+1,0," "*(len(matches[j])+2))
main_screen.addstr(0, len(PROMPT)+1," "*len(SEARCH))
# curses.KEY_UP
        if c in (127, curses.KEY_BACKSPACE):  # Backspace: drop the last character
            SEARCH = SEARCH[:-1]
        elif c == Ctrl_N or c == DOWN:
            selected_match = min(selected_match + 1, max(len(matches), 1))
        elif c == Ctrl_P or c == UP:
            selected_match = max(selected_match - 1, 1)
elif c == ESC: # ESC
terminate(main_screen)
sys.exit(0)
elif c == ENTER: # ENTER
# curses.flash()
terminate(main_screen)
os.system(matches[selected_match-1])
sys.exit(0)
        elif 32 <= c <= 126:  # printable ASCII: extend the search string
            SEARCH += chr(c)
matches=[]
main_screen.addstr(selected_match, 1,">")
main_screen.addstr(0, len(PROMPT)+1,SEARCH)
# print_debug(main_screen,str(c))
print_debug(main_screen,"%s"%i)
for j,line in enumerate(lineBuffer):
if re.match(regexize(SEARCH),line, re.IGNORECASE):
matches.append(line)
main_screen.addstr(len(matches),3,line)
if len(matches)>=buffer_height:
break
except Exception:
    # Restore the terminal even if something goes wrong, then re-raise so the
    # traceback stays visible.
    terminate(main_screen)
    raise
else:
    terminate(main_screen)
#vim-run: python3 % <<<"asdfsdf adsfasdf\nasdf"
#vim-run: echo -e "1\n2\n3" | python3 %
#vim-run: python3 %
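# Example invocation (assumed file name, adapt as needed): pipe candidate lines
# on stdin, navigate with Ctrl-N/Ctrl-P or the arrow keys, and the selected
# line is executed via os.system on Enter:
#   history | python3 picker.py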
import os
import pickle
import copy
import numpy as np
CODES = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, '<GO>': 3 }
def load_data(path):
"""
Load Dataset from File
"""
input_file = os.path.join(path)
with open(input_file, 'r', encoding='utf-8') as f:
return f.read()
def preprocess_and_save_data(source_path, target_path, text_to_ids):
"""
    Preprocess Text Data. Save it to file.
"""
# Preprocess
source_text = load_data(source_path)
target_text = load_data(target_path)
source_text = source_text.lower()
target_text = target_text.lower()
source_vocab_to_int, source_int_to_vocab = create_lookup_tables(source_text)
target_vocab_to_int, target_int_to_vocab = create_lookup_tables(target_text)
source_text, target_text = text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int)
# Save Data
with open('preprocess.p', 'wb') as out_file:
pickle.dump((
(source_text, target_text),
(source_vocab_to_int, target_vocab_to_int),
(source_int_to_vocab, target_int_to_vocab)), out_file)
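# preprocess_and_save_data expects the caller to supply text_to_ids. A minimal
# sketch of such a function (assumed here, not part of this helper module):
# every word is mapped to its id, unknown words fall back to <UNK>, and each
# target sentence gets an <EOS> marker appended.
def example_text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
    source_ids = [[source_vocab_to_int.get(word, CODES['<UNK>']) for word in line.split()]
                  for line in source_text.split('\n')]
    target_ids = [[target_vocab_to_int.get(word, CODES['<UNK>']) for word in line.split()] + [CODES['<EOS>']]
                  for line in target_text.split('\n')]
    return source_ids, target_ids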
def load_preprocess():
"""
    Load the preprocessed training data saved by preprocess_and_save_data
"""
with open('preprocess.p', mode='rb') as in_file:
return pickle.load(in_file)
def create_lookup_tables(text):
"""
Create lookup tables for vocabulary
"""
vocab = set(text.split())
vocab_to_int = copy.copy(CODES)
for v_i, v in enumerate(vocab, len(CODES)):
vocab_to_int[v] = v_i
int_to_vocab = {v_i: v for v, v_i in vocab_to_int.items()}
return vocab_to_int, int_to_vocab
def save_params(params):
"""
Save parameters to file
"""
with open('params.p', 'wb') as out_file:
pickle.dump(params, out_file)
def load_params():
"""
Load parameters from file
"""
with open('params.p', mode='rb') as in_file:
return pickle.load(in_file)
def batch_data(source, target, batch_size):
"""
Batch source and target together
"""
for batch_i in range(0, len(source)//batch_size):
start_i = batch_i * batch_size
source_batch = source[start_i:start_i + batch_size]
target_batch = target[start_i:start_i + batch_size]
yield np.array(pad_sentence_batch(source_batch)), np.array(pad_sentence_batch(target_batch))
def pad_sentence_batch(sentence_batch):
"""
Pad sentence with <PAD> id
"""
max_sentence = max([len(sentence) for sentence in sentence_batch])
return [sentence + [CODES['<PAD>']] * (max_sentence - len(sentence))
for sentence in sentence_batch]
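# A small, self-contained illustration (not part of the original helper) of how
# batch_data and pad_sentence_batch work together; the ids below are made up.
if __name__ == '__main__':
    example_source = [[4, 5], [6, 7, 8], [9], [10, 11]]
    example_target = [[12, 13, 1], [14, 1], [15, 16, 1], [17, 1]]
    for src_batch, tgt_batch in batch_data(example_source, example_target, batch_size=2):
        # Each batch is a numpy array padded with CODES['<PAD>'] (0) up to the
        # longest sentence in that batch.
        print(src_batch.shape, tgt_batch.shape)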
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Etienne Carriere <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_virtual_server
short_description: "Manages F5 BIG-IP LTM virtual servers"
description:
- "Manages F5 BIG-IP LTM virtual servers via iControl SOAP API"
version_added: "2.1"
author:
- Etienne Carriere (@Etienne-Carriere)
- Tim Rupp (@caphrim007)
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
requirements:
- bigsuds
options:
state:
description:
      - Virtual server state
      - C(absent), delete the virtual server if present
      - C(present) (and its synonym C(enabled)), create the virtual server if
        needed and set its state to enabled
      - C(disabled), create the virtual server if needed and set its state to disabled
required: false
default: present
choices:
- present
- absent
- enabled
- disabled
aliases: []
partition:
description:
- Partition
required: false
default: 'Common'
name:
description:
- Virtual server name
required: true
aliases:
- vs
destination:
description:
- Destination IP of the virtual server (only host is currently supported).
Required when state=present and vs does not exist.
required: true
aliases:
- address
- ip
port:
description:
      - Port of the virtual server. Required when state=present and vs does not exist
required: false
default: None
all_profiles:
description:
- List of all Profiles (HTTP,ClientSSL,ServerSSL,etc) that must be used
by the virtual server
required: false
default: None
all_rules:
version_added: "2.2"
description:
- List of rules to be applied in priority order
required: false
default: None
enabled_vlans:
version_added: "2.2"
description:
- List of vlans to be enabled. When a VLAN named C(ALL) is used, all
VLANs will be allowed.
required: false
default: None
pool:
description:
- Default pool for the virtual server
required: false
default: None
snat:
description:
- Source network address policy
required: false
choices:
- None
- Automap
- Name of a SNAT pool (eg "/Common/snat_pool_name") to enable SNAT with the specific pool
default: None
default_persistence_profile:
description:
- Default Profile which manages the session persistence
required: false
default: None
route_advertisement_state:
description:
- Enable route advertisement for destination
required: false
default: disabled
version_added: "2.3"
description:
description:
- Virtual server description
required: false
default: None
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Add virtual server
bigip_virtual_server:
server: lb.mydomain.net
user: admin
password: secret
state: present
partition: MyPartition
name: myvirtualserver
destination: "{{ ansible_default_ipv4['address'] }}"
port: 443
pool: "{{ mypool }}"
snat: Automap
description: Test Virtual Server
all_profiles:
- http
- clientssl
enabled_vlans:
- /Common/vlan2
delegate_to: localhost
- name: Modify Port of the Virtual Server
bigip_virtual_server:
server: lb.mydomain.net
user: admin
password: secret
state: present
partition: MyPartition
name: myvirtualserver
port: 8080
delegate_to: localhost
- name: Delete virtual server
bigip_virtual_server:
server: lb.mydomain.net
user: admin
password: secret
state: absent
partition: MyPartition
name: myvirtualserver
delegate_to: localhost
'''
RETURN = '''
---
deleted:
description: Name of a virtual server that was deleted
returned: changed
type: string
sample: "my-virtual-server"
'''
# map of state values
STATES = {
'enabled': 'STATE_ENABLED',
'disabled': 'STATE_DISABLED'
}
STATUSES = {
'enabled': 'SESSION_STATUS_ENABLED',
'disabled': 'SESSION_STATUS_DISABLED',
'offline': 'SESSION_STATUS_FORCED_DISABLED'
}
def vs_exists(api, vs):
    # hack to determine if the virtual server exists
result = False
try:
api.LocalLB.VirtualServer.get_object_status(virtual_servers=[vs])
result = True
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def vs_create(api, name, destination, port, pool):
_profiles = [[{'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': 'tcp'}]]
created = False
# a bit of a hack to handle concurrent runs of this module.
# even though we've checked the vs doesn't exist,
# it may exist by the time we run create_vs().
# this catches the exception and does something smart
# about it!
try:
api.LocalLB.VirtualServer.create(
definitions=[{'name': [name], 'address': [destination], 'port': port, 'protocol': 'PROTOCOL_TCP'}],
wildmasks=['255.255.255.255'],
resources=[{'type': 'RESOURCE_TYPE_POOL', 'default_pool_name': pool}],
profiles=_profiles)
created = True
return created
except bigsuds.OperationFailed as e:
if "already exists" not in str(e):
raise Exception('Error on creating Virtual Server : %s' % e)
def vs_remove(api, name):
api.LocalLB.VirtualServer.delete_virtual_server(
virtual_servers=[name]
)
def get_rules(api, name):
return api.LocalLB.VirtualServer.get_rule(
virtual_servers=[name]
)[0]
def set_rules(api, name, rules_list):
updated = False
if rules_list is None:
return False
rules_list = list(enumerate(rules_list))
try:
current_rules = map(lambda x: (x['priority'], x['rule_name']), get_rules(api, name))
to_add_rules = []
for i, x in rules_list:
if (i, x) not in current_rules:
to_add_rules.append({'priority': i, 'rule_name': x})
to_del_rules = []
for i, x in current_rules:
if (i, x) not in rules_list:
to_del_rules.append({'priority': i, 'rule_name': x})
if len(to_del_rules) > 0:
api.LocalLB.VirtualServer.remove_rule(
virtual_servers=[name],
rules=[to_del_rules]
)
updated = True
if len(to_add_rules) > 0:
api.LocalLB.VirtualServer.add_rule(
virtual_servers=[name],
rules=[to_add_rules]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting rules : %s' % e)
def get_profiles(api, name):
return api.LocalLB.VirtualServer.get_profile(
virtual_servers=[name]
)[0]
def set_profiles(api, name, profiles_list):
updated = False
try:
if profiles_list is None:
return False
current_profiles = list(map(lambda x: x['profile_name'], get_profiles(api, name)))
to_add_profiles = []
for x in profiles_list:
if x not in current_profiles:
to_add_profiles.append({'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': x})
to_del_profiles = []
for x in current_profiles:
if (x not in profiles_list) and (x != "/Common/tcp"):
to_del_profiles.append({'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': x})
if len(to_del_profiles) > 0:
api.LocalLB.VirtualServer.remove_profile(
virtual_servers=[name],
profiles=[to_del_profiles]
)
updated = True
if len(to_add_profiles) > 0:
api.LocalLB.VirtualServer.add_profile(
virtual_servers=[name],
profiles=[to_add_profiles]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting profiles : %s' % e)
def get_vlan(api, name):
return api.LocalLB.VirtualServer.get_vlan(
virtual_servers=[name]
)[0]
def set_enabled_vlans(api, name, vlans_enabled_list):
updated = False
to_add_vlans = []
try:
if vlans_enabled_list is None:
return updated
current_vlans = get_vlan(api, name)
# Set allowed list back to default ("all")
#
# This case allows you to undo what you may have previously done.
# The default case is "All VLANs and Tunnels". This case will handle
# that situation.
if 'ALL' in vlans_enabled_list:
# The user is coming from a situation where they previously
# were specifying a list of allowed VLANs
            if len(current_vlans['vlans']) > 0 or \
                    current_vlans['state'] == "STATE_ENABLED":
api.LocalLB.VirtualServer.set_vlan(
virtual_servers=[name],
vlans=[{'state': 'STATE_DISABLED', 'vlans': []}]
)
updated = True
else:
            if current_vlans['state'] == "STATE_DISABLED":
to_add_vlans = vlans_enabled_list
else:
for vlan in vlans_enabled_list:
if vlan not in current_vlans['vlans']:
updated = True
to_add_vlans = vlans_enabled_list
break
if updated:
api.LocalLB.VirtualServer.set_vlan(
virtual_servers=[name],
vlans=[{
'state': 'STATE_ENABLED',
'vlans': [to_add_vlans]
}]
)
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting enabled vlans : %s' % e)
def set_snat(api, name, snat):
updated = False
try:
current_state = get_snat_type(api, name)
current_snat_pool = get_snat_pool(api, name)
if snat is None:
return updated
elif snat == 'None' and current_state != 'SRC_TRANS_NONE':
api.LocalLB.VirtualServer.set_source_address_translation_none(
virtual_servers=[name]
)
updated = True
elif snat == 'Automap' and current_state != 'SRC_TRANS_AUTOMAP':
api.LocalLB.VirtualServer.set_source_address_translation_automap(
virtual_servers=[name]
)
updated = True
        elif snat_settings_need_updating(snat, current_state, current_snat_pool):
            api.LocalLB.VirtualServer.set_source_address_translation_snat_pool(
                virtual_servers=[name],
                pools=[snat]
            )
            updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting snat : %s' % e)
def get_snat_type(api, name):
return api.LocalLB.VirtualServer.get_source_address_translation_type(
virtual_servers=[name]
)[0]
def get_snat_pool(api, name):
return api.LocalLB.VirtualServer.get_source_address_translation_snat_pool(
virtual_servers=[name]
)[0]
def snat_settings_need_updating(snat, current_state, current_snat_pool):
if snat == 'None' or snat == 'Automap':
return False
elif snat and current_state != 'SRC_TRANS_SNATPOOL':
return True
elif snat and current_state == 'SRC_TRANS_SNATPOOL' and current_snat_pool != snat:
return True
else:
return False
def get_pool(api, name):
return api.LocalLB.VirtualServer.get_default_pool_name(
virtual_servers=[name]
)[0]
def set_pool(api, name, pool):
updated = False
try:
current_pool = get_pool(api, name)
if pool is not None and (pool != current_pool):
api.LocalLB.VirtualServer.set_default_pool_name(
virtual_servers=[name],
default_pools=[pool]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting pool : %s' % e)
def get_destination(api, name):
return api.LocalLB.VirtualServer.get_destination_v2(
virtual_servers=[name]
)[0]
def set_destination(api, name, destination):
updated = False
try:
current_destination = get_destination(api, name)
if destination is not None and destination != current_destination['address']:
api.LocalLB.VirtualServer.set_destination_v2(
virtual_servers=[name],
destinations=[{'address': destination, 'port': current_destination['port']}]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting destination : %s' % e)
def set_port(api, name, port):
updated = False
try:
current_destination = get_destination(api, name)
if port is not None and port != current_destination['port']:
api.LocalLB.VirtualServer.set_destination_v2(
virtual_servers=[name],
destinations=[{'address': current_destination['address'], 'port': port}]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting port : %s' % e)
def get_state(api, name):
return api.LocalLB.VirtualServer.get_enabled_state(
virtual_servers=[name]
)[0]
def set_state(api, name, state):
updated = False
try:
current_state = get_state(api, name)
# We consider that being present is equivalent to enabled
if state == 'present':
state = 'enabled'
if STATES[state] != current_state:
api.LocalLB.VirtualServer.set_enabled_state(
virtual_servers=[name],
states=[STATES[state]]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting state : %s' % e)
def get_description(api, name):
return api.LocalLB.VirtualServer.get_description(
virtual_servers=[name]
)[0]
def set_description(api, name, description):
updated = False
try:
current_description = get_description(api, name)
if description is not None and current_description != description:
api.LocalLB.VirtualServer.set_description(
virtual_servers=[name],
descriptions=[description]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting description : %s ' % e)
def get_persistence_profiles(api, name):
return api.LocalLB.VirtualServer.get_persistence_profile(
virtual_servers=[name]
)[0]
def set_default_persistence_profiles(api, name, persistence_profile):
updated = False
if persistence_profile is None:
return updated
try:
current_persistence_profiles = get_persistence_profiles(api, name)
default = None
for profile in current_persistence_profiles:
if profile['default_profile']:
default = profile['profile_name']
break
if default is not None and default != persistence_profile:
api.LocalLB.VirtualServer.remove_persistence_profile(
virtual_servers=[name],
profiles=[[{'profile_name': default, 'default_profile': True}]]
)
if default != persistence_profile:
api.LocalLB.VirtualServer.add_persistence_profile(
virtual_servers=[name],
profiles=[[{'profile_name': persistence_profile, 'default_profile': True}]]
)
updated = True
return updated
except bigsuds.OperationFailed as e:
raise Exception('Error on setting default persistence profile : %s' % e)
def get_route_advertisement_status(api, address):
result = api.LocalLB.VirtualAddressV2.get_route_advertisement_state(virtual_addresses=[address]).pop(0)
result = result.split("STATE_")[-1].lower()
return result
def set_route_advertisement_state(api, destination, partition, route_advertisement_state):
updated = False
try:
state = "STATE_%s" % route_advertisement_state.strip().upper()
        address = fq_name(partition, destination)
        current_route_advertisement_state = get_route_advertisement_status(api, address)
if current_route_advertisement_state != route_advertisement_state:
api.LocalLB.VirtualAddressV2.set_route_advertisement_state(virtual_addresses=[address], states=[state])
updated = True
return updated
except bigsuds.OperationFailed as e:
        raise Exception('Error on setting route advertisement state : %s' % e)
def main():
argument_spec = f5_argument_spec()
argument_spec.update(dict(
state=dict(type='str', default='present',
choices=['present', 'absent', 'disabled', 'enabled']),
name=dict(type='str', required=True, aliases=['vs']),
destination=dict(type='str', aliases=['address', 'ip']),
port=dict(type='int'),
all_profiles=dict(type='list'),
all_rules=dict(type='list'),
enabled_vlans=dict(type='list'),
pool=dict(type='str'),
description=dict(type='str'),
snat=dict(type='str'),
route_advertisement_state=dict(type='str', default='disabled', choices=['enabled', 'disabled']),
default_persistence_profile=dict(type='str')
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
state = module.params['state']
partition = module.params['partition']
validate_certs = module.params['validate_certs']
name = fq_name(partition, module.params['name'])
destination = module.params['destination']
port = module.params['port']
all_profiles = fq_list_names(partition, module.params['all_profiles'])
all_rules = fq_list_names(partition, module.params['all_rules'])
enabled_vlans = module.params['enabled_vlans']
if enabled_vlans is None or 'ALL' in enabled_vlans:
all_enabled_vlans = enabled_vlans
else:
all_enabled_vlans = fq_list_names(partition, enabled_vlans)
pool = fq_name(partition, module.params['pool'])
description = module.params['description']
snat = module.params['snat']
route_advertisement_state = module.params['route_advertisement_state']
default_persistence_profile = fq_name(partition, module.params['default_persistence_profile'])
    if port is not None and not 1 <= port <= 65535:
        module.fail_json(msg="valid ports must be in range 1 - 65535")
try:
api = bigip_api(server, user, password, validate_certs, port=server_port)
result = {'changed': False} # default
if state == 'absent':
if not module.check_mode:
if vs_exists(api, name):
# hack to handle concurrent runs of module
# pool might be gone before we actually remove
try:
vs_remove(api, name)
result = {'changed': True, 'deleted': name}
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result['changed'] = False
else:
raise
else:
# check-mode return value
result = {'changed': True}
else:
update = False
if not vs_exists(api, name):
if (not destination) or (not port):
module.fail_json(msg="both destination and port must be supplied to create a VS")
if not module.check_mode:
# a bit of a hack to handle concurrent runs of this module.
# even though we've checked the virtual_server doesn't exist,
# it may exist by the time we run virtual_server().
# this catches the exception and does something smart
# about it!
try:
vs_create(api, name, destination, port, pool)
set_profiles(api, name, all_profiles)
set_enabled_vlans(api, name, all_enabled_vlans)
set_rules(api, name, all_rules)
set_snat(api, name, snat)
set_description(api, name, description)
set_default_persistence_profiles(api, name, default_persistence_profile)
set_state(api, name, state)
set_route_advertisement_state(api, destination, partition, route_advertisement_state)
result = {'changed': True}
except bigsuds.OperationFailed as e:
raise Exception('Error on creating Virtual Server : %s' % e)
else:
# check-mode return value
result = {'changed': True}
else:
update = True
if update:
# VS exists
if not module.check_mode:
# Have a transaction for all the changes
try:
api.System.Session.start_transaction()
result['changed'] |= set_destination(api, name, fq_name(partition, destination))
result['changed'] |= set_port(api, name, port)
result['changed'] |= set_pool(api, name, pool)
result['changed'] |= set_description(api, name, description)
result['changed'] |= set_snat(api, name, snat)
result['changed'] |= set_profiles(api, name, all_profiles)
result['changed'] |= set_enabled_vlans(api, name, all_enabled_vlans)
result['changed'] |= set_rules(api, name, all_rules)
result['changed'] |= set_default_persistence_profiles(api, name, default_persistence_profile)
result['changed'] |= set_state(api, name, state)
result['changed'] |= set_route_advertisement_state(api, destination, partition, route_advertisement_state)
api.System.Session.submit_transaction()
except Exception as e:
raise Exception("Error on updating Virtual Server : %s" % e)
else:
# check-mode return value
result = {'changed': True}
except Exception as e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all placements. To create placements, run
create_placement.py."""
__author__ = '[email protected] (Jeff Sham)'
# Locate the client library. If the module was installed via the "setup.py"
# script, then the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
placement_service = client.GetService('PlacementService', version='v201211')
# Get placements by statement.
placements = DfpUtils.GetAllEntitiesByStatementWithService(placement_service)
# Display results.
for placement in placements:
print ('Placement with id \'%s\' and name \'%s\' was found.'
% (placement['id'], placement['name']))
print
print 'Number of results found: %s' % len(placements)
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Rdata Types.
@var _by_text: The rdata type textual name to value mapping
@type _by_text: dict
@var _by_value: The rdata type value to textual name mapping
@type _by_value: dict
@var _metatypes: If an rdatatype is a metatype, there will be a mapping
whose key is the rdatatype value and whose value is True in this dictionary.
@type _metatypes: dict
@var _singletons: If an rdatatype is a singleton, there will be a mapping
whose key is the rdatatype value and whose value is True in this dictionary.
@type _singletons: dict"""
import re
import dns.exception
NONE = 0
A = 1
NS = 2
MD = 3
MF = 4
CNAME = 5
SOA = 6
MB = 7
MG = 8
MR = 9
NULL = 10
WKS = 11
PTR = 12
HINFO = 13
MINFO = 14
MX = 15
TXT = 16
RP = 17
AFSDB = 18
X25 = 19
ISDN = 20
RT = 21
NSAP = 22
NSAP_PTR = 23
SIG = 24
KEY = 25
PX = 26
GPOS = 27
AAAA = 28
LOC = 29
NXT = 30
SRV = 33
NAPTR = 35
KX = 36
CERT = 37
A6 = 38
DNAME = 39
OPT = 41
APL = 42
DS = 43
SSHFP = 44
IPSECKEY = 45
RRSIG = 46
NSEC = 47
DNSKEY = 48
DHCID = 49
NSEC3 = 50
NSEC3PARAM = 51
HIP = 55
SPF = 99
UNSPEC = 103
TKEY = 249
TSIG = 250
IXFR = 251
AXFR = 252
MAILB = 253
MAILA = 254
ANY = 255
TA = 32768
DLV = 32769
_by_text = {
'NONE' : NONE,
'A' : A,
'NS' : NS,
'MD' : MD,
'MF' : MF,
'CNAME' : CNAME,
'SOA' : SOA,
'MB' : MB,
'MG' : MG,
'MR' : MR,
'NULL' : NULL,
'WKS' : WKS,
'PTR' : PTR,
'HINFO' : HINFO,
'MINFO' : MINFO,
'MX' : MX,
'TXT' : TXT,
'RP' : RP,
'AFSDB' : AFSDB,
'X25' : X25,
'ISDN' : ISDN,
'RT' : RT,
'NSAP' : NSAP,
'NSAP-PTR' : NSAP_PTR,
'SIG' : SIG,
'KEY' : KEY,
'PX' : PX,
'GPOS' : GPOS,
'AAAA' : AAAA,
'LOC' : LOC,
'NXT' : NXT,
'SRV' : SRV,
'NAPTR' : NAPTR,
'KX' : KX,
'CERT' : CERT,
'A6' : A6,
'DNAME' : DNAME,
'OPT' : OPT,
'APL' : APL,
'DS' : DS,
'SSHFP' : SSHFP,
'IPSECKEY' : IPSECKEY,
'RRSIG' : RRSIG,
'NSEC' : NSEC,
'DNSKEY' : DNSKEY,
'DHCID' : DHCID,
'NSEC3' : NSEC3,
'NSEC3PARAM' : NSEC3PARAM,
'HIP' : HIP,
'SPF' : SPF,
'UNSPEC' : UNSPEC,
'TKEY' : TKEY,
'TSIG' : TSIG,
'IXFR' : IXFR,
'AXFR' : AXFR,
'MAILB' : MAILB,
'MAILA' : MAILA,
'ANY' : ANY,
'TA' : TA,
'DLV' : DLV,
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be true inverse.
_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
_metatypes = {
OPT : True
}
_singletons = {
SOA : True,
NXT : True,
DNAME : True,
NSEC : True,
# CNAME is technically a singleton, but we allow multiple CNAMEs.
}
_unknown_type_pattern = re.compile('TYPE([0-9]+)$', re.I)
class UnknownRdatatype(dns.exception.DNSException):
"""Raised if a type is unknown."""
pass
def from_text(text):
"""Convert text into a DNS rdata type value.
@param text: the text
@type text: string
@raises dns.rdatatype.UnknownRdatatype: the type is unknown
@raises ValueError: the rdata type value is not >= 0 and <= 65535
@rtype: int"""
value = _by_text.get(text.upper())
if value is None:
match = _unknown_type_pattern.match(text)
        if match is None:
raise UnknownRdatatype
value = int(match.group(1))
if value < 0 or value > 65535:
raise ValueError("type must be between >= 0 and <= 65535")
return value
def to_text(value):
"""Convert a DNS rdata type to text.
@param value: the rdata type value
@type value: int
@raises ValueError: the rdata type value is not >= 0 and <= 65535
@rtype: string"""
if value < 0 or value > 65535:
raise ValueError("type must be between >= 0 and <= 65535")
text = _by_value.get(value)
if text is None:
text = 'TYPE' + `value`
return text
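# Illustrative round trips (not part of the original module):
#   from_text('MX') == 15 and to_text(15) == 'MX'
#   from_text('TYPE999') == 999 and to_text(999) == 'TYPE999'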
def is_metatype(rdtype):
"""True if the type is a metatype.
@param rdtype: the type
@type rdtype: int
@rtype: bool"""
if rdtype >= TKEY and rdtype <= ANY or _metatypes.has_key(rdtype):
return True
return False
def is_singleton(rdtype):
"""True if the type is a singleton.
@param rdtype: the type
@type rdtype: int
@rtype: bool"""
if _singletons.has_key(rdtype):
return True
return False
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Directory listing."""
# system imports
import os
import urllib
import stat
import time
# twisted imports
from higgins.http import iweb, resource, http, http_headers
def formatFileSize(size):
if size < 1024:
return '%i' % size
elif size < (1024**2):
return '%iK' % (size / 1024)
elif size < (1024**3):
return '%iM' % (size / (1024**2))
else:
return '%iG' % (size / (1024**3))
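# For example (illustrative): formatFileSize(999) -> '999',
# formatFileSize(2048) -> '2K', formatFileSize(5 * 1024 ** 2) -> '5M'.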
class DirectoryLister(resource.Resource):
def __init__(self, pathname, dirs=None,
contentTypes={},
contentEncodings={},
defaultType='text/html'):
self.contentTypes = contentTypes
self.contentEncodings = contentEncodings
self.defaultType = defaultType
# dirs allows usage of the File to specify what gets listed
self.dirs = dirs
self.path = pathname
resource.Resource.__init__(self)
def data_listing(self, request, data):
if self.dirs is None:
directory = os.listdir(self.path)
directory.sort()
else:
directory = self.dirs
files = []
for path in directory:
url = urllib.quote(path, '/')
fullpath = os.path.join(self.path, path)
try:
st = os.stat(fullpath)
except OSError:
continue
if stat.S_ISDIR(st.st_mode):
url = url + '/'
files.append({
'link': url,
'linktext': path + "/",
'size': '',
'type': '-',
'lastmod': time.strftime("%Y-%b-%d %H:%M", time.localtime(st.st_mtime))
})
else:
from higgins.http.static import getTypeAndEncoding
mimetype, encoding = getTypeAndEncoding(
path,
self.contentTypes, self.contentEncodings, self.defaultType)
filesize = st.st_size
files.append({
'link': url,
'linktext': path,
'size': formatFileSize(filesize),
'type': mimetype,
'lastmod': time.strftime("%Y-%b-%d %H:%M", time.localtime(st.st_mtime))
})
return files
def __repr__(self):
return '<DirectoryLister of %r>' % self.path
__str__ = __repr__
def render(self, request):
title = "Directory listing for %s" % urllib.unquote(request.path)
s= """<html><head><title>%s</title><style>
th, .even td, .odd td { padding-right: 0.5em; font-family: monospace}
.even-dir { background-color: #efe0ef }
.even { background-color: #eee }
.odd-dir {background-color: #f0d0ef }
.odd { background-color: #dedede }
.icon { text-align: center }
.listing {
margin-left: auto;
margin-right: auto;
width: 50%%;
padding: 0.1em;
}
body { border: 0; padding: 0; margin: 0; background-color: #efefef;}
h1 {padding: 0.1em; background-color: #777; color: white; border-bottom: thin white dashed;}
</style></head><body><div class="directory-listing"><h1>%s</h1>""" % (title,title)
s+="<table>"
s+="<tr><th>Filename</th><th>Size</th><th>Last Modified</th><th>File Type</th></tr>"
even = False
for row in self.data_listing(request, None):
s+='<tr class="%s">' % (even and 'even' or 'odd',)
s+='<td><a href="%(link)s">%(linktext)s</a></td><td align="right">%(size)s</td><td>%(lastmod)s</td><td>%(type)s</td></tr>' % row
even = not even
s+="</table></div></body></html>"
response = http.Response(200, {}, s)
response.headers.setHeader("content-type", http_headers.MimeType('text', 'html'))
return response
__all__ = ['DirectoryLister']
#!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
class EBSCliException(Exception):
    '''Base exception class for all exceptions generated in the EB CLI'''
pass
class ArgumentError(EBSCliException):
'''Command line argument error'''
pass
class ValidationError(EBSCliException):
'''Exception raised when validation fails'''
pass
class ApplicationNotExistError(EBSCliException):
    '''Exception raised when the expected application does not exist'''
pass
class ApplicationVersionNotExistError(EBSCliException):
    '''Exception raised when the expected application version does not exist'''
pass
class EnvironmentNotExistError(EBSCliException):
    '''Exception raised when the expected environment does not exist'''
pass
class EBConfigFileNotExistError(EBSCliException):
    '''Exception raised when the Elastic Beanstalk configuration file does not exist.'''
pass
import datetime
import os
from decimal import Decimal
from unittest import mock, skipUnless
from django import forms
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured,
)
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import ValidationError
from django.db import connection, models
from django.db.models.query import EmptyQuerySet
from django.forms.models import (
ModelFormMetaclass, construct_instance, fields_for_model, model_to_dict,
modelform_factory,
)
from django.template import Context, Template
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from .models import (
Article, ArticleStatus, Author, Author1, Award, BetterWriter, BigInt, Book,
Category, Character, Colour, ColourfulItem, CustomErrorMessage, CustomFF,
CustomFieldForExclusionModel, DateTimePost, DerivedBook, DerivedPost,
Document, ExplicitPK, FilePathModel, FlexibleDatePost, Homepage,
ImprovedArticle, ImprovedArticleWithParentLink, Inventory,
NullableUniqueCharFieldModel, Person, Photo, Post, Price, Product,
Publication, PublicationDefaults, StrictAssignmentAll,
StrictAssignmentFieldSpecific, Student, StumpJoke, TextFile, Triple,
Writer, WriterProfile, test_images,
)
if test_images:
from .models import ImageFile, OptionalImageFile, NoExtensionImageFile
class ImageFileForm(forms.ModelForm):
class Meta:
model = ImageFile
fields = '__all__'
class OptionalImageFileForm(forms.ModelForm):
class Meta:
model = OptionalImageFile
fields = '__all__'
class NoExtensionImageFileForm(forms.ModelForm):
class Meta:
model = NoExtensionImageFile
fields = '__all__'
class ProductForm(forms.ModelForm):
class Meta:
model = Product
fields = '__all__'
class PriceForm(forms.ModelForm):
class Meta:
model = Price
fields = '__all__'
class BookForm(forms.ModelForm):
class Meta:
model = Book
fields = '__all__'
class DerivedBookForm(forms.ModelForm):
class Meta:
model = DerivedBook
fields = '__all__'
class ExplicitPKForm(forms.ModelForm):
class Meta:
model = ExplicitPK
fields = ('key', 'desc',)
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = '__all__'
class DerivedPostForm(forms.ModelForm):
class Meta:
model = DerivedPost
fields = '__all__'
class CustomWriterForm(forms.ModelForm):
name = forms.CharField(required=False)
class Meta:
model = Writer
fields = '__all__'
class BaseCategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = '__all__'
class ArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = '__all__'
class RoykoForm(forms.ModelForm):
class Meta:
model = Writer
fields = '__all__'
class ArticleStatusForm(forms.ModelForm):
class Meta:
model = ArticleStatus
fields = '__all__'
class InventoryForm(forms.ModelForm):
class Meta:
model = Inventory
fields = '__all__'
class SelectInventoryForm(forms.Form):
items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
class CustomFieldForExclusionForm(forms.ModelForm):
class Meta:
model = CustomFieldForExclusionModel
fields = ['name', 'markup']
class TextFileForm(forms.ModelForm):
class Meta:
model = TextFile
fields = '__all__'
class BigIntForm(forms.ModelForm):
class Meta:
model = BigInt
fields = '__all__'
class ModelFormWithMedia(forms.ModelForm):
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
class Meta:
model = TextFile
fields = '__all__'
class CustomErrorMessageForm(forms.ModelForm):
name1 = forms.CharField(error_messages={'invalid': 'Form custom error message.'})
class Meta:
fields = '__all__'
model = CustomErrorMessage
class ModelFormBaseTest(TestCase):
def test_base_form(self):
self.assertEqual(list(BaseCategoryForm.base_fields), ['name', 'slug', 'url'])
def test_no_model_class(self):
class NoModelModelForm(forms.ModelForm):
pass
with self.assertRaisesMessage(ValueError, 'ModelForm has no model class specified.'):
NoModelModelForm()
def test_empty_fields_to_fields_for_model(self):
"""
An argument of fields=() to fields_for_model should return an empty dictionary
"""
field_dict = fields_for_model(Person, fields=())
self.assertEqual(len(field_dict), 0)
def test_empty_fields_on_modelform(self):
"""
No fields on a ModelForm should actually result in no fields.
"""
class EmptyPersonForm(forms.ModelForm):
class Meta:
model = Person
fields = ()
form = EmptyPersonForm()
self.assertEqual(len(form.fields), 0)
def test_empty_fields_to_construct_instance(self):
"""
No fields should be set on a model instance if construct_instance receives fields=().
"""
form = modelform_factory(Person, fields="__all__")({'name': 'John Doe'})
self.assertTrue(form.is_valid())
instance = construct_instance(form, Person(), fields=())
self.assertEqual(instance.name, '')
def test_blank_with_null_foreign_key_field(self):
"""
#13776 -- ModelForm's with models having a FK set to null=False and
required=False should be valid.
"""
class FormForTestingIsValid(forms.ModelForm):
class Meta:
model = Student
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['character'].required = False
char = Character.objects.create(username='user', last_action=datetime.datetime.today())
data = {'study': 'Engineering'}
data2 = {'study': 'Engineering', 'character': char.pk}
# form is valid because required=False for field 'character'
f1 = FormForTestingIsValid(data)
self.assertTrue(f1.is_valid())
f2 = FormForTestingIsValid(data2)
self.assertTrue(f2.is_valid())
obj = f2.save()
self.assertEqual(obj.character, char)
def test_blank_false_with_null_true_foreign_key_field(self):
"""
A ModelForm with a model having ForeignKey(blank=False, null=True)
and the form field set to required=False should allow the field to be
unset.
"""
class AwardForm(forms.ModelForm):
class Meta:
model = Award
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['character'].required = False
character = Character.objects.create(username='user', last_action=datetime.datetime.today())
award = Award.objects.create(name='Best sprinter', character=character)
data = {'name': 'Best tester', 'character': ''} # remove character
form = AwardForm(data=data, instance=award)
self.assertTrue(form.is_valid())
award = form.save()
self.assertIsNone(award.character)
def test_save_blank_false_with_required_false(self):
"""
A ModelForm with a model with a field set to blank=False and the form
field set to required=False should allow the field to be unset.
"""
obj = Writer.objects.create(name='test')
form = CustomWriterForm(data={'name': ''}, instance=obj)
self.assertTrue(form.is_valid())
obj = form.save()
self.assertEqual(obj.name, '')
def test_save_blank_null_unique_charfield_saves_null(self):
form_class = modelform_factory(model=NullableUniqueCharFieldModel, fields=['codename'])
empty_value = '' if connection.features.interprets_empty_strings_as_nulls else None
form = form_class(data={'codename': ''})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.instance.codename, empty_value)
# Save a second form to verify there isn't a unique constraint violation.
form = form_class(data={'codename': ''})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(form.instance.codename, empty_value)
def test_missing_fields_attribute(self):
message = (
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form "
"MissingFieldsForm needs updating."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
class MissingFieldsForm(forms.ModelForm):
class Meta:
model = Category
def test_extra_fields(self):
class ExtraFields(BaseCategoryForm):
some_extra_field = forms.BooleanField()
self.assertEqual(list(ExtraFields.base_fields),
['name', 'slug', 'url', 'some_extra_field'])
def test_extra_field_model_form(self):
with self.assertRaisesMessage(FieldError, 'no-field'):
class ExtraPersonForm(forms.ModelForm):
""" ModelForm with an extra field """
age = forms.IntegerField()
class Meta:
model = Person
fields = ('name', 'no-field')
def test_extra_declared_field_model_form(self):
class ExtraPersonForm(forms.ModelForm):
""" ModelForm with an extra field """
age = forms.IntegerField()
class Meta:
model = Person
fields = ('name', 'age')
def test_extra_field_modelform_factory(self):
with self.assertRaisesMessage(FieldError, 'Unknown field(s) (no-field) specified for Person'):
modelform_factory(Person, fields=['no-field', 'name'])
def test_replace_field(self):
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = '__all__'
self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField)
def test_replace_field_variant_2(self):
# Should have the same result as before,
# but 'fields' attribute specified differently
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = ['url']
self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField)
def test_replace_field_variant_3(self):
# Should have the same result as before,
# but 'fields' attribute specified differently
class ReplaceField(forms.ModelForm):
url = forms.BooleanField()
class Meta:
model = Category
fields = [] # url will still appear, since it is explicit above
self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField)
def test_override_field(self):
class WriterForm(forms.ModelForm):
book = forms.CharField(required=False)
class Meta:
model = Writer
fields = '__all__'
wf = WriterForm({'name': 'Richard Lockridge'})
self.assertTrue(wf.is_valid())
def test_limit_nonexistent_field(self):
expected_msg = 'Unknown field(s) (nonexistent) specified for Category'
with self.assertRaisesMessage(FieldError, expected_msg):
class InvalidCategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = ['nonexistent']
def test_limit_fields_with_string(self):
expected_msg = "CategoryForm.Meta.fields cannot be a string. Did you mean to type: ('url',)?"
with self.assertRaisesMessage(TypeError, expected_msg):
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = ('url') # note the missing comma
def test_exclude_fields(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ['url']
self.assertEqual(list(ExcludeFields.base_fields), ['name', 'slug'])
def test_exclude_nonexistent_field(self):
class ExcludeFields(forms.ModelForm):
class Meta:
model = Category
exclude = ['nonexistent']
self.assertEqual(list(ExcludeFields.base_fields), ['name', 'slug', 'url'])
def test_exclude_fields_with_string(self):
expected_msg = "CategoryForm.Meta.exclude cannot be a string. Did you mean to type: ('url',)?"
with self.assertRaisesMessage(TypeError, expected_msg):
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
exclude = ('url') # note the missing comma
def test_exclude_and_validation(self):
# This Price instance generated by this form is not valid because the quantity
# field is required, but the form is valid because the field is excluded from
# the form. This is for backwards compatibility.
class PriceFormWithoutQuantity(forms.ModelForm):
class Meta:
model = Price
exclude = ('quantity',)
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertTrue(form.is_valid())
price = form.save(commit=False)
msg = "{'quantity': ['This field cannot be null.']}"
with self.assertRaisesMessage(ValidationError, msg):
price.full_clean()
# The form should not validate fields that it doesn't contain even if they are
# specified using 'fields', not 'exclude'.
class PriceFormWithoutQuantity(forms.ModelForm):
class Meta:
model = Price
fields = ('price',)
form = PriceFormWithoutQuantity({'price': '6.00'})
self.assertTrue(form.is_valid())
# The form should still have an instance of a model that is not complete and
# not saved into a DB yet.
self.assertEqual(form.instance.price, Decimal('6.00'))
self.assertIsNone(form.instance.quantity)
self.assertIsNone(form.instance.pk)
def test_confused_form(self):
class ConfusedForm(forms.ModelForm):
""" Using 'fields' *and* 'exclude'. Not sure why you'd want to do
this, but uh, "be liberal in what you accept" and all.
"""
class Meta:
model = Category
fields = ['name', 'url']
exclude = ['url']
self.assertEqual(list(ConfusedForm.base_fields),
['name'])
def test_mixmodel_form(self):
class MixModelForm(BaseCategoryForm):
""" Don't allow more than one 'model' definition in the
inheritance hierarchy. Technically, it would generate a valid
form, but the fact that the resulting save method won't deal with
multiple objects is likely to trip up people not familiar with the
mechanics.
"""
class Meta:
model = Article
fields = '__all__'
# MixModelForm is now an Article-related thing, because MixModelForm.Meta
# overrides BaseCategoryForm.Meta.
self.assertEqual(
list(MixModelForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_article_form(self):
self.assertEqual(
list(ArticleForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_bad_form(self):
# First class with a Meta class wins...
class BadForm(ArticleForm, BaseCategoryForm):
pass
self.assertEqual(
list(BadForm.base_fields),
['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
)
def test_invalid_meta_model(self):
class InvalidModelForm(forms.ModelForm):
class Meta:
pass # no model
# Can't create new form
msg = 'ModelForm has no model class specified.'
with self.assertRaisesMessage(ValueError, msg):
InvalidModelForm()
# Even if you provide a model instance
with self.assertRaisesMessage(ValueError, msg):
InvalidModelForm(instance=Category)
def test_subcategory_form(self):
class SubCategoryForm(BaseCategoryForm):
""" Subclassing without specifying a Meta on the class will use
the parent's Meta (or the first parent in the MRO if there are
multiple parent classes).
"""
pass
self.assertEqual(list(SubCategoryForm.base_fields), ['name', 'slug', 'url'])
def test_subclassmeta_form(self):
class SomeCategoryForm(forms.ModelForm):
checkbox = forms.BooleanField()
class Meta:
model = Category
fields = '__all__'
class SubclassMeta(SomeCategoryForm):
""" We can also subclass the Meta inner class to change the fields
list.
"""
class Meta(SomeCategoryForm.Meta):
exclude = ['url']
self.assertHTMLEqual(
str(SubclassMeta()),
"""<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr>
<tr><th><label for="id_slug">Slug:</label></th>
<td><input id="id_slug" type="text" name="slug" maxlength="20" required></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th>
<td><input type="checkbox" name="checkbox" id="id_checkbox" required></td></tr>"""
)
def test_orderfields_form(self):
class OrderFields(forms.ModelForm):
class Meta:
model = Category
fields = ['url', 'name']
self.assertEqual(list(OrderFields.base_fields),
['url', 'name'])
self.assertHTMLEqual(
str(OrderFields()),
"""<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr>
<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr>"""
)
def test_orderfields2_form(self):
class OrderFields2(forms.ModelForm):
class Meta:
model = Category
fields = ['slug', 'url', 'name']
exclude = ['url']
self.assertEqual(list(OrderFields2.base_fields), ['slug', 'name'])
def test_default_populated_on_optional_field(self):
class PubForm(forms.ModelForm):
mode = forms.CharField(max_length=255, required=False)
class Meta:
model = PublicationDefaults
fields = ('mode',)
# Empty data uses the model field default.
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.mode, 'di')
self.assertEqual(m1._meta.get_field('mode').get_default(), 'di')
# Blank data doesn't use the model field default.
mf2 = PubForm({'mode': ''})
self.assertEqual(mf2.errors, {})
m2 = mf2.save(commit=False)
self.assertEqual(m2.mode, '')
def test_default_not_populated_on_optional_checkbox_input(self):
class PubForm(forms.ModelForm):
class Meta:
model = PublicationDefaults
fields = ('active',)
# Empty data doesn't use the model default because CheckboxInput
# doesn't have a value in HTML form submission.
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertIs(m1.active, False)
self.assertIsInstance(mf1.fields['active'].widget, forms.CheckboxInput)
self.assertIs(m1._meta.get_field('active').get_default(), True)
def test_default_not_populated_on_checkboxselectmultiple(self):
class PubForm(forms.ModelForm):
mode = forms.CharField(required=False, widget=forms.CheckboxSelectMultiple)
class Meta:
model = PublicationDefaults
fields = ('mode',)
# Empty data doesn't use the model default because an unchecked
# CheckboxSelectMultiple doesn't have a value in HTML form submission.
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.mode, '')
self.assertEqual(m1._meta.get_field('mode').get_default(), 'di')
def test_default_not_populated_on_selectmultiple(self):
class PubForm(forms.ModelForm):
mode = forms.CharField(required=False, widget=forms.SelectMultiple)
class Meta:
model = PublicationDefaults
fields = ('mode',)
# Empty data doesn't use the model default because an unselected
# SelectMultiple doesn't have a value in HTML form submission.
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.mode, '')
self.assertEqual(m1._meta.get_field('mode').get_default(), 'di')
def test_prefixed_form_with_default_field(self):
class PubForm(forms.ModelForm):
prefix = 'form-prefix'
class Meta:
model = PublicationDefaults
fields = ('mode',)
mode = 'de'
self.assertNotEqual(mode, PublicationDefaults._meta.get_field('mode').get_default())
mf1 = PubForm({'form-prefix-mode': mode})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.mode, mode)
def test_renderer_kwarg(self):
custom = object()
self.assertIs(ProductForm(renderer=custom).renderer, custom)
def test_default_splitdatetime_field(self):
class PubForm(forms.ModelForm):
datetime_published = forms.SplitDateTimeField(required=False)
class Meta:
model = PublicationDefaults
fields = ('datetime_published',)
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.datetime_published, datetime.datetime(2000, 1, 1))
mf2 = PubForm({'datetime_published_0': '2010-01-01', 'datetime_published_1': '0:00:00'})
self.assertEqual(mf2.errors, {})
m2 = mf2.save(commit=False)
self.assertEqual(m2.datetime_published, datetime.datetime(2010, 1, 1))
def test_default_filefield(self):
class PubForm(forms.ModelForm):
class Meta:
model = PublicationDefaults
fields = ('file',)
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.file.name, 'default.txt')
mf2 = PubForm({}, {'file': SimpleUploadedFile('name', b'foo')})
self.assertEqual(mf2.errors, {})
m2 = mf2.save(commit=False)
self.assertEqual(m2.file.name, 'name')
def test_default_selectdatewidget(self):
class PubForm(forms.ModelForm):
date_published = forms.DateField(required=False, widget=forms.SelectDateWidget)
class Meta:
model = PublicationDefaults
fields = ('date_published',)
mf1 = PubForm({})
self.assertEqual(mf1.errors, {})
m1 = mf1.save(commit=False)
self.assertEqual(m1.date_published, datetime.date.today())
mf2 = PubForm({'date_published_year': '2010', 'date_published_month': '1', 'date_published_day': '1'})
self.assertEqual(mf2.errors, {})
m2 = mf2.save(commit=False)
self.assertEqual(m2.date_published, datetime.date(2010, 1, 1))
class FieldOverridesByFormMetaForm(forms.ModelForm):
class Meta:
model = Category
fields = ['name', 'url', 'slug']
widgets = {
'name': forms.Textarea,
'url': forms.TextInput(attrs={'class': 'url'})
}
labels = {
'name': 'Title',
}
help_texts = {
'slug': 'Watch out! Letters, numbers, underscores and hyphens only.',
}
error_messages = {
'slug': {
'invalid': (
"Didn't you read the help text? "
"We said letters, numbers, underscores and hyphens only!"
)
}
}
field_classes = {
'url': forms.URLField,
}
class TestFieldOverridesByFormMeta(SimpleTestCase):
def test_widget_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertHTMLEqual(
str(form['name']),
'<textarea id="id_name" rows="10" cols="40" name="name" maxlength="20" required></textarea>',
)
self.assertHTMLEqual(
str(form['url']),
'<input id="id_url" type="text" class="url" name="url" maxlength="40" required>',
)
self.assertHTMLEqual(
str(form['slug']),
'<input id="id_slug" type="text" name="slug" maxlength="20" required>',
)
def test_label_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertHTMLEqual(
str(form['name'].label_tag()),
'<label for="id_name">Title:</label>',
)
self.assertHTMLEqual(
str(form['url'].label_tag()),
'<label for="id_url">The URL:</label>',
)
self.assertHTMLEqual(
str(form['slug'].label_tag()),
'<label for="id_slug">Slug:</label>',
)
def test_help_text_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertEqual(
form['slug'].help_text,
'Watch out! Letters, numbers, underscores and hyphens only.',
)
def test_error_messages_overrides(self):
form = FieldOverridesByFormMetaForm(data={
'name': 'Category',
'url': 'http://www.example.com/category/',
'slug': '!%#*@',
})
form.full_clean()
error = [
"Didn't you read the help text? "
"We said letters, numbers, underscores and hyphens only!",
]
self.assertEqual(form.errors, {'slug': error})
def test_field_type_overrides(self):
form = FieldOverridesByFormMetaForm()
self.assertIs(Category._meta.get_field('url').__class__, models.CharField)
self.assertIsInstance(form.fields['url'], forms.URLField)
class IncompleteCategoryFormWithFields(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
fields = ('name', 'slug')
model = Category
class IncompleteCategoryFormWithExclude(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
exclude = ['url']
model = Category
class ValidationTest(SimpleTestCase):
def test_validates_with_replaced_field_not_specified(self):
form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'})
        self.assertTrue(form.is_valid())
def test_validates_with_replaced_field_excluded(self):
form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'})
        self.assertTrue(form.is_valid())
def test_notrequired_overrides_notblank(self):
form = CustomWriterForm({})
        self.assertTrue(form.is_valid())
class UniqueTest(TestCase):
"""
unique/unique_together validation.
"""
@classmethod
def setUpTestData(cls):
cls.writer = Writer.objects.create(name='Mike Royko')
def test_simple_unique(self):
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertTrue(form.is_valid())
obj = form.save()
form = ProductForm({'slug': 'teddy-bear-blue'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Product with this Slug already exists.'])
form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj)
self.assertTrue(form.is_valid())
def test_unique_together(self):
"""ModelForm test of unique_together constraint"""
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertTrue(form.is_valid())
form.save()
form = PriceForm({'price': '6.00', 'quantity': '1'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])
def test_unique_together_exclusion(self):
"""
Forms don't validate unique_together constraints when only part of the
constraint is included in the form's fields. This allows using
form.save(commit=False) and then assigning the missing field(s) to the
model instance.
"""
class BookForm(forms.ModelForm):
class Meta:
model = DerivedBook
fields = ('isbn', 'suffix1')
# The unique_together is on suffix1/suffix2 but only suffix1 is part
# of the form. The fields must have defaults, otherwise they'll be
# skipped by other logic.
self.assertEqual(DerivedBook._meta.unique_together, (('suffix1', 'suffix2'),))
for name in ('suffix1', 'suffix2'):
with self.subTest(name=name):
field = DerivedBook._meta.get_field(name)
self.assertEqual(field.default, 0)
# The form fails validation with "Derived book with this Suffix1 and
# Suffix2 already exists." if the unique_together validation isn't
# skipped.
DerivedBook.objects.create(isbn='12345')
form = BookForm({'isbn': '56789', 'suffix1': '0'})
self.assertTrue(form.is_valid(), form.errors)
def test_multiple_field_unique_together(self):
"""
When the same field is involved in multiple unique_together
constraints, we need to make sure we don't remove the data for it
before doing all the validation checking (not just failing after
the first one).
"""
class TripleForm(forms.ModelForm):
class Meta:
model = Triple
fields = '__all__'
Triple.objects.create(left=1, middle=2, right=3)
form = TripleForm({'left': '1', 'middle': '2', 'right': '3'})
self.assertFalse(form.is_valid())
form = TripleForm({'left': '1', 'middle': '3', 'right': '1'})
self.assertTrue(form.is_valid())
@skipUnlessDBFeature('supports_nullable_unique_constraints')
def test_unique_null(self):
title = 'I May Be Wrong But I Doubt It'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
form.save()
form = BookForm({'title': title})
self.assertTrue(form.is_valid())
def test_inherited_unique(self):
title = 'Boss'
Book.objects.create(title=title, author=self.writer, special_id=1)
form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': '1', 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['special_id'], ['Book with this Special id already exists.'])
def test_inherited_unique_together(self):
title = 'Boss'
form = BookForm({'title': title, 'author': self.writer.pk})
self.assertTrue(form.is_valid())
form.save()
form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
def test_abstract_inherited_unique(self):
title = 'Boss'
isbn = '12345'
DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({
'title': 'Other', 'author': self.writer.pk, 'isbn': isbn,
'suffix1': '1', 'suffix2': '2',
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['isbn'], ['Derived book with this Isbn already exists.'])
def test_abstract_inherited_unique_together(self):
title = 'Boss'
isbn = '12345'
DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
form = DerivedBookForm({
'title': 'Other',
'author': self.writer.pk,
'isbn': '9876',
'suffix1': '0',
'suffix2': '0'
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(
form.errors['__all__'],
['Derived book with this Suffix1 and Suffix2 already exists.'],
)
def test_explicitpk_unspecified(self):
"""Test for primary_key being in the form and failing validation."""
form = ExplicitPKForm({'key': '', 'desc': ''})
self.assertFalse(form.is_valid())
def test_explicitpk_unique(self):
"""Ensure keys and blank character strings are tested for uniqueness."""
form = ExplicitPKForm({'key': 'key1', 'desc': ''})
self.assertTrue(form.is_valid())
form.save()
form = ExplicitPKForm({'key': 'key1', 'desc': ''})
self.assertFalse(form.is_valid())
if connection.features.interprets_empty_strings_as_nulls:
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
else:
self.assertEqual(len(form.errors), 3)
self.assertEqual(form.errors['__all__'], ['Explicit pk with this Key and Desc already exists.'])
self.assertEqual(form.errors['desc'], ['Explicit pk with this Desc already exists.'])
self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
def test_unique_for_date(self):
p = Post.objects.create(
title="Django 1.0 is released", slug="Django 1.0",
subtitle="Finally", posted=datetime.date(2008, 9, 3),
)
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0", 'posted': '2008-09-03'}
form = PostForm(data, instance=p)
self.assertTrue(form.is_valid())
form = PostForm({'title': "Django 1.0 is released"})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['posted'], ['This field is required.'])
def test_unique_for_date_in_exclude(self):
"""
If the date for unique_for_* constraints is excluded from the
        ModelForm (in this case 'posted' has editable=False), then the
constraint should be ignored.
"""
class DateTimePostForm(forms.ModelForm):
class Meta:
model = DateTimePost
fields = '__all__'
DateTimePost.objects.create(
title="Django 1.0 is released", slug="Django 1.0",
subtitle="Finally", posted=datetime.datetime(2008, 9, 3, 10, 10, 1),
)
# 'title' has unique_for_date='posted'
form = DateTimePostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
# 'slug' has unique_for_year='posted'
form = DateTimePostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertTrue(form.is_valid())
# 'subtitle' has unique_for_month='posted'
form = DateTimePostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertTrue(form.is_valid())
def test_inherited_unique_for_date(self):
p = Post.objects.create(
title="Django 1.0 is released", slug="Django 1.0",
subtitle="Finally", posted=datetime.date(2008, 9, 3),
)
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
self.assertTrue(form.is_valid())
form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0", 'posted': '2008-09-03'}
form = DerivedPostForm(data, instance=p)
self.assertTrue(form.is_valid())
def test_unique_for_date_with_nullable_date(self):
class FlexDatePostForm(forms.ModelForm):
class Meta:
model = FlexibleDatePost
fields = '__all__'
p = FlexibleDatePost.objects.create(
title="Django 1.0 is released", slug="Django 1.0",
subtitle="Finally", posted=datetime.date(2008, 9, 3),
)
form = FlexDatePostForm({'title': "Django 1.0 is released"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'slug': "Django 1.0"})
self.assertTrue(form.is_valid())
form = FlexDatePostForm({'subtitle': "Finally"})
self.assertTrue(form.is_valid())
data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0"}
form = FlexDatePostForm(data, instance=p)
self.assertTrue(form.is_valid())
def test_override_unique_message(self):
class CustomProductForm(ProductForm):
class Meta(ProductForm.Meta):
error_messages = {
'slug': {
'unique': "%(model_name)s's %(field_label)s not unique.",
}
}
Product.objects.create(slug='teddy-bear-blue')
form = CustomProductForm({'slug': 'teddy-bear-blue'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['slug'], ["Product's Slug not unique."])
def test_override_unique_together_message(self):
class CustomPriceForm(PriceForm):
class Meta(PriceForm.Meta):
error_messages = {
NON_FIELD_ERRORS: {
'unique_together': "%(model_name)s's %(field_labels)s not unique.",
}
}
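        # Unlike the per-field 'unique' message, the unique_together message is
        # interpolated with %(field_labels)s (plural), the joined labels of all
        # fields in the constraint.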
Price.objects.create(price=6.00, quantity=1)
form = CustomPriceForm({'price': '6.00', 'quantity': '1'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors[NON_FIELD_ERRORS], ["Price's Price and Quantity not unique."])
def test_override_unique_for_date_message(self):
class CustomPostForm(PostForm):
class Meta(PostForm.Meta):
error_messages = {
'title': {
'unique_for_date': (
"%(model_name)s's %(field_label)s not unique "
"for %(date_field_label)s date."
),
}
}
Post.objects.create(
title="Django 1.0 is released", slug="Django 1.0",
subtitle="Finally", posted=datetime.date(2008, 9, 3),
)
form = CustomPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors['title'], ["Post's Title not unique for Posted date."])
class ModelFormBasicTests(TestCase):
def create_basic_data(self):
self.c1 = Category.objects.create(name='Entertainment', slug='entertainment', url='entertainment')
self.c2 = Category.objects.create(name="It's a test", slug='its-test', url='test')
self.c3 = Category.objects.create(name='Third test', slug='third-test', url='third')
self.w_royko = Writer.objects.create(name='Mike Royko')
self.w_woodward = Writer.objects.create(name='Bob Woodward')
def test_base_form(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm()
self.assertHTMLEqual(
str(f),
"""<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr>
<tr><th><label for="id_slug">Slug:</label></th>
<td><input id="id_slug" type="text" name="slug" maxlength="20" required></td></tr>
<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr>"""
)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" required></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" required></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" required></li>"""
)
self.assertHTMLEqual(
str(f["name"]),
"""<input id="id_name" type="text" name="name" maxlength="20" required>""")
def test_auto_id(self):
f = BaseCategoryForm(auto_id=False)
self.assertHTMLEqual(
str(f.as_ul()),
"""<li>Name: <input type="text" name="name" maxlength="20" required></li>
<li>Slug: <input type="text" name="slug" maxlength="20" required></li>
<li>The URL: <input type="text" name="url" maxlength="40" required></li>"""
)
def test_initial_values(self):
self.create_basic_data()
# Initial values can be provided for model forms
f = ArticleForm(
auto_id=False,
initial={
'headline': 'Your headline here',
'categories': [str(self.c1.id), str(self.c2.id)]
})
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" required></li>
<li>Writer: <select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s" selected>Entertainment</option>
<option value="%s" selected>It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
# When the ModelForm is passed an instance, that instance's current values are
# inserted as 'initial' data in each Field.
f = RoykoForm(auto_id=False, instance=self.w_royko)
self.assertHTMLEqual(
str(f),
'''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" required><br>
<span class="helptext">Use both first and last names.</span></td></tr>'''
)
art = Article.objects.create(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=self.w_royko,
article='Hello.'
)
art_id_1 = art.id
f = ArticleForm(auto_id=False, instance=art)
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li>
<li>Writer: <select name="writer" required>
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected>Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required>Hello.</textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
f = ArticleForm({
'headline': 'Test headline',
'slug': 'test-headline',
'pub_date': '1984-02-06',
'writer': str(self.w_royko.pk),
'article': 'Hello.'
}, instance=art)
self.assertEqual(f.errors, {})
self.assertTrue(f.is_valid())
test_art = f.save()
self.assertEqual(test_art.id, art_id_1)
test_art = Article.objects.get(id=art_id_1)
self.assertEqual(test_art.headline, 'Test headline')
def test_m2m_initial_callable(self):
"""
Regression for #10349: A callable can be provided as the initial value for an m2m field
"""
self.maxDiff = 1200
self.create_basic_data()
# Set up a callable initial value
def formfield_for_dbfield(db_field, **kwargs):
if db_field.name == 'categories':
kwargs['initial'] = lambda: Category.objects.all().order_by('name')[:2]
return db_field.formfield(**kwargs)
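        # The lambda defers evaluation, so the initial queryset reflects the
        # database contents at render time rather than at field-definition time.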
# Create a ModelForm, instantiate it, and check that the output is as expected
ModelForm = modelform_factory(
Article,
fields=['headline', 'categories'],
formfield_callback=formfield_for_dbfield,
)
form = ModelForm()
self.assertHTMLEqual(
form.as_ul(),
"""<li><label for="id_headline">Headline:</label>
<input id="id_headline" type="text" name="headline" maxlength="50" required></li>
<li><label for="id_categories">Categories:</label>
<select multiple name="categories" id="id_categories">
<option value="%d" selected>Entertainment</option>
<option value="%d" selected>It&39;s a test</option>
<option value="%d">Third test</option>
</select></li>"""
% (self.c1.pk, self.c2.pk, self.c3.pk))
def test_basic_creation(self):
self.assertEqual(Category.objects.count(), 0)
f = BaseCategoryForm({
'name': 'Entertainment',
'slug': 'entertainment',
'url': 'entertainment',
})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], 'Entertainment')
self.assertEqual(f.cleaned_data['slug'], 'entertainment')
self.assertEqual(f.cleaned_data['url'], 'entertainment')
c1 = f.save()
        # Check that the instance returned by save() is the same row the ORM
        # now sees (not the fastest check, but explicit).
self.assertEqual(Category.objects.count(), 1)
self.assertEqual(c1, Category.objects.all()[0])
self.assertEqual(c1.name, "Entertainment")
def test_save_commit_false(self):
# If you call save() with commit=False, then it will return an object that
# hasn't yet been saved to the database. In this case, it's up to you to call
# save() on the resulting model instance.
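        # Sketch of the usual pattern (the attribute name is hypothetical):
        #   obj = form.save(commit=False)
        #   obj.extra_field = value   # fill in anything the form didn't cover
        #   obj.save()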
f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
self.assertTrue(f.is_valid())
c1 = f.save(commit=False)
self.assertEqual(c1.name, "Third test")
self.assertEqual(Category.objects.count(), 0)
c1.save()
self.assertEqual(Category.objects.count(), 1)
def test_save_with_data_errors(self):
# If you call save() with invalid data, you'll get a ValueError.
f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
self.assertEqual(f.errors['name'], ['This field is required.'])
self.assertEqual(
f.errors['slug'],
["Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."]
)
self.assertEqual(f.cleaned_data, {'url': 'foo'})
msg = "The Category could not be created because the data didn't validate."
with self.assertRaisesMessage(ValueError, msg):
f.save()
f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'})
with self.assertRaisesMessage(ValueError, msg):
f.save()
def test_multi_fields(self):
self.create_basic_data()
self.maxDiff = None
        # ManyToManyFields are represented by a ModelMultipleChoiceField,
        # ForeignKeys by a ModelChoiceField, and any fields with the 'choices'
        # attribute by a ChoiceField.
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(
str(f),
'''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" required></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" required></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" required></td></tr>
<tr><th>Writer:</th><td><select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article" required></textarea></td></tr>
<tr><th>Categories:</th><td><select multiple name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
# Add some categories and test the many-to-many form output.
new_art = Article.objects.create(
article="Hello.", headline="New headline", slug="new-headline",
pub_date=datetime.date(1988, 1, 4), writer=self.w_royko)
new_art.categories.add(Category.objects.get(name='Entertainment'))
self.assertQuerysetEqual(new_art.categories.all(), ["Entertainment"])
f = ArticleForm(auto_id=False, instance=new_art)
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li>
<li>Writer: <select name="writer" required>
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected>Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required>Hello.</textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s" selected>Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
def test_subset_fields(self):
# You can restrict a form to a subset of the complete list of fields
# by providing a 'fields' argument. If you try to save a
# model created with such a form, you need to ensure that the fields
# that are _not_ on the form have default values, or are allowed to have
# a value of None. If a field isn't specified on a form, the object created
# from the form can't provide a value for that field!
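        # For example, the PartialArticleForm below covers only 'headline' and
        # 'pub_date'; an instance created from it would still need a writer
        # assigned before it could be saved to the database.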
class PartialArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ('headline', 'pub_date')
f = PartialArticleForm(auto_id=False)
self.assertHTMLEqual(
str(f),
'''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" required></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" required></td></tr>''')
class PartialArticleFormWithSlug(forms.ModelForm):
class Meta:
model = Article
fields = ('headline', 'slug', 'pub_date')
w_royko = Writer.objects.create(name='Mike Royko')
art = Article.objects.create(
article="Hello.", headline="New headline", slug="new-headline",
pub_date=datetime.date(1988, 1, 4), writer=w_royko)
f = PartialArticleFormWithSlug({
'headline': 'New headline',
'slug': 'new-headline',
'pub_date': '1988-01-04'
}, auto_id=False, instance=art)
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li>'''
)
self.assertTrue(f.is_valid())
new_art = f.save()
self.assertEqual(new_art.id, art.id)
new_art = Article.objects.get(id=art.id)
self.assertEqual(new_art.headline, 'New headline')
def test_m2m_editing(self):
self.create_basic_data()
form_data = {
'headline': 'New headline',
'slug': 'new-headline',
'pub_date': '1988-01-04',
'writer': str(self.w_royko.pk),
'article': 'Hello.',
'categories': [str(self.c1.id), str(self.c2.id)]
}
# Create a new article, with categories, via the form.
f = ArticleForm(form_data)
new_art = f.save()
new_art = Article.objects.get(id=new_art.id)
art_id_1 = new_art.id
self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"])
# Now, submit form data with no categories. This deletes the existing categories.
form_data['categories'] = []
f = ArticleForm(form_data, instance=new_art)
new_art = f.save()
self.assertEqual(new_art.id, art_id_1)
new_art = Article.objects.get(id=art_id_1)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Create a new article, with no categories, via the form.
f = ArticleForm(form_data)
new_art = f.save()
art_id_2 = new_art.id
self.assertNotIn(art_id_2, (None, art_id_1))
new_art = Article.objects.get(id=art_id_2)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Create a new article, with categories, via the form, but use commit=False.
# The m2m data won't be saved until save_m2m() is invoked on the form.
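        # Sketch of the commit=False workflow with m2m data (mirrors the steps
        # below):
        #   obj = f.save(commit=False)
        #   obj.save()      # the instance needs a primary key first
        #   f.save_m2m()    # only then can the m2m rows be written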
form_data['categories'] = [str(self.c1.id), str(self.c2.id)]
f = ArticleForm(form_data)
new_art = f.save(commit=False)
# Manually save the instance
new_art.save()
art_id_3 = new_art.id
self.assertNotIn(art_id_3, (None, art_id_1, art_id_2))
# The instance doesn't have m2m data yet
new_art = Article.objects.get(id=art_id_3)
self.assertQuerysetEqual(new_art.categories.all(), [])
# Save the m2m data on the form
f.save_m2m()
self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"])
def test_custom_form_fields(self):
# Here, we define a custom ModelForm. Because it happens to have the same fields as
# the Category model, we can just call the form's save() to apply its changes to an
# existing Category instance.
class ShortCategory(forms.ModelForm):
name = forms.CharField(max_length=5)
slug = forms.CharField(max_length=5)
url = forms.CharField(max_length=3)
class Meta:
model = Category
fields = '__all__'
cat = Category.objects.create(name='Third test')
form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat)
self.assertEqual(form.save().name, 'Third')
self.assertEqual(Category.objects.get(id=cat.id).name, 'Third')
def test_runtime_choicefield_populated(self):
self.maxDiff = None
# Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
# at runtime, based on the data in the database when the form is displayed, not
# the data in the database when the form is instantiated.
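        # The choices are lazy iterators over the field's queryset, so every
        # rendering below re-evaluates the queryset and picks up rows created
        # in the meantime.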
self.create_basic_data()
f = ArticleForm(auto_id=False)
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" required></li>
<li>Writer: <select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> </li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
c4 = Category.objects.create(name='Fourth', url='4th')
w_bernstein = Writer.objects.create(name='Carl Bernstein')
self.assertHTMLEqual(
f.as_ul(),
'''<li>Headline: <input type="text" name="headline" maxlength="50" required></li>
<li>Slug: <input type="text" name="slug" maxlength="50" required></li>
<li>Pub date: <input type="text" name="pub_date" required></li>
<li>Writer: <select name="writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li>
<li>Categories: <select multiple name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
<option value="%s">Fourth</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected>---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, w_bernstein.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk, c4.pk))
def test_recleaning_model_form_instance(self):
"""
Re-cleaning an instance that was added via a ModelForm shouldn't raise
a pk uniqueness error.
"""
class AuthorForm(forms.ModelForm):
class Meta:
model = Author
fields = '__all__'
form = AuthorForm({'full_name': 'Bob'})
self.assertTrue(form.is_valid())
obj = form.save()
obj.name = 'Alice'
obj.full_clean()
class ModelMultipleChoiceFieldTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.c1 = Category.objects.create(name='Entertainment', slug='entertainment', url='entertainment')
cls.c2 = Category.objects.create(name="It's a test", slug='its-test', url='test')
cls.c3 = Category.objects.create(name='Third', slug='third-test', url='third')
def test_model_multiple_choice_field(self):
f = forms.ModelMultipleChoiceField(Category.objects.all())
self.assertEqual(list(f.choices), [
(self.c1.pk, 'Entertainment'),
(self.c2.pk, "It's a test"),
(self.c3.pk, 'Third')])
with self.assertRaises(ValidationError):
f.clean(None)
with self.assertRaises(ValidationError):
f.clean([])
self.assertQuerysetEqual(f.clean([self.c1.id]), ["Entertainment"])
self.assertQuerysetEqual(f.clean([self.c2.id]), ["It's a test"])
self.assertQuerysetEqual(f.clean([str(self.c1.id)]), ["Entertainment"])
self.assertQuerysetEqual(
f.clean([str(self.c1.id), str(self.c2.id)]),
["Entertainment", "It's a test"], ordered=False
)
self.assertQuerysetEqual(
f.clean([self.c1.id, str(self.c2.id)]),
["Entertainment", "It's a test"], ordered=False
)
self.assertQuerysetEqual(
f.clean((self.c1.id, str(self.c2.id))),
["Entertainment", "It's a test"], ordered=False
)
with self.assertRaises(ValidationError):
f.clean(['100'])
with self.assertRaises(ValidationError):
f.clean('hello')
with self.assertRaises(ValidationError):
f.clean(['fail'])
# Invalid types that require TypeError to be caught (#22808).
with self.assertRaises(ValidationError):
f.clean([['fail']])
with self.assertRaises(ValidationError):
f.clean([{'foo': 'bar'}])
        # Add a Category object *after* the ModelMultipleChoiceField has
        # already been instantiated. This proves clean() hits the database at
        # validation time rather than caching the queryset when the field is
        # created.
# Note, we are using an id of 1006 here since tests that run before
# this may create categories with primary keys up to 6. Use
# a number that will not conflict.
c6 = Category.objects.create(id=1006, name='Sixth', url='6th')
self.assertQuerysetEqual(f.clean([c6.id]), ["Sixth"])
        # Delete a Category object *after* the field has been instantiated to
        # show the same thing in reverse: the stale pk no longer validates.
Category.objects.get(url='6th').delete()
with self.assertRaises(ValidationError):
f.clean([c6.id])
def test_model_multiple_choice_required_false(self):
f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
self.assertIsInstance(f.clean([]), EmptyQuerySet)
self.assertIsInstance(f.clean(()), EmptyQuerySet)
with self.assertRaises(ValidationError):
f.clean(['0'])
with self.assertRaises(ValidationError):
f.clean([str(self.c3.id), '0'])
with self.assertRaises(ValidationError):
f.clean([str(self.c1.id), '0'])
# queryset can be changed after the field is created.
f.queryset = Category.objects.exclude(name='Third')
self.assertEqual(list(f.choices), [
(self.c1.pk, 'Entertainment'),
(self.c2.pk, "It's a test")])
self.assertQuerysetEqual(f.clean([self.c2.id]), ["It's a test"])
with self.assertRaises(ValidationError):
f.clean([self.c3.id])
with self.assertRaises(ValidationError):
f.clean([str(self.c2.id), str(self.c3.id)])
f.queryset = Category.objects.all()
f.label_from_instance = lambda obj: "multicategory " + str(obj)
self.assertEqual(list(f.choices), [
(self.c1.pk, 'multicategory Entertainment'),
(self.c2.pk, "multicategory It's a test"),
(self.c3.pk, 'multicategory Third')])
def test_model_multiple_choice_number_of_queries(self):
"""
ModelMultipleChoiceField does O(1) queries instead of O(n) (#10156).
"""
persons = [Writer.objects.create(name="Person %s" % i) for i in range(30)]
f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all())
self.assertNumQueries(1, f.clean, [p.pk for p in persons[1:11:2]])
def test_model_multiple_choice_run_validators(self):
"""
        ModelMultipleChoiceField runs the given validators (#14144).
"""
for i in range(30):
Writer.objects.create(name="Person %s" % i)
self._validator_run = False
def my_validator(value):
self._validator_run = True
f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all(), validators=[my_validator])
f.clean([p.pk for p in Writer.objects.all()[8:9]])
self.assertTrue(self._validator_run)
def test_model_multiple_choice_show_hidden_initial(self):
"""
Test support of show_hidden_initial by ModelMultipleChoiceField.
"""
class WriterForm(forms.Form):
persons = forms.ModelMultipleChoiceField(show_hidden_initial=True, queryset=Writer.objects.all())
person1 = Writer.objects.create(name="Person 1")
person2 = Writer.objects.create(name="Person 2")
form = WriterForm(
initial={'persons': [person1, person2]},
data={
'initial-persons': [str(person1.pk), str(person2.pk)],
'persons': [str(person1.pk), str(person2.pk)],
},
)
self.assertTrue(form.is_valid())
self.assertFalse(form.has_changed())
form = WriterForm(
initial={'persons': [person1, person2]},
data={
'initial-persons': [str(person1.pk), str(person2.pk)],
'persons': [str(person2.pk)],
},
)
self.assertTrue(form.is_valid())
self.assertTrue(form.has_changed())
def test_model_multiple_choice_field_22745(self):
"""
#22745 -- Make sure that ModelMultipleChoiceField with
CheckboxSelectMultiple widget doesn't produce unnecessary db queries
when accessing its BoundField's attrs.
"""
class ModelMultipleChoiceForm(forms.Form):
categories = forms.ModelMultipleChoiceField(Category.objects.all(), widget=forms.CheckboxSelectMultiple)
form = ModelMultipleChoiceForm()
field = form['categories'] # BoundField
template = Template('{{ field.name }}{{ field }}{{ field.help_text }}')
with self.assertNumQueries(1):
template.render(Context({'field': field}))
def test_show_hidden_initial_changed_queries_efficiently(self):
class WriterForm(forms.Form):
persons = forms.ModelMultipleChoiceField(
show_hidden_initial=True, queryset=Writer.objects.all())
writers = (Writer.objects.create(name=str(x)) for x in range(0, 50))
writer_pks = tuple(x.pk for x in writers)
form = WriterForm(data={'initial-persons': writer_pks})
with self.assertNumQueries(1):
self.assertTrue(form.has_changed())
def test_clean_does_deduplicate_values(self):
class WriterForm(forms.Form):
persons = forms.ModelMultipleChoiceField(queryset=Writer.objects.all())
person1 = Writer.objects.create(name="Person 1")
form = WriterForm(data={})
queryset = form.fields['persons'].clean([str(person1.pk)] * 50)
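        # clean() works on a de-duplicated set of the submitted values, so the
        # 50 copies of the same pk should collapse to a single SQL parameter.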
sql, params = queryset.query.sql_with_params()
self.assertEqual(len(params), 1)
def test_to_field_name_with_initial_data(self):
class ArticleCategoriesForm(forms.ModelForm):
categories = forms.ModelMultipleChoiceField(Category.objects.all(), to_field_name='slug')
class Meta:
model = Article
fields = ['categories']
article = Article.objects.create(
headline='Test article',
slug='test-article',
pub_date=datetime.date(1988, 1, 4),
writer=Writer.objects.create(name='Test writer'),
article='Hello.',
)
article.categories.add(self.c2, self.c3)
form = ArticleCategoriesForm(instance=article)
self.assertCountEqual(form['categories'].value(), [self.c2.slug, self.c3.slug])
class ModelOneToOneFieldTests(TestCase):
def test_modelform_onetoonefield(self):
class ImprovedArticleForm(forms.ModelForm):
class Meta:
model = ImprovedArticle
fields = '__all__'
class ImprovedArticleWithParentLinkForm(forms.ModelForm):
class Meta:
model = ImprovedArticleWithParentLink
fields = '__all__'
self.assertEqual(list(ImprovedArticleForm.base_fields), ['article'])
self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), [])
def test_modelform_subclassed_model(self):
class BetterWriterForm(forms.ModelForm):
class Meta:
# BetterWriter model is a subclass of Writer with an additional `score` field
model = BetterWriter
fields = '__all__'
bw = BetterWriter.objects.create(name='Joe Better', score=10)
self.assertEqual(sorted(model_to_dict(bw)), ['id', 'name', 'score', 'writer_ptr'])
form = BetterWriterForm({'name': 'Some Name', 'score': 12})
self.assertTrue(form.is_valid())
bw2 = form.save()
self.assertEqual(bw2.score, 12)
def test_onetoonefield(self):
class WriterProfileForm(forms.ModelForm):
class Meta:
# WriterProfile has a OneToOneField to Writer
model = WriterProfile
fields = '__all__'
self.w_royko = Writer.objects.create(name='Mike Royko')
self.w_woodward = Writer.objects.create(name='Bob Woodward')
form = WriterProfileForm()
self.assertHTMLEqual(
form.as_p(),
'''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" min="0" required></p>''' % (
self.w_woodward.pk, self.w_royko.pk,
)
)
data = {
'writer': str(self.w_woodward.pk),
'age': '65',
}
form = WriterProfileForm(data)
instance = form.save()
self.assertEqual(str(instance), 'Bob Woodward is 65')
form = WriterProfileForm(instance=instance)
self.assertHTMLEqual(
form.as_p(),
'''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer" required>
<option value="">---------</option>
<option value="%s" selected>Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label>
<input type="number" name="age" value="65" id="id_age" min="0" required></p>''' % (
self.w_woodward.pk, self.w_royko.pk,
)
)
def test_assignment_of_none(self):
class AuthorForm(forms.ModelForm):
class Meta:
model = Author
fields = ['publication', 'full_name']
publication = Publication.objects.create(title="Pravda", date_published=datetime.date(1991, 8, 22))
author = Author.objects.create(publication=publication, full_name='John Doe')
form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author)
self.assertTrue(form.is_valid())
self.assertIsNone(form.cleaned_data['publication'])
author = form.save()
        # The author object returned from the form still holds the original
        # publication object, so re-fetch from the database to verify.
new_author = Author.objects.get(pk=author.pk)
self.assertIsNone(new_author.publication)
def test_assignment_of_none_null_false(self):
class AuthorForm(forms.ModelForm):
class Meta:
model = Author1
fields = ['publication', 'full_name']
publication = Publication.objects.create(title="Pravda", date_published=datetime.date(1991, 8, 22))
author = Author1.objects.create(publication=publication, full_name='John Doe')
form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author)
self.assertFalse(form.is_valid())
class FileAndImageFieldTests(TestCase):
def test_clean_false(self):
"""
If the ``clean`` method on a non-required FileField receives False as
the data (meaning clear the field value), it returns False, regardless
of the value of ``initial``.
"""
f = forms.FileField(required=False)
self.assertIs(f.clean(False), False)
self.assertIs(f.clean(False, 'initial'), False)
def test_clean_false_required(self):
"""
If the ``clean`` method on a required FileField receives False as the
data, it has the same effect as None: initial is returned if non-empty,
otherwise the validation catches the lack of a required value.
"""
f = forms.FileField(required=True)
self.assertEqual(f.clean(False, 'initial'), 'initial')
with self.assertRaises(ValidationError):
f.clean(False)
def test_full_clear(self):
"""
Integration happy-path test that a model FileField can actually be set
and cleared via a ModelForm.
"""
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = '__all__'
form = DocumentForm()
self.assertIn('name="myfile"', str(form))
self.assertNotIn('myfile-clear', str(form))
form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
self.assertTrue(form.is_valid())
doc = form.save(commit=False)
self.assertEqual(doc.myfile.name, 'something.txt')
form = DocumentForm(instance=doc)
self.assertIn('myfile-clear', str(form))
form = DocumentForm(instance=doc, data={'myfile-clear': 'true'})
doc = form.save(commit=False)
self.assertFalse(doc.myfile)
def test_clear_and_file_contradiction(self):
"""
If the user submits a new file upload AND checks the clear checkbox,
they get a validation error, and the bound redisplay of the form still
includes the current file and the clear checkbox.
"""
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = '__all__'
form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
self.assertTrue(form.is_valid())
doc = form.save(commit=False)
form = DocumentForm(
instance=doc,
files={'myfile': SimpleUploadedFile('something.txt', b'content')},
data={'myfile-clear': 'true'},
)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['myfile'],
            ['Please either submit a file or check the clear checkbox, not both.'],
        )
rendered = str(form)
self.assertIn('something.txt', rendered)
self.assertIn('myfile-clear', rendered)
def test_render_empty_file_field(self):
class DocumentForm(forms.ModelForm):
class Meta:
model = Document
fields = '__all__'
doc = Document.objects.create()
form = DocumentForm(instance=doc)
self.assertHTMLEqual(
str(form['myfile']),
'<input id="id_myfile" name="myfile" type="file">'
)
def test_file_field_data(self):
# Test conditions when files is either not given or empty.
f = TextFileForm(data={'description': 'Assistance'})
self.assertFalse(f.is_valid())
f = TextFileForm(data={'description': 'Assistance'}, files={})
self.assertFalse(f.is_valid())
# Upload a file and ensure it all works as expected.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', b'hello world')},
)
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
instance.file.delete()
# If the previous file has been deleted, the file name can be reused
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test1.txt', b'hello world')},
)
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Check if the max_length attribute has been inherited from the model.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test-maxlength.txt', b'hello world')},
)
self.assertFalse(f.is_valid())
# Edit an instance that already has the file defined in the model. This will not
# save the file again, but leave it exactly as it is.
f = TextFileForm({'description': 'Assistance'}, instance=instance)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt')
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test1.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
# Override the file by uploading a new one.
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test2.txt', b'hello world')},
instance=instance,
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test2.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
def test_filefield_required_false(self):
# Test the non-required FileField
f = TextFileForm(data={'description': 'Assistance'})
f.fields['file'].required = False
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.file.name, '')
f = TextFileForm(
data={'description': 'Assistance'},
files={'file': SimpleUploadedFile('test3.txt', b'hello world')},
instance=instance,
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Instance can be edited w/out re-uploading the file and existing file should be preserved.
f = TextFileForm({'description': 'New Description'}, instance=instance)
f.fields['file'].required = False
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.description, 'New Description')
self.assertEqual(instance.file.name, 'tests/test3.txt')
# Delete the current file since this is not done by Django.
instance.file.delete()
instance.delete()
def test_custom_file_field_save(self):
"""
Regression for #11149: save_form_data should be called only once
"""
class CFFForm(forms.ModelForm):
class Meta:
model = CustomFF
fields = '__all__'
# It's enough that the form saves without error -- the custom save routine will
# generate an AssertionError if it is called more than once during save.
form = CFFForm(data={'f': None})
form.save()
def test_file_field_multiple_save(self):
"""
Simulate a file upload and check how many times Model.save() gets
called. Test for bug #639.
"""
class PhotoForm(forms.ModelForm):
class Meta:
model = Photo
fields = '__all__'
# Grab an image for testing.
filename = os.path.join(os.path.dirname(__file__), 'test.png')
with open(filename, "rb") as fp:
img = fp.read()
# Fake a POST QueryDict and FILES MultiValueDict.
data = {'title': 'Testing'}
files = {"image": SimpleUploadedFile('test.png', img, 'image/png')}
form = PhotoForm(data=data, files=files)
p = form.save()
try:
# Check the savecount stored on the object (see the model).
self.assertEqual(p._savecount, 1)
finally:
# Delete the "uploaded" file to avoid clogging /tmp.
p = Photo.objects.get()
p.image.delete(save=False)
def test_file_path_field_blank(self):
"""FilePathField(blank=True) includes the empty option."""
class FPForm(forms.ModelForm):
class Meta:
model = FilePathModel
fields = '__all__'
form = FPForm()
self.assertEqual([name for _, name in form['path'].field.choices], ['---------', 'models.py'])
@skipUnless(test_images, "Pillow not installed")
def test_image_field(self):
# ImageField and FileField are nearly identical, but they differ slightly when
# it comes to validation. This specifically tests that #6302 is fixed for
# both file fields and image fields.
with open(os.path.join(os.path.dirname(__file__), 'test.png'), 'rb') as fp:
image_data = fp.read()
with open(os.path.join(os.path.dirname(__file__), 'test2.png'), 'rb') as fp:
image_data2 = fp.read()
f = ImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)},
)
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
f = ImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)},
)
self.assertTrue(f.is_valid())
self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Edit an instance that already has the (required) image defined in the model. This will not
# save the image again, but leave it exactly as it is.
f = ImageFileForm(data={'description': 'Look, it changed'}, instance=instance)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png')
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test.png')
self.assertEqual(instance.height, 16)
self.assertEqual(instance.width, 16)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
# Override the file by uploading a new one.
f = ImageFileForm(
data={'description': 'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)},
instance=instance,
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
f = ImageFileForm(
data={'description': 'Changed it'},
files={'image': SimpleUploadedFile('test2.png', image_data2)},
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test2.png')
self.assertEqual(instance.height, 32)
self.assertEqual(instance.width, 48)
# Delete the current file since this is not done by Django, but don't save
# because the dimension fields are not null=True.
instance.image.delete(save=False)
instance.delete()
# Test the non-required ImageField
# Note: In Oracle, we expect a null ImageField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_imagefield_repr = ''
else:
expected_null_imagefield_repr = None
f = OptionalImageFileForm(data={'description': 'Test'})
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, expected_null_imagefield_repr)
self.assertIsNone(instance.width)
self.assertIsNone(instance.height)
f = OptionalImageFileForm(
data={'description': 'And a final one'},
files={'image': SimpleUploadedFile('test3.png', image_data)},
instance=instance,
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Editing the instance without re-uploading the image should not affect
# the image or its width/height properties.
f = OptionalImageFileForm({'description': 'New Description'}, instance=instance)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.description, 'New Description')
self.assertEqual(instance.image.name, 'tests/test3.png')
self.assertEqual(instance.width, 16)
self.assertEqual(instance.height, 16)
# Delete the current file since this is not done by Django.
instance.image.delete()
instance.delete()
f = OptionalImageFileForm(
data={'description': 'And a final one'},
files={'image': SimpleUploadedFile('test4.png', image_data2)}
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'tests/test4.png')
self.assertEqual(instance.width, 48)
self.assertEqual(instance.height, 32)
instance.delete()
# Test callable upload_to behavior that's dependent on the value of another field in the model
f = ImageFileForm(
data={'description': 'And a final one', 'path': 'foo'},
files={'image': SimpleUploadedFile('test4.png', image_data)},
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'foo/test4.png')
instance.delete()
# Editing an instance that has an image without an extension shouldn't
# fail validation. First create:
f = NoExtensionImageFileForm(
data={'description': 'An image'},
files={'image': SimpleUploadedFile('test.png', image_data)},
)
self.assertTrue(f.is_valid())
instance = f.save()
self.assertEqual(instance.image.name, 'tests/no_extension')
# Then edit:
f = NoExtensionImageFileForm(data={'description': 'Edited image'}, instance=instance)
self.assertTrue(f.is_valid())
class ModelOtherFieldTests(SimpleTestCase):
def test_big_integer_field(self):
bif = BigIntForm({'biggie': '-9223372036854775808'})
self.assertTrue(bif.is_valid())
bif = BigIntForm({'biggie': '-9223372036854775809'})
self.assertFalse(bif.is_valid())
self.assertEqual(
bif.errors,
{'biggie': ['Ensure this value is greater than or equal to -9223372036854775808.']}
)
bif = BigIntForm({'biggie': '9223372036854775807'})
self.assertTrue(bif.is_valid())
bif = BigIntForm({'biggie': '9223372036854775808'})
self.assertFalse(bif.is_valid())
self.assertEqual(bif.errors, {'biggie': ['Ensure this value is less than or equal to 9223372036854775807.']})
def test_url_on_modelform(self):
"Check basic URL field validation on model forms"
class HomepageForm(forms.ModelForm):
class Meta:
model = Homepage
fields = '__all__'
self.assertFalse(HomepageForm({'url': 'foo'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://example'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://example.'}).is_valid())
self.assertFalse(HomepageForm({'url': 'http://com.'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://localhost'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://example.com'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com/test'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000/test'}).is_valid())
self.assertTrue(HomepageForm({'url': 'http://example.com/foo/bar'}).is_valid())
def test_modelform_non_editable_field(self):
"""
When explicitly including a non-editable field in a ModelForm, the
error message should be explicit.
"""
# 'created', non-editable, is excluded by default
self.assertNotIn('created', ArticleForm().fields)
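        # Listing such a field explicitly in Meta.fields, however, is an error
        # rather than a silent omission: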
msg = "'created' cannot be specified for Article model form as it is a non-editable field"
with self.assertRaisesMessage(FieldError, msg):
class InvalidArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ('headline', 'created')
def test_http_prefixing(self):
"""
If the http:// prefix is omitted on form input, the field adds it again. (Refs #13613)
"""
class HomepageForm(forms.ModelForm):
class Meta:
model = Homepage
fields = '__all__'
form = HomepageForm({'url': 'example.com'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['url'], 'http://example.com')
form = HomepageForm({'url': 'example.com/test'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['url'], 'http://example.com/test')
class OtherModelFormTests(TestCase):
def test_media_on_modelform(self):
# Similar to a regular Form class you can define custom media to be used on
# the ModelForm.
f = ModelFormWithMedia()
self.assertHTMLEqual(
str(f.media),
'''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet">
<script type="text/javascript" src="/some/form/javascript"></script>'''
)
def test_choices_type(self):
# Choices on CharField and IntegerField
f = ArticleForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('42')
f = ArticleStatusForm()
with self.assertRaises(ValidationError):
f.fields['status'].clean('z')
def test_prefetch_related_queryset(self):
"""
ModelChoiceField should respect a prefetch_related() on its queryset.
"""
blue = Colour.objects.create(name='blue')
red = Colour.objects.create(name='red')
multicolor_item = ColourfulItem.objects.create()
multicolor_item.colours.add(blue, red)
red_item = ColourfulItem.objects.create()
red_item.colours.add(red)
class ColorModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return ', '.join(c.name for c in obj.colours.all())
field = ColorModelChoiceField(ColourfulItem.objects.prefetch_related('colours'))
with self.assertNumQueries(3): # would be 4 if prefetch is ignored
self.assertEqual(tuple(field.choices), (
('', '---------'),
(multicolor_item.pk, 'blue, red'),
(red_item.pk, 'red'),
))
def test_foreignkeys_which_use_to_field(self):
apple = Inventory.objects.create(barcode=86, name='Apple')
Inventory.objects.create(barcode=22, name='Pear')
core = Inventory.objects.create(barcode=87, name='Core', parent=apple)
field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), (
('', '---------'),
(86, 'Apple'),
(87, 'Core'),
(22, 'Pear')))
form = InventoryForm(instance=core)
self.assertHTMLEqual(str(form['parent']), '''<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected>Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>''')
data = model_to_dict(core)
data['parent'] = '22'
form = InventoryForm(data=data, instance=core)
core = form.save()
self.assertEqual(core.parent.name, 'Pear')
class CategoryForm(forms.ModelForm):
description = forms.CharField()
class Meta:
model = Category
fields = ['description', 'url']
self.assertEqual(list(CategoryForm.base_fields), ['description', 'url'])
self.assertHTMLEqual(
str(CategoryForm()),
'''<tr><th><label for="id_description">Description:</label></th>
<td><input type="text" name="description" id="id_description" required></td></tr>
<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr>'''
)
# to_field_name should also work on ModelMultipleChoiceField ##################
field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
self.assertEqual(tuple(field.choices), ((86, 'Apple'), (87, 'Core'), (22, 'Pear')))
self.assertQuerysetEqual(field.clean([86]), ['Apple'])
form = SelectInventoryForm({'items': [87, 22]})
self.assertTrue(form.is_valid())
self.assertEqual(len(form.cleaned_data), 1)
self.assertQuerysetEqual(form.cleaned_data['items'], ['Core', 'Pear'])
def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
self.assertEqual(list(CustomFieldForExclusionForm.base_fields), ['name'])
self.assertHTMLEqual(
str(CustomFieldForExclusionForm()),
'''<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="10" required></td></tr>'''
)
def test_iterable_model_m2m(self):
class ColourfulItemForm(forms.ModelForm):
class Meta:
model = ColourfulItem
fields = '__all__'
colour = Colour.objects.create(name='Blue')
form = ColourfulItemForm()
self.maxDiff = 1024
self.assertHTMLEqual(
form.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="50" required></p>
<p><label for="id_colours">Colours:</label>
<select multiple name="colours" id="id_colours" required>
<option value="%(blue_pk)s">Blue</option>
</select></p>"""
% {'blue_pk': colour.pk})
def test_callable_field_default(self):
class PublicationDefaultsForm(forms.ModelForm):
class Meta:
model = PublicationDefaults
fields = ('title', 'date_published', 'mode', 'category')
self.maxDiff = 2000
form = PublicationDefaultsForm()
today_str = str(datetime.date.today())
self.assertHTMLEqual(
form.as_p(),
"""
<p><label for="id_title">Title:</label>
<input id="id_title" maxlength="30" name="title" type="text" required></p>
<p><label for="id_date_published">Date published:</label>
<input id="id_date_published" name="date_published" type="text" value="{0}" required>
<input id="initial-id_date_published" name="initial-date_published" type="hidden" value="{0}"></p>
<p><label for="id_mode">Mode:</label> <select id="id_mode" name="mode">
<option value="di" selected>direct</option>
<option value="de">delayed</option></select>
<input id="initial-id_mode" name="initial-mode" type="hidden" value="di"></p>
<p><label for="id_category">Category:</label> <select id="id_category" name="category">
<option value="1">Games</option>
<option value="2">Comics</option>
<option value="3" selected>Novel</option></select>
<input id="initial-id_category" name="initial-category" type="hidden" value="3">
""".format(today_str)
)
empty_data = {
'title': '',
'date_published': today_str,
'initial-date_published': today_str,
'mode': 'di',
'initial-mode': 'di',
'category': '3',
'initial-category': '3',
}
bound_form = PublicationDefaultsForm(empty_data)
self.assertFalse(bound_form.has_changed())
class ModelFormCustomErrorTests(SimpleTestCase):
def test_custom_error_messages(self):
data = {'name1': '@#$!!**@#$', 'name2': '@#$!!**@#$'}
errors = CustomErrorMessageForm(data).errors
self.assertHTMLEqual(
str(errors['name1']),
'<ul class="errorlist"><li>Form custom error message.</li></ul>'
)
self.assertHTMLEqual(
str(errors['name2']),
'<ul class="errorlist"><li>Model custom error message.</li></ul>'
)
def test_model_clean_error_messages(self):
data = {'name1': 'FORBIDDEN_VALUE', 'name2': 'ABC'}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertHTMLEqual(
str(form.errors['name1']),
'<ul class="errorlist"><li>Model.clean() error messages.</li></ul>'
)
data = {'name1': 'FORBIDDEN_VALUE2', 'name2': 'ABC'}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertHTMLEqual(
str(form.errors['name1']),
'<ul class="errorlist"><li>Model.clean() error messages (simpler syntax).</li></ul>'
)
data = {'name1': 'GLOBAL_ERROR', 'name2': 'ABC'}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['__all__'], ['Global error message.'])
class CustomCleanTests(TestCase):
def test_override_clean(self):
"""
Regression for #12596: Calling super from ModelForm.clean() should be
optional.
"""
class TripleFormWithCleanOverride(forms.ModelForm):
class Meta:
model = Triple
fields = '__all__'
def clean(self):
if not self.cleaned_data['left'] == self.cleaned_data['right']:
raise forms.ValidationError('Left and right should be equal')
return self.cleaned_data
form = TripleFormWithCleanOverride({'left': 1, 'middle': 2, 'right': 1})
self.assertTrue(form.is_valid())
# form.instance.left will be None if the instance was not constructed
# by form.full_clean().
self.assertEqual(form.instance.left, 1)
def test_model_form_clean_applies_to_model(self):
"""
Regression test for #12960. Make sure the cleaned_data returned from
ModelForm.clean() is applied to the model instance.
"""
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = '__all__'
def clean(self):
self.cleaned_data['name'] = self.cleaned_data['name'].upper()
return self.cleaned_data
data = {'name': 'Test', 'slug': 'test', 'url': '/test'}
form = CategoryForm(data)
category = form.save()
self.assertEqual(category.name, 'TEST')
class ModelFormInheritanceTests(SimpleTestCase):
def test_form_subclass_inheritance(self):
class Form(forms.Form):
age = forms.IntegerField()
class ModelForm(forms.ModelForm, Form):
class Meta:
model = Writer
fields = '__all__'
self.assertEqual(list(ModelForm().fields), ['name', 'age'])
def test_field_removal(self):
class ModelForm(forms.ModelForm):
class Meta:
model = Writer
fields = '__all__'
class Mixin:
age = None
class Form(forms.Form):
age = forms.IntegerField()
class Form2(forms.Form):
foo = forms.IntegerField()
self.assertEqual(list(ModelForm().fields), ['name'])
self.assertEqual(list(type('NewForm', (Mixin, Form), {})().fields), [])
self.assertEqual(list(type('NewForm', (Form2, Mixin, Form), {})().fields), ['foo'])
self.assertEqual(list(type('NewForm', (Mixin, ModelForm, Form), {})().fields), ['name'])
self.assertEqual(list(type('NewForm', (ModelForm, Mixin, Form), {})().fields), ['name'])
self.assertEqual(list(type('NewForm', (ModelForm, Form, Mixin), {})().fields), ['name', 'age'])
self.assertEqual(list(type('NewForm', (ModelForm, Form), {'age': None})().fields), ['name'])
def test_field_removal_name_clashes(self):
"""
Form fields can be removed in subclasses by setting them to None
(#22510).
"""
class MyForm(forms.ModelForm):
media = forms.CharField()
class Meta:
model = Writer
fields = '__all__'
class SubForm(MyForm):
media = None
self.assertIn('media', MyForm().fields)
self.assertNotIn('media', SubForm().fields)
self.assertTrue(hasattr(MyForm, 'media'))
self.assertTrue(hasattr(SubForm, 'media'))
class StumpJokeForm(forms.ModelForm):
class Meta:
model = StumpJoke
fields = '__all__'
class CustomFieldWithQuerysetButNoLimitChoicesTo(forms.Field):
queryset = 42
class StumpJokeWithCustomFieldForm(forms.ModelForm):
custom = CustomFieldWithQuerysetButNoLimitChoicesTo()
class Meta:
model = StumpJoke
fields = ()
class LimitChoicesToTests(TestCase):
"""
Tests the functionality of ``limit_choices_to``.
"""
@classmethod
def setUpTestData(cls):
cls.threepwood = Character.objects.create(
username='threepwood',
last_action=datetime.datetime.today() + datetime.timedelta(days=1),
)
cls.marley = Character.objects.create(
username='marley',
last_action=datetime.datetime.today() - datetime.timedelta(days=1),
)
def test_limit_choices_to_callable_for_fk_rel(self):
"""
A ForeignKey can use limit_choices_to as a callable (#2554).
"""
stumpjokeform = StumpJokeForm()
self.assertSequenceEqual(stumpjokeform.fields['most_recently_fooled'].queryset, [self.threepwood])
def test_limit_choices_to_callable_for_m2m_rel(self):
"""
A ManyToManyField can use limit_choices_to as a callable (#2554).
"""
stumpjokeform = StumpJokeForm()
self.assertSequenceEqual(stumpjokeform.fields['most_recently_fooled'].queryset, [self.threepwood])
def test_custom_field_with_queryset_but_no_limit_choices_to(self):
"""
A custom field with a `queryset` attribute but no `limit_choices_to`
works (#23795).
"""
f = StumpJokeWithCustomFieldForm()
self.assertEqual(f.fields['custom'].queryset, 42)
def test_fields_for_model_applies_limit_choices_to(self):
fields = fields_for_model(StumpJoke, ['has_fooled_today'])
self.assertSequenceEqual(fields['has_fooled_today'].queryset, [self.threepwood])
def test_callable_called_each_time_form_is_instantiated(self):
field = StumpJokeForm.base_fields['most_recently_fooled']
with mock.patch.object(field, 'limit_choices_to') as today_callable_dict:
StumpJokeForm()
self.assertEqual(today_callable_dict.call_count, 1)
StumpJokeForm()
self.assertEqual(today_callable_dict.call_count, 2)
StumpJokeForm()
self.assertEqual(today_callable_dict.call_count, 3)
class FormFieldCallbackTests(SimpleTestCase):
def test_baseform_with_widgets_in_meta(self):
"""Regression for #13095: Using base forms with widgets defined in Meta should not raise errors."""
widget = forms.Textarea()
class BaseForm(forms.ModelForm):
class Meta:
model = Person
widgets = {'name': widget}
fields = "__all__"
Form = modelform_factory(Person, form=BaseForm)
self.assertIsInstance(Form.base_fields['name'].widget, forms.Textarea)
def test_factory_with_widget_argument(self):
""" Regression for #15315: modelform_factory should accept widgets
argument
"""
widget = forms.Textarea()
# Without a widget should not set the widget to textarea
Form = modelform_factory(Person, fields="__all__")
self.assertNotEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)
# With a widget should set the widget to textarea
Form = modelform_factory(Person, fields="__all__", widgets={'name': widget})
self.assertEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)
def test_modelform_factory_without_fields(self):
""" Regression for #19733 """
message = (
"Calling modelform_factory without defining 'fields' or 'exclude' "
"explicitly is prohibited."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
modelform_factory(Person)
def test_modelform_factory_with_all_fields(self):
""" Regression for #19733 """
form = modelform_factory(Person, fields="__all__")
self.assertEqual(list(form.base_fields), ["name"])
def test_custom_callback(self):
"""A custom formfield_callback is used if provided"""
callback_args = []
def callback(db_field, **kwargs):
callback_args.append((db_field, kwargs))
return db_field.formfield(**kwargs)
widget = forms.Textarea()
class BaseForm(forms.ModelForm):
class Meta:
model = Person
widgets = {'name': widget}
fields = "__all__"
modelform_factory(Person, form=BaseForm, formfield_callback=callback)
id_field, name_field = Person._meta.fields
self.assertEqual(callback_args, [(id_field, {}), (name_field, {'widget': widget})])
def test_bad_callback(self):
# A bad callback provided by user still gives an error
with self.assertRaises(TypeError):
modelform_factory(Person, fields="__all__", formfield_callback='not a function or callable')
def test_inherit_after_custom_callback(self):
def callback(db_field, **kwargs):
if isinstance(db_field, models.CharField):
return forms.CharField(widget=forms.Textarea)
return db_field.formfield(**kwargs)
class BaseForm(forms.ModelForm):
class Meta:
model = Person
fields = '__all__'
NewForm = modelform_factory(Person, form=BaseForm, formfield_callback=callback)
class InheritedForm(NewForm):
pass
for name in NewForm.base_fields:
self.assertEqual(
type(InheritedForm.base_fields[name].widget),
type(NewForm.base_fields[name].widget)
)
class LocalizedModelFormTest(TestCase):
def test_model_form_applies_localize_to_some_fields(self):
class PartiallyLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = ('left', 'right',)
fields = '__all__'
f = PartiallyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
self.assertTrue(f.is_valid())
self.assertTrue(f.fields['left'].localize)
self.assertFalse(f.fields['middle'].localize)
self.assertTrue(f.fields['right'].localize)
def test_model_form_applies_localize_to_all_fields(self):
class FullyLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = '__all__'
fields = '__all__'
f = FullyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
self.assertTrue(f.is_valid())
self.assertTrue(f.fields['left'].localize)
self.assertTrue(f.fields['middle'].localize)
self.assertTrue(f.fields['right'].localize)
def test_model_form_refuses_arbitrary_string(self):
msg = (
"BrokenLocalizedTripleForm.Meta.localized_fields "
"cannot be a string. Did you mean to type: ('foo',)?"
)
with self.assertRaisesMessage(TypeError, msg):
class BrokenLocalizedTripleForm(forms.ModelForm):
class Meta:
model = Triple
localized_fields = "foo"
class CustomMetaclass(ModelFormMetaclass):
def __new__(cls, name, bases, attrs):
new = super().__new__(cls, name, bases, attrs)
new.base_fields = {}
return new
class CustomMetaclassForm(forms.ModelForm, metaclass=CustomMetaclass):
pass
class CustomMetaclassTestCase(SimpleTestCase):
def test_modelform_factory_metaclass(self):
new_cls = modelform_factory(Person, fields="__all__", form=CustomMetaclassForm)
self.assertEqual(new_cls.base_fields, {})
class StrictAssignmentTests(SimpleTestCase):
"""
Should a model do anything special with __setattr__() or descriptors which
raise a ValidationError, a model form should catch the error (#24706).
"""
def test_setattr_raises_validation_error_field_specific(self):
"""
A model ValidationError using the dict form should put the error
message into the correct key of form.errors.
"""
form_class = modelform_factory(model=StrictAssignmentFieldSpecific, fields=['title'])
form = form_class(data={'title': 'testing setattr'}, files=None)
# This line turns on the ValidationError; it avoids the model erroring
# when its own __init__() is called when creating form.instance.
form.instance._should_error = True
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'title': ['Cannot set attribute', 'This field cannot be blank.']
})
def test_setattr_raises_validation_error_non_field(self):
"""
A model ValidationError not using the dict form should put the error
message into __all__ (i.e. non-field errors) on the form.
"""
form_class = modelform_factory(model=StrictAssignmentAll, fields=['title'])
form = form_class(data={'title': 'testing setattr'}, files=None)
# This line turns on the ValidationError; it avoids the model erroring
# when its own __init__() is called when creating form.instance.
form.instance._should_error = True
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'__all__': ['Cannot set attribute'],
'title': ['This field cannot be blank.']
})
class ModelToDictTests(TestCase):
def test_many_to_many(self):
"""Data for a ManyToManyField is a list rather than a lazy QuerySet."""
blue = Colour.objects.create(name='blue')
red = Colour.objects.create(name='red')
item = ColourfulItem.objects.create()
item.colours.set([blue])
data = model_to_dict(item)['colours']
self.assertEqual(data, [blue])
item.colours.set([red])
# If data were a QuerySet, it would be reevaluated here and give "red"
# instead of the original value.
self.assertEqual(data, [blue])
| 0.002147 |
#!/usr/bin/env python
'''
DigitalOcean external inventory script
======================================
Generates Ansible inventory of DigitalOcean Droplets.
In addition to the --list and --host options used by Ansible, there are options
for generating JSON of other DigitalOcean data. This is useful when creating
droplets. For example, --regions will return all the DigitalOcean Regions.
This information can also be easily found in the cache file, whose default
location is /tmp/ansible-digital_ocean.cache.
The --pretty (-p) option pretty-prints the output for better human readability.
----
Although the cache stores all the information received from DigitalOcean,
the cache is not used for current droplet information (in --list, --host,
--all, and --droplets). This is so that accurate droplet information is always
found. You can force this script to use the cache with --force-cache.
----
Configuration is read from `digital_ocean.ini`, then from environment variables,
and finally from command-line arguments.
Most notably, the DigitalOcean API Token must be specified. It can be specified
in the INI file or with the following environment variables:
export DO_API_TOKEN='abc123' or
export DO_API_KEY='abc123'
Alternatively, it can be passed on the command-line with --api-token.
If you specify DigitalOcean credentials in the INI file, a handy way to
get them into your environment (e.g., to use the digital_ocean module)
is to use the output of the --env option with export:
export $(digital_ocean.py --env)
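For illustration, a minimal digital_ocean.ini (using the section and option
names read by read_settings() below; the values are placeholders) could look like:
```
[digital_ocean]
api_token = abc123
cache_path = /tmp
cache_max_age = 300
```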
----
The following groups are generated from --list:
- ID (droplet ID)
- NAME (droplet NAME)
- image_ID
- image_NAME
- distro_NAME (distribution NAME from image)
- region_NAME
- size_NAME
- status_STATUS
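For example, the --list output groups each droplet's IP address under every
matching group name (all values below are illustrative):
```
{
"12345678": ["10.0.0.1"],
"web-1": ["10.0.0.1"],
"region_nyc3": ["10.0.0.1"],
"size_512mb": ["10.0.0.1"],
"status_active": ["10.0.0.1"]
}
```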
When run against a specific host, this script returns the following variables:
- do_backup_ids
- do_created_at
- do_disk
- do_features - list
- do_id
- do_image - object
- do_ip_address
- do_kernel - object
- do_locked
- do_memory
- do_name
- do_networks - object
- do_next_backup_window
- do_region - object
- do_size - object
- do_size_slug
- do_snapshot_ids - list
- do_status
- do_vcpus
-----
```
usage: digital_ocean.py [-h] [--list] [--host HOST] [--all]
[--droplets] [--regions] [--images] [--sizes]
[--ssh-keys] [--domains] [--pretty]
[--cache-path CACHE_PATH]
[--cache-max_age CACHE_MAX_AGE]
[--force-cache]
[--refresh-cache]
[--api-token API_TOKEN]
Produce an Ansible Inventory file based on DigitalOcean credentials
optional arguments:
-h, --help show this help message and exit
--list List all active Droplets as Ansible inventory
(default: True)
--host HOST Get all Ansible inventory variables about a specific
Droplet
--all List all DigitalOcean information as JSON
--droplets List Droplets as JSON
--regions List Regions as JSON
--images List Images as JSON
--sizes List Sizes as JSON
--ssh-keys List SSH keys as JSON
--domains List Domains as JSON
--pretty, -p Pretty-print results
--cache-path CACHE_PATH
Path to the cache files (default: .)
--cache-max_age CACHE_MAX_AGE
Maximum age of the cached items (default: 0)
--force-cache Only use data from the cache
--refresh-cache Force refresh of cache by making API requests to
DigitalOcean (default: False - use cache files)
--api-token API_TOKEN, -a API_TOKEN
DigitalOcean API Token
```
'''
# (c) 2013, Evan Wies <[email protected]>
#
# Inspired by the EC2 inventory plugin:
# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import os
import sys
import re
import argparse
from time import time
import ConfigParser
try:
import json
except ImportError:
import simplejson as json
try:
from dopy.manager import DoError, DoManager
except ImportError, e:
print "failed=True msg='`dopy` library required for this script'"
sys.exit(1)
class DigitalOceanInventory(object):
###########################################################################
# Main execution path
###########################################################################
def __init__(self):
''' Main execution path '''
# DigitalOceanInventory data
self.data = {} # All DigitalOcean data
self.inventory = {} # Ansible Inventory
# Define defaults
self.cache_path = '.'
self.cache_max_age = 0
# Read settings, environment variables, and CLI arguments
self.read_settings()
self.read_environment()
self.read_cli_args()
# Verify credentials were set
if not hasattr(self, 'api_token'):
print '''Could not find values for DigitalOcean api_token.
They must be specified via either ini file, command line argument (--api-token),
or environment variables (DO_API_TOKEN)'''
sys.exit(-1)
# env command, show DigitalOcean credentials
if self.args.env:
print "DO_API_TOKEN=%s" % self.api_token
sys.exit(0)
# Manage cache
self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache"
self.cache_refreshed = False
if self.is_cache_valid():
self.load_from_cache()
if len(self.data) == 0:
if self.args.force_cache:
print '''Cache is empty and --force-cache was specified'''
sys.exit(-1)
self.manager = DoManager(None, self.api_token, api_version=2)
# Pick the json_data to print based on the CLI command
if self.args.droplets:
self.load_from_digital_ocean('droplets')
json_data = {'droplets': self.data['droplets']}
elif self.args.regions:
self.load_from_digital_ocean('regions')
json_data = {'regions': self.data['regions']}
elif self.args.images:
self.load_from_digital_ocean('images')
json_data = {'images': self.data['images']}
elif self.args.sizes:
self.load_from_digital_ocean('sizes')
json_data = {'sizes': self.data['sizes']}
elif self.args.ssh_keys:
self.load_from_digital_ocean('ssh_keys')
json_data = {'ssh_keys': self.data['ssh_keys']}
elif self.args.domains:
self.load_from_digital_ocean('domains')
json_data = {'domains': self.data['domains']}
elif self.args.all:
self.load_from_digital_ocean()
json_data = self.data
elif self.args.host:
json_data = self.load_droplet_variables_for_host()
else: # '--list' is handled last so it acts as the default
self.load_from_digital_ocean('droplets')
self.build_inventory()
json_data = self.inventory
if self.cache_refreshed:
self.write_to_cache()
if self.args.pretty:
print json.dumps(json_data, sort_keys=True, indent=2)
else:
print json.dumps(json_data)
# That's all she wrote...
###########################################################################
# Script configuration
###########################################################################
def read_settings(self):
''' Reads the settings from the digital_ocean.ini file '''
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini')
# Credentials
if config.has_option('digital_ocean', 'api_token'):
self.api_token = config.get('digital_ocean', 'api_token')
# Cache related
if config.has_option('digital_ocean', 'cache_path'):
self.cache_path = config.get('digital_ocean', 'cache_path')
if config.has_option('digital_ocean', 'cache_max_age'):
self.cache_max_age = config.getint('digital_ocean', 'cache_max_age')
def read_environment(self):
''' Reads the settings from environment variables '''
# Setup credentials
if os.getenv("DO_API_TOKEN"):
self.api_token = os.getenv("DO_API_TOKEN")
if os.getenv("DO_API_KEY"):
self.api_token = os.getenv("DO_API_KEY")
def read_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials')
parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)')
parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet')
parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON')
parser.add_argument('--droplets','-d', action='store_true', help='List Droplets as JSON')
parser.add_argument('--regions', action='store_true', help='List Regions as JSON')
parser.add_argument('--images', action='store_true', help='List Images as JSON')
parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON')
parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON')
parser.add_argument('--domains', action='store_true',help='List Domains as JSON')
parser.add_argument('--pretty','-p', action='store_true', help='Pretty-print results')
parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)')
parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)')
parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache')
parser.add_argument('--refresh-cache','-r', action='store_true', default=False,
help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')
parser.add_argument('--env','-e', action='store_true', help='Display DO_API_TOKEN')
parser.add_argument('--api-token','-a', action='store', help='DigitalOcean API Token')
self.args = parser.parse_args()
if self.args.api_token:
self.api_token = self.args.api_token
# Make --list default if none of the other commands are specified
if (not self.args.droplets and not self.args.regions and
not self.args.images and not self.args.sizes and
not self.args.ssh_keys and not self.args.domains and
not self.args.all and not self.args.host):
self.args.list = True
###########################################################################
# Data Management
###########################################################################
def load_from_digital_ocean(self, resource=None):
'''Get JSON from DigitalOcean API'''
if self.args.force_cache:
return
# We always get fresh droplets
if self.is_cache_valid() and not (resource=='droplets' or resource is None):
return
if self.args.refresh_cache:
resource=None
if resource == 'droplets' or resource is None:
self.data['droplets'] = self.manager.all_active_droplets()
self.cache_refreshed = True
if resource == 'regions' or resource is None:
self.data['regions'] = self.manager.all_regions()
self.cache_refreshed = True
if resource == 'images' or resource is None:
self.data['images'] = self.manager.all_images(filter=None)
self.cache_refreshed = True
if resource == 'sizes' or resource is None:
self.data['sizes'] = self.manager.sizes()
self.cache_refreshed = True
if resource == 'ssh_keys' or resource is None:
self.data['ssh_keys'] = self.manager.all_ssh_keys()
self.cache_refreshed = True
if resource == 'domains' or resource is None:
self.data['domains'] = self.manager.all_domains()
self.cache_refreshed = True
def build_inventory(self):
'''Build Ansible inventory of droplets'''
self.inventory = {}
# add all droplets by id and name
for droplet in self.data['droplets']:
dest = droplet['ip_address']
self.inventory[droplet['id']] = [dest]
self.push(self.inventory, droplet['name'], dest)
self.push(self.inventory, 'region_' + droplet['region']['slug'], dest)
self.push(self.inventory, 'image_' + str(droplet['image']['id']), dest)
self.push(self.inventory, 'size_' + droplet['size']['slug'], dest)
image_slug = droplet['image']['slug']
if image_slug:
self.push(self.inventory, 'image_' + self.to_safe(image_slug), dest)
else:
image_name = droplet['image']['name']
if image_name:
self.push(self.inventory, 'image_' + self.to_safe(image_name), dest)
self.push(self.inventory, 'distro_' + self.to_safe(droplet['image']['distribution']), dest)
self.push(self.inventory, 'status_' + droplet['status'], dest)
def load_droplet_variables_for_host(self):
'''Generate a JSON response to a --host call'''
host = int(self.args.host)
droplet = self.manager.show_droplet(host)
# Put all the information in a 'do_' namespace
info = {}
for k, v in droplet.items():
info['do_'+k] = v
return {'droplet': info}
###########################################################################
# Cache Management
###########################################################################
def is_cache_valid(self):
''' Determine whether the cache file has expired or is still valid '''
if os.path.isfile(self.cache_filename):
mod_time = os.path.getmtime(self.cache_filename)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
return True
return False
def load_from_cache(self):
''' Reads the data from the cache file and assigns it to member variables as Python Objects'''
try:
cache = open(self.cache_filename, 'r')
json_data = cache.read()
cache.close()
data = json.loads(json_data)
except IOError:
data = {'data': {}, 'inventory': {}}
self.data = data['data']
self.inventory = data['inventory']
def write_to_cache(self):
''' Writes data in JSON format to a file '''
data = { 'data': self.data, 'inventory': self.inventory }
json_data = json.dumps(data, sort_keys=True, indent=2)
cache = open(self.cache_filename, 'w')
cache.write(json_data)
cache.close()
###########################################################################
# Utilities
###########################################################################
def push(self, my_dict, key, element):
''' Push an element onto a list that may not yet be defined in the dict '''
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
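# e.g. to_safe("Ubuntu 14.04 x64") -> "Ubuntu_14.04_x64"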
return re.sub("[^A-Za-z0-9\-\.]", "_", word)
###########################################################################
# Run the script
DigitalOceanInventory()
| 0.003719 |
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import time
try:
from novaclient.v1_1 import client as nova_client
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_floating_ip_associate
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_floating_ip instead
short_description: Associate or disassociate a particular floating IP with an instance
description:
- Associates or disassociates a specific floating IP with a particular instance
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- the tenant name of the login user
required: true
default: true
auth_url:
description:
- the keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- name of the region
required: false
default: None
state:
description:
- indicates the desired state of the resource
choices: ['present', 'absent']
default: present
instance_name:
description:
- name of the instance to which the public IP should be assigned
required: true
default: None
ip_address:
description:
- floating ip that should be assigned to the instance
required: true
default: None
requirements:
- "python >= 2.6"
- "python-novaclient"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
# Associate a specific floating IP with an Instance
- quantum_floating_ip_associate:
state=present
login_username=admin
login_password=admin
login_tenant_name=admin
ip_address=1.1.1.1
instance_name=vm1
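# Illustrative only: disassociate the same floating IP again (state=absent is
# handled by the module code below)
- quantum_floating_ip_associate:
state=absent
login_username=admin
login_password=admin
login_tenant_name=admin
ip_address=1.1.1.1
instance_name=vm1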
'''
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _get_server_state(module, nova):
server_info = None
server = None
try:
for server in nova.servers.list():
if server:
info = server._info
if info['name'] == module.params['instance_name']:
if info['status'] != 'ACTIVE' and module.params['state'] == 'present':
module.fail_json(msg="The VM is available but not Active. state:" + info['status'])
server_info = info
break
except Exception, e:
module.fail_json(msg = "Error in getting the server list: %s" % e.message)
return server_info, server
def _get_port_id(neutron, module, instance_id):
kwargs = dict(device_id = instance_id)
try:
ports = neutron.list_ports(**kwargs)
except Exception, e:
module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']:
return None
return ports['ports'][0]['id']
def _get_floating_ip_id(module, neutron):
kwargs = {
'floating_ip_address': module.params['ip_address']
}
try:
ips = neutron.list_floatingips(**kwargs)
except Exception, e:
module.fail_json(msg = "error in fetching the floatingips's %s" % e.message)
if not ips['floatingips']:
module.fail_json(msg = "Could find the ip specified in parameter, Please check")
ip = ips['floatingips'][0]['id']
if not ips['floatingips'][0]['port_id']:
state = "detached"
else:
state = "attached"
return state, ip
def _update_floating_ip(neutron, module, port_id, floating_ip_id):
kwargs = {
'port_id': port_id
}
try:
result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs})
except Exception, e:
module.fail_json(msg = "There was an error in updating the floating ip address: %s" % e.message)
module.exit_json(changed = True, result = result, public_ip=module.params['ip_address'])
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
ip_address = dict(required=True),
instance_name = dict(required=True),
state = dict(default='present', choices=['absent', 'present'])
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-novaclient, python-keystoneclient, and either python-neutronclient or python-quantumclient are required')
try:
nova = nova_client.Client(module.params['login_username'], module.params['login_password'],
module.params['login_tenant_name'], module.params['auth_url'], service_type='compute')
except Exception, e:
module.fail_json( msg = " Error in authenticating to nova: %s" % e.message)
neutron = _get_neutron_client(module, module.params)
state, floating_ip_id = _get_floating_ip_id(module, neutron)
if module.params['state'] == 'present':
if state == 'attached':
module.exit_json(changed = False, result = 'attached', public_ip=module.params['ip_address'])
server_info, server_obj = _get_server_state(module, nova)
if not server_info:
module.fail_json(msg = " The instance name provided cannot be found")
port_id = _get_port_id(neutron, module, server_info['id'])
if not port_id:
module.fail_json(msg = "Cannot find a port for this instance, maybe fixed ip is not assigned")
_update_floating_ip(neutron, module, port_id, floating_ip_id)
if module.params['state'] == 'absent':
if state == 'detached':
module.exit_json(changed = False, result = 'detached')
if state == 'attached':
_update_floating_ip(neutron, module, None, floating_ip_id)
module.exit_json(changed = True, result = "detached")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| 0.010924 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Testing Admin Linux (Ubuntu)
Copyright 2014 Li Yun <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import admin
from admin import shell
from admin import build
import unittest
class AdminTestCase(unittest.TestCase):
'''Test Case of admin module.
'''
def setUp(self):
pass
def tearDown(self):
pass
def test_update_seq_type(self):
seq = [1, 2, 3]
admin.update_seq_type(seq, str)
for item in seq:
self.assertIsInstance(item, str)
class AdminShellTestCase(unittest.TestCase):
'''Test Case of admin.shell module.
'''
def setUp(self):
pass
def tearDown(self):
pass
def test_ShellError(self):
with self.assertRaises(admin.AdminError):
raise admin.AdminError(KeyError)
def test_cpu_cores(self):
print('CPU Cores: ' + str(shell.cpu_cores()))
def test_read_lines(self):
# Invalid file name
with self.assertRaises(TypeError):
for line in shell.read_lines(None, 0):
pass
# File not exists
with self.assertRaises(IOError):
for line in shell.read_lines('Not-exist-file', 0):
pass
# Normal
banner_line = '#!/usr/bin/env python3'
for line in shell.read_lines(__file__, 1):
self.assertEqual(line, banner_line)
for index, line in enumerate(shell.read_lines(__file__, [1, 2, 3])):
self.assertIn(index, [0, 1, 2])
if index == 0:
self.assertEqual(line, banner_line)
elif index == 1:
self.assertEqual(line, '# -*- coding: utf-8 -*-')
elif index == 2:
self.assertEqual(line, '')
class AdminBuildTestCase(unittest.TestCase):
'''Test Case of admin.build module.
'''
def setUp(self):
self.version_info = 'Python 2.7.8'
self.version_prefix = 'Python'
def tearDown(self):
pass
def test_decode_version(self):
v = build.decode_version(self.version_info, self.version_prefix)
self.assertIsInstance(v, build.Version)
self.assertEqual(v.major, 2)
self.assertEqual(v.minor, 7)
self.assertEqual(v.patch, 8)
def test_match(self):
v = build.decode_version(self.version_info, self.version_prefix)
self.assertTrue(build.match_version(v, '2.7.8'))
self.assertTrue(build.match_version(v, '2.7.6'))
self.assertFalse(build.match_version(v, '2.7.9'))
self.assertTrue(build.match_version(v, '2.6.8'))
self.assertTrue(build.match_version(v, '2.6.6'))
self.assertTrue(build.match_version(v, '2.6.9'))
self.assertFalse(build.match_version(v, '2.8.8'))
self.assertFalse(build.match_version(v, '2.8.6'))
self.assertFalse(build.match_version(v, '2.8.9'))
self.assertTrue(build.match_version(v, '1.7.8'))
self.assertTrue(build.match_version(v, '1.8.8'))
self.assertTrue(build.match_version(v, '1.6.8'))
self.assertTrue(build.match_version(v, '1.7.6'))
self.assertTrue(build.match_version(v, '1.7.9'))
self.assertFalse(build.match_version(v, '3.7.8'))
self.assertFalse(build.match_version(v, '3.8.8'))
self.assertFalse(build.match_version(v, '3.6.8'))
self.assertFalse(build.match_version(v, '3.7.6'))
self.assertFalse(build.match_version(v, '3.7.9'))
if __name__ == '__main__':
unittest.main()
| 0.008965 |
#Synonyms experiment. Pass a string to see its "synonyms"
from pyspark.sql import SparkSession, Row
from pyspark.ml.feature import Word2Vec, Tokenizer, StopWordsRemover, Word2VecModel
import sys
from string import punctuation
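# strip_punctuation drops every punctuation character from each string,
# e.g. strip_punctuation(["don't", "stop!"]) -> ["dont", "stop"]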
def strip_punctuation(arr):
return [''.join(c for c in s if c not in punctuation) for s in arr]
def main():
spark = SparkSession.builder \
.appName("Spark CV-job ad matching") \
.config("spark.some.config.option", "some-value") \
.master("local[*]") \
.getOrCreate()
df_categories = spark.read.json("allcategories4rdd/allcategories.jsonl")
tokenizer = Tokenizer(inputCol="skillText", outputCol="words")
tokenized = tokenizer.transform(df_categories)
remover = StopWordsRemover(inputCol="words", outputCol="filtered")
removed = remover.transform(tokenized)
stripped = removed.select('filtered').rdd.map(lambda x: strip_punctuation(x[0]))\
.map(lambda x: Row(filtered=x)).toDF(['filtered'])
# word2vec = Word2Vec(vectorSize=100, inputCol="filtered", outputCol="result")
# model = word2vec.fit(stripped)
#model.save("word2vec-model")
model = Word2VecModel.load("word2vec-model")
synonyms = model.findSynonyms(sys.argv[1], 10)
synonyms.show(truncate=False)
# for word, cosine_distance in synonyms:
# print("{}: {}".format(word, cosine_distance))
if __name__ == '__main__':
main()
| 0.007047 |
'''
Icon generator
==============
This tool helps you generate all the icon sizes required by the Google Play
Store, the App Store, and the Amazon store.
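Example invocation (the script and icon file names here are only placeholders):
python icons.py --dir output/ my-icon-1024.png
The resized icons are written into the ios/, android/ and amazon/ subdirectories
of the output directory.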
'''
import sys
from PIL import Image
from os.path import exists, join, realpath, basename, dirname
from os import makedirs
from argparse import ArgumentParser
class Converter(object):
converters = {
'appstore': {
'directory_name': 'ios',
'sizes': [
('App store high resolution', '{}-appstore-1024.png', 1024),
('App store normal resolution', '{}-appstore-512.png', 512),
# iOS 7
('iPhone (iOS 7)', '{}-60.png', 120),
('iPhone @2 (iOS 7)', '{}-60@2x.png', 120),
('iPad (iOS 7)', '{}-76.png', 76),
('iPad @2 (iOS 7)', '{}-76@2x.png', 152),
# iOS 6.1 and earlier
('iPhone (iOS >= 6.1)', '{}-57.png', 57),
('iPhone @2 (iOS >= 6.1)', '{}-57@2x.png', 114),
('iPad (iOS >= 6.1)', '{}-72.png', 72),
('iPad @2 (iOS >= 6.1)', '{}-72@2x.png', 144),
# iTunes artwork (ad-hoc)
('iTunes Artwork (ad-hoc)', 'iTunesArtwork', 512),
('iTunes Artwork @2 (ad-hoc)', 'iTunesArtwork@2x', 1024),
]},
'playstore': {
'directory_name': 'android',
'sizes': [
('Google Play icon', '{}-googleplay-512.png', 512),
('Launcher icon MDPI', '{}-48.png', 48),
('Launcher icon HDPI', '{}-72.png', 72),
('Launcher icon XHDPI', '{}-96.png', 96),
('Launcher icon XXHDPI', '{}-144.png', 144),
('Launcher icon XXXHDPI', '{}-192.png', 192),
]},
'amazonstore': {
'directory_name': 'amazon',
'sizes': [
('Small icon', '{}-114.png', 114),
('Large icon', '{}-512.png', 512),
]}}
def run(self):
parser = ArgumentParser(
description='Generate icons for various stores')
parser.add_argument('--dir', type=str, default=None,
help=('Output directory to generate all the icons,'
'defaults to the directory of the source icon'))
parser.add_argument('--force', type=bool, default=False,
help=('Generate all icons even if the source is not perfect.'))
parser.add_argument('icon', type=str,
help='Base icon (must be 1024x1024 or 512x512)')
args = parser.parse_args()
if not exists(args.icon):
print('Error: No such icon file')
sys.exit(1)
# ensure the destination directory will be set
if args.dir is None:
args.dir = dirname(args.icon)
# read the source image, and do some quality checks
base_fn = basename(args.icon).rsplit('.', 1)[0]
source = Image.open(args.icon)
self.ensure_quality(source, args.force)
for directory_name, sizeinfo in self.iterate():
description, pattern_fn, size = sizeinfo
print('Generate {}: {}x{}'.format(description, size, size))
dest_dir = realpath(join(args.dir, directory_name))
if not exists(dest_dir):
makedirs(dest_dir)
icon_fn = join(dest_dir, pattern_fn.format('Icon'))
self.convert_to(source, icon_fn, size)
def convert_to(self, source, icon_fn, size):
dest = source.resize((size, size))
dest.save(icon_fn, 'png')
def ensure_quality(self, image, force=False):
messages = []
w, h = image.size
if w != h:
messages.append('Width and height should be the same')
if w not in (512, 1024):
messages.append(
'Source image is recommended to be 1024 (512 minimum)')
if not messages:
return
print('Quality check failed')
for message in messages:
print('- {}'.format(message))
if not force:
sys.exit(1)
def iterate(self):
for store, infos in Converter.converters.items():
for size in infos['sizes']:
yield infos['directory_name'], size
if __name__ == '__main__':
Converter().run()
| 0.000692 |
import re
from collections import OrderedDict
class RemotesMixin():
def get_remotes(self):
"""
Get a list of remotes, provided as tuples of remote name and remote
url/resource.
"""
entries = self.git("remote", "-v").splitlines()
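# `git remote -v` prints one fetch and one push line per remote, with the name
# and URL separated by a tab, e.g. "origin<TAB>https://example.com/repo.git (fetch)";
# the regex below keeps the name/URL pair and the OrderedDict collapses the duplicates.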
return OrderedDict(re.match("([0-9a-zA-Z_-]+)\t([^ ]+)", entry).groups() for entry in entries)
def fetch(self, remote=None, prune=True):
"""
If provided, fetch all changes from `remote`. Otherwise, fetch
changes from all remotes.
"""
self.git("fetch", "--prune" if prune else None, remote if remote else "--all")
def get_remote_branches(self):
"""
Return a list of all known branches on remotes.
"""
stdout = self.git("branch", "-r", "--no-color")
return [branch.strip() for branch in stdout.split("\n") if branch]
def pull(self, remote=None, branch=None):
"""
Pull from the specified remote and branch if provided, otherwise
perform default `git pull`.
"""
self.git("pull", remote, branch)
def push(self, remote=None, branch=None, force=False, local_branch=None, set_upstream=False):
"""
Push to the specified remote and branch if provided, otherwise
perform default `git push`.
"""
return self.git(
"push",
"--force" if force else None,
"--set-upstream" if set_upstream else None,
remote,
branch if not local_branch else "{}:{}".format(local_branch, branch)
)
def get_upstream_for_active_branch(self):
"""
Return ref for remote tracking branch.
"""
return self.git("rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}", throw_on_stderr=False)
| 0.00274 |
# load projection and helper functions
import numpy as np
import skymapper as skm
def getCatalog(size=10000, survey=None):
# dummy catalog: uniform on sphere
# Marsaglia (1972)
xyz = np.random.normal(size=(size, 3))
r = np.sqrt((xyz**2).sum(axis=1))
dec = np.arccos(xyz[:,2]/r) / skm.DEG2RAD - 90
ra = - np.arctan2(xyz[:,0], xyz[:,1]) / skm.DEG2RAD
if survey is not None:
inside = survey.contains(ra, dec)
ra = ra[inside]
dec = dec[inside]
return ra, dec
def makeHealpixMap(ra, dec, nside=1024, nest=False):
# convert a ra/dec catalog into healpix map with counts per cell
import healpy as hp
ipix = hp.ang2pix(nside, (90-dec)/180*np.pi, ra/180*np.pi, nest=nest)
return np.bincount(ipix, minlength=hp.nside2npix(nside))
def getHealpixCoords(pixels, nside, nest=False):
# convert healpix cell indices to center ra/dec
import healpy as hp
theta, phi = hp.pix2ang(nside, pixels, nest=nest)
return phi * 180. / np.pi, 90 - theta * 180. / np.pi
if __name__ == "__main__":
# load RA/Dec from catalog
size = 100000
des = skm.survey.DES()
ra, dec = getCatalog(size, survey=des)
# define the best Albers projection for the footprint
# minimizing the variation in distortion
crit = skm.stdDistortion
proj = skm.Albers.optimize(ra, dec, crit=crit)
# construct map: will hold figure and projection
# the outline of the sphere can be styled with kwargs for matplotlib Polygon
map = skm.Map(proj)
# add graticules, separated by 15 deg
# the lines can be styled with kwargs for matplotlib Line2D
# additional arguments for formatting the graticule labels
sep=15
map.grid(sep=sep)
# # add footprint, retain the polygon for clipping
# footprint = map.footprint("DES", zorder=20, edgecolor='#2222B2', facecolor='None', lw=1)
#
#### 1. plot density in healpix cells ####
nside = 32
mappable = map.density(ra, dec, nside=nside)
cb = map.colorbar(mappable, cb_label="$n$ [arcmin$^{-2}$]")
# add random scatter plot
nsamples = 10
size = 100*np.random.rand(nsamples)
map.scatter(ra[:nsamples], dec[:nsamples], s=size, edgecolor='k', facecolor='None')
# focus on relevant region
map.focus(ra, dec)
# entitle: access mpl figure
map.title('Density with random scatter')
# copy map without data contents
map2 = map.clone()
#### 2. show map distortion over the survey ####
a,b = proj.distortion(ra, dec)
mappable2 = map2.hexbin(ra, dec, C=1-np.abs(b/a), vmin=0, vmax=0.3, cmap='RdYlBu_r')
cb2 = map2.colorbar(mappable2, cb_label='Distortion')
map2.title('Projection distortion')
#### 3. extrapolate RA over all sky ####
map3 = skm.Map(proj)
# show with 45 deg graticules
sep=45
map3.grid(sep=sep)
# alter number of labels at the south pole
map3.labelMeridiansAtParallel(-90, size=8, meridians=np.arange(0,360,90))
# this is slow when working with lots of samples...
mappable3 = map3.extrapolate(ra[::100], dec[::100], dec[::100], resolution=100)
cb3 = map3.colorbar(mappable3, cb_label='Dec')
# add footprint shade
footprint3 = map3.footprint(des, nside=nside, zorder=20, facecolors='w', alpha=0.3)
map3.title('Extrapolation on the sphere')
#### 4. test Healpix map functions ####
map4 = map.clone()
# simply bin the counts of ra/dec
m = makeHealpixMap(ra, dec, nside=nside)
mappable4 = map4.healpix(m, cmap="YlOrRd")
cb4 = map4.colorbar(mappable4, cb_label="Healpix cell count")
map4.title('Healpix map')
| 0.005798 |
"""Core implementation of import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
#
# IMPORTANT: Whenever making changes to this module, be sure to run
# a top-level make in order to get the frozen version of the module
# update. Not doing so, will result in the Makefile to fail for
# all others who don't have a ./python around to freeze the module
# in the early stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# XXX Make sure all public names have no single leading underscore and all
# others do.
# Bootstrap-related code ######################################################
_CASE_INSENSITIVE_PLATFORMS = 'win', 'cygwin', 'darwin'
def _make_relax_case():
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return b'PYTHONCASEOK' in _os.environ
else:
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return False
return _relax_case
# TODO: Expose from marshal
def _w_long(x):
"""Convert a 32-bit integer to little-endian.
XXX Temporary until marshal's long functions are exposed.
"""
x = int(x)
int_bytes = []
int_bytes.append(x & 0xFF)
int_bytes.append((x >> 8) & 0xFF)
int_bytes.append((x >> 16) & 0xFF)
int_bytes.append((x >> 24) & 0xFF)
return bytearray(int_bytes)
# TODO: Expose from marshal
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer.
XXX Temporary until marshal's long function are exposed.
"""
x = int_bytes[0]
x |= int_bytes[1] << 8
x |= int_bytes[2] << 16
x |= int_bytes[3] << 24
return x
def _path_join(*path_parts):
"""Replacement for os.path.join()."""
new_parts = []
for part in path_parts:
if not part:
continue
new_parts.append(part)
if part[-1] not in path_separators:
new_parts.append(path_sep)
return ''.join(new_parts[:-1]) # Drop superfluous path separator.
def _path_split(path):
"""Replacement for os.path.split()."""
for x in reversed(path):
if x in path_separators:
sep = x
break
else:
sep = path_sep
front, _, tail = path.rpartition(sep)
return front, tail
def _path_is_mode_type(path, mode):
"""Test whether the path is the specified mode type."""
try:
stat_info = _os.stat(path)
except OSError:
return False
return (stat_info.st_mode & 0o170000) == mode
# XXX Could also expose Modules/getpath.c:isfile()
def _path_isfile(path):
"""Replacement for os.path.isfile."""
return _path_is_mode_type(path, 0o100000)
# XXX Could also expose Modules/getpath.c:isdir()
def _path_isdir(path):
"""Replacement for os.path.isdir."""
if not path:
path = _os.getcwd()
return _path_is_mode_type(path, 0o040000)
def _write_atomic(path, data, mode=0o666):
"""Best-effort function to write data to a path atomically.
Be prepared to handle a FileExistsError if concurrent writing of the
temporary file is attempted."""
# id() is used to generate a pseudo-random filename.
path_tmp = '{}.{}'.format(path, id(path))
fd = _os.open(path_tmp,
_os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666)
try:
# We first write data to a temporary file, and then use os.replace() to
# perform an atomic rename.
with _io.FileIO(fd, 'wb') as file:
file.write(data)
_os.replace(path_tmp, path)
except OSError:
try:
_os.unlink(path_tmp)
except OSError:
pass
raise
def _wrap(new, old):
"""Simple substitute for functools.update_wrapper."""
for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
if hasattr(old, replace):
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
_code_type = type(_wrap.__code__)
def new_module(name):
"""Create a new module.
The module is not entered into sys.modules.
"""
return type(_io)(name)
# Module-level locking ########################################################
# A dict mapping module names to weakrefs of _ModuleLock instances
_module_locks = {}
# A dict mapping thread ids to _ModuleLock instances
_blocking_on = {}
class _DeadlockError(RuntimeError):
pass
class _ModuleLock:
"""A recursive lock implementation which is able to detect deadlocks
(e.g. thread 1 trying to take locks A then B, and thread 2 trying to
take locks B then A).
"""
def __init__(self, name):
self.lock = _thread.allocate_lock()
self.wakeup = _thread.allocate_lock()
self.name = name
self.owner = None
self.count = 0
self.waiters = 0
def has_deadlock(self):
# Deadlock avoidance for concurrent circular imports.
me = _thread.get_ident()
tid = self.owner
while True:
lock = _blocking_on.get(tid)
if lock is None:
return False
tid = lock.owner
if tid == me:
return True
def acquire(self):
"""
Acquire the module lock. If a potential deadlock is detected,
a _DeadlockError is raised.
Otherwise, the lock is always acquired and True is returned.
"""
tid = _thread.get_ident()
_blocking_on[tid] = self
try:
while True:
with self.lock:
if self.count == 0 or self.owner == tid:
self.owner = tid
self.count += 1
return True
if self.has_deadlock():
raise _DeadlockError("deadlock detected by %r" % self)
if self.wakeup.acquire(False):
self.waiters += 1
# Wait for a release() call
self.wakeup.acquire()
self.wakeup.release()
finally:
del _blocking_on[tid]
def release(self):
tid = _thread.get_ident()
with self.lock:
if self.owner != tid:
raise RuntimeError("cannot release un-acquired lock")
assert self.count > 0
self.count -= 1
if self.count == 0:
self.owner = None
if self.waiters:
self.waiters -= 1
self.wakeup.release()
def __repr__(self):
return "_ModuleLock(%r) at %d" % (self.name, id(self))
class _DummyModuleLock:
"""A simple _ModuleLock equivalent for Python builds without
multi-threading support."""
def __init__(self, name):
self.name = name
self.count = 0
def acquire(self):
self.count += 1
return True
def release(self):
if self.count == 0:
raise RuntimeError("cannot release un-acquired lock")
self.count -= 1
def __repr__(self):
return "_DummyModuleLock(%r) at %d" % (self.name, id(self))
# The following two functions are for consumption by Python/import.c.
def _get_module_lock(name):
"""Get or create the module lock for a given module name.
Should only be called with the import lock taken."""
lock = None
try:
lock = _module_locks[name]()
except KeyError:
pass
if lock is None:
if _thread is None:
lock = _DummyModuleLock(name)
else:
lock = _ModuleLock(name)
def cb(_):
del _module_locks[name]
_module_locks[name] = _weakref.ref(lock, cb)
return lock
def _lock_unlock_module(name):
"""Release the global import lock, and acquires then release the
module lock for a given module name.
This is used to ensure a module is completely initialized, in the
event it is being imported by another thread.
Should only be called with the import lock taken."""
lock = _get_module_lock(name)
_imp.release_lock()
try:
lock.acquire()
except _DeadlockError:
# Concurrent circular import, we'll accept a partially initialized
# module object.
pass
else:
lock.release()
# Frame stripping magic ###############################################
def _call_with_frames_removed(f, *args, **kwds):
"""remove_importlib_frames in import.c will always remove sequences
    of importlib frames that end with a call to this function.
Use it instead of a normal call in places where including the importlib
frames introduces unwanted noise into the traceback (e.g. when executing
module code)
"""
return f(*args, **kwds)
# Finder/loader utility code ###############################################
"""Magic word to reject .pyc files generated by other Python versions.
It should change for each incompatible change to the bytecode.
The value of CR and LF is incorporated so if you ever read or write
a .pyc file in text mode the magic number will be wrong; also, the
Apple MPW compiler swaps their values, botching string constants.
The magic numbers must be spaced apart by at least 2 values, as the
-U interpreter flag will cause MAGIC+1 to be used. They have been
odd numbers for some time now.
There were a variety of old schemes for setting the magic number.
The current working scheme is to increment the previous value by
10.
Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic
number also includes a new "magic tag", i.e. a human readable string used
to represent the magic number in __pycache__ directories. When you change
the magic number, you must also set a new unique magic tag. Generally this
can be named after the Python major version of the magic number bump, but
it can really be anything, as long as it's different than anything else
that's come before. The tags are included in the following table, starting
with Python 3.2a0.
Known values:
Python 1.5: 20121
Python 1.5.1: 20121
Python 1.5.2: 20121
Python 1.6: 50428
Python 2.0: 50823
Python 2.0.1: 50823
Python 2.1: 60202
Python 2.1.1: 60202
Python 2.1.2: 60202
Python 2.2: 60717
Python 2.3a0: 62011
Python 2.3a0: 62021
Python 2.3a0: 62011 (!)
Python 2.4a0: 62041
Python 2.4a3: 62051
Python 2.4b1: 62061
Python 2.5a0: 62071
Python 2.5a0: 62081 (ast-branch)
Python 2.5a0: 62091 (with)
Python 2.5a0: 62092 (changed WITH_CLEANUP opcode)
Python 2.5b3: 62101 (fix wrong code: for x, in ...)
Python 2.5b3: 62111 (fix wrong code: x += yield)
Python 2.5c1: 62121 (fix wrong lnotab with for loops and
storing constants that should have been removed)
Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp)
Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode)
Python 2.6a1: 62161 (WITH_CLEANUP optimization)
Python 3000: 3000
3010 (removed UNARY_CONVERT)
3020 (added BUILD_SET)
3030 (added keyword-only parameters)
3040 (added signature annotations)
3050 (print becomes a function)
3060 (PEP 3115 metaclass syntax)
3061 (string literals become unicode)
3071 (PEP 3109 raise changes)
3081 (PEP 3137 make __file__ and __name__ unicode)
3091 (kill str8 interning)
3101 (merge from 2.6a0, see 62151)
3103 (__file__ points to source file)
Python 3.0a4: 3111 (WITH_CLEANUP optimization).
Python 3.0a5: 3131 (lexical exception stacking, including POP_EXCEPT)
Python 3.1a0: 3141 (optimize list, set and dict comprehensions:
change LIST_APPEND and SET_ADD, add MAP_ADD)
Python 3.1a0: 3151 (optimize conditional branches:
introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
Python 3.2a0: 3160 (add SETUP_WITH)
tag: cpython-32
Python 3.2a1: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR)
tag: cpython-32
Python 3.2a2 3180 (add DELETE_DEREF)
Python 3.3a0 3190 __class__ super closure changed
Python 3.3a0 3200 (__qualname__ added)
3210 (added size modulo 2**32 to the pyc header)
Python 3.3a1 3220 (changed PEP 380 implementation)
Python 3.3a4 3230 (revert changes to implicit __class__ closure)
MAGIC must change whenever the bytecode emitted by the compiler may no
longer be understood by older implementations of the eval loop (usually
due to the addition of new opcodes).
"""
_RAW_MAGIC_NUMBER = 3230 | ord('\r') << 16 | ord('\n') << 24
_MAGIC_BYTES = bytes(_RAW_MAGIC_NUMBER >> n & 0xff for n in range(0, 25, 8))
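# For illustration: with the value above (3230, Python 3.3a4) the header word
# works out to
#
#   _RAW_MAGIC_NUMBER == 3230 | 0x0d << 16 | 0x0a << 24
#   _MAGIC_BYTES      == b'\x9e\x0c\r\n'    # 3230 == 0x0c9e little-endian,
#                                           # followed by CR LF
#
# so reading or writing a .pyc file in text mode mangles the trailing b'\r\n'
# and the magic-number check in _bytes_from_bytecode() fails, as intended.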
_PYCACHE = '__pycache__'
SOURCE_SUFFIXES = ['.py'] # _setup() adds .pyw as needed.
DEBUG_BYTECODE_SUFFIXES = ['.pyc']
OPTIMIZED_BYTECODE_SUFFIXES = ['.pyo']
def cache_from_source(path, debug_override=None):
"""Given the path to a .py file, return the path to its .pyc/.pyo file.
The .py file does not need to exist; this simply returns the path to the
.pyc/.pyo file calculated as if the .py file were imported. The extension
will be .pyc unless sys.flags.optimize is non-zero, then it will be .pyo.
If debug_override is not None, then it must be a boolean and is used in
place of sys.flags.optimize.
If sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
debug = not sys.flags.optimize if debug_override is None else debug_override
if debug:
suffixes = DEBUG_BYTECODE_SUFFIXES
else:
suffixes = OPTIMIZED_BYTECODE_SUFFIXES
head, tail = _path_split(path)
base_filename, sep, _ = tail.partition('.')
tag = sys.implementation.cache_tag
if tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
filename = ''.join([base_filename, sep, tag, suffixes[0]])
return _path_join(head, _PYCACHE, filename)
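# Hypothetical example, assuming sys.implementation.cache_tag == 'cpython-33'
# and sys.flags.optimize == 0:
#
#   cache_from_source('/usr/lib/python3.3/os.py')
#       -> '/usr/lib/python3.3/__pycache__/os.cpython-33.pyc'
#
# With optimization enabled (or debug_override=False) the suffix becomes .pyo.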
def source_from_cache(path):
"""Given the path to a .pyc./.pyo file, return the path to its .py file.
The .pyc/.pyo file does not need to exist; this simply returns the path to
the .py file calculated to correspond to the .pyc/.pyo file. If path does
not conform to PEP 3147 format, ValueError will be raised. If
sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
if sys.implementation.cache_tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
head, pycache_filename = _path_split(path)
head, pycache = _path_split(head)
if pycache != _PYCACHE:
raise ValueError('{} not bottom-level directory in '
'{!r}'.format(_PYCACHE, path))
if pycache_filename.count('.') != 2:
raise ValueError('expected only 2 dots in '
'{!r}'.format(pycache_filename))
base_filename = pycache_filename.partition('.')[0]
return _path_join(head, base_filename + SOURCE_SUFFIXES[0])
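# The inverse mapping, under the same hypothetical cache tag:
#
#   source_from_cache('/usr/lib/python3.3/__pycache__/os.cpython-33.pyc')
#       -> '/usr/lib/python3.3/os.py'
#
# A path outside a __pycache__ directory (e.g. '/usr/lib/python3.3/os.pyc')
# raises ValueError.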
def _get_sourcefile(bytecode_path):
"""Convert a bytecode file path to a source path (if possible).
This function exists purely for backwards-compatibility for
PyImport_ExecCodeModuleWithFilenames() in the C API.
"""
if len(bytecode_path) == 0:
return None
rest, _, extension = bytecode_path.rpartition('.')
if not rest or extension.lower()[-3:-1] != 'py':
return bytecode_path
try:
source_path = source_from_cache(bytecode_path)
except (NotImplementedError, ValueError):
source_path = bytecode_path[:-1]
return source_path if _path_isfile(source_path) else bytecode_path
def _verbose_message(message, *args, verbosity=1):
"""Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
def set_package(fxn):
"""Set __package__ on the returned module."""
def set_package_wrapper(*args, **kwargs):
module = fxn(*args, **kwargs)
if getattr(module, '__package__', None) is None:
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = module.__package__.rpartition('.')[0]
return module
_wrap(set_package_wrapper, fxn)
return set_package_wrapper
def set_loader(fxn):
"""Set __loader__ on the returned module."""
def set_loader_wrapper(self, *args, **kwargs):
module = fxn(self, *args, **kwargs)
if not hasattr(module, '__loader__'):
module.__loader__ = self
return module
_wrap(set_loader_wrapper, fxn)
return set_loader_wrapper
def module_for_loader(fxn):
"""Decorator to handle selecting the proper module for loaders.
The decorated function is passed the module to use instead of the module
name. The module passed in to the function is either from sys.modules if
it already exists or is a new module. If the module is new, then __name__
    is set to the first argument of the method, __loader__ is set to self, and
    __package__ is set accordingly (if self.is_package() is defined) before the
    module is passed to the decorated function (if self.is_package() does not
    work for the module, __package__ is set post-load).
If an exception is raised and the decorator created the module it is
subsequently removed from sys.modules.
The decorator assumes that the decorated function takes the module name as
the second argument.
"""
def module_for_loader_wrapper(self, fullname, *args, **kwargs):
module = sys.modules.get(fullname)
is_reload = module is not None
if not is_reload:
# This must be done before open() is called as the 'io' module
# implicitly imports 'locale' and would otherwise trigger an
# infinite loop.
module = new_module(fullname)
# This must be done before putting the module in sys.modules
# (otherwise an optimization shortcut in import.c becomes wrong)
module.__initializing__ = True
sys.modules[fullname] = module
module.__loader__ = self
try:
is_package = self.is_package(fullname)
except (ImportError, AttributeError):
pass
else:
if is_package:
module.__package__ = fullname
else:
module.__package__ = fullname.rpartition('.')[0]
else:
module.__initializing__ = True
try:
# If __package__ was not set above, __import__() will do it later.
return fxn(self, module, *args, **kwargs)
except:
if not is_reload:
del sys.modules[fullname]
raise
finally:
module.__initializing__ = False
_wrap(module_for_loader_wrapper, fxn)
return module_for_loader_wrapper
def _check_name(method):
"""Decorator to verify that the module being requested matches the one the
loader can handle.
    The first argument (self) must define 'name' which the second argument is
compared against. If the comparison fails then ImportError is raised.
"""
def _check_name_wrapper(self, name=None, *args, **kwargs):
if name is None:
name = self.name
elif self.name != name:
raise ImportError("loader cannot handle %s" % name, name=name)
return method(self, name, *args, **kwargs)
_wrap(_check_name_wrapper, method)
return _check_name_wrapper
def _requires_builtin(fxn):
"""Decorator to verify the named module is built-in."""
def _requires_builtin_wrapper(self, fullname):
if fullname not in sys.builtin_module_names:
raise ImportError("{} is not a built-in module".format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_builtin_wrapper, fxn)
return _requires_builtin_wrapper
def _requires_frozen(fxn):
"""Decorator to verify the named module is frozen."""
def _requires_frozen_wrapper(self, fullname):
if not _imp.is_frozen(fullname):
raise ImportError("{} is not a frozen module".format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_frozen_wrapper, fxn)
return _requires_frozen_wrapper
def _find_module_shim(self, fullname):
"""Try to find a loader for the specified module by delegating to
self.find_loader()."""
# Call find_loader(). If it returns a string (indicating this
# is a namespace package portion), generate a warning and
# return None.
loader, portions = self.find_loader(fullname)
if loader is None and len(portions):
msg = "Not importing directory {}: missing __init__"
_warnings.warn(msg.format(portions[0]), ImportWarning)
return loader
# Loaders #####################################################################
class BuiltinImporter:
"""Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def module_repr(cls, module):
return "<module '{}' (built-in)>".format(module.__name__)
@classmethod
def find_module(cls, fullname, path=None):
"""Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
"""
if path is not None:
return None
return cls if _imp.is_builtin(fullname) else None
@classmethod
@set_package
@set_loader
@_requires_builtin
def load_module(cls, fullname):
"""Load a built-in module."""
is_reload = fullname in sys.modules
try:
return _call_with_frames_removed(_imp.init_builtin, fullname)
except:
if not is_reload and fullname in sys.modules:
del sys.modules[fullname]
raise
@classmethod
@_requires_builtin
def get_code(cls, fullname):
"""Return None as built-in modules do not have code objects."""
return None
@classmethod
@_requires_builtin
def get_source(cls, fullname):
"""Return None as built-in modules do not have source code."""
return None
@classmethod
@_requires_builtin
def is_package(cls, fullname):
"""Return False as built-in modules are never packages."""
return False
class FrozenImporter:
"""Meta path import for frozen modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def module_repr(cls, m):
return "<module '{}' (frozen)>".format(m.__name__)
@classmethod
def find_module(cls, fullname, path=None):
"""Find a frozen module."""
return cls if _imp.is_frozen(fullname) else None
@classmethod
@set_package
@set_loader
@_requires_frozen
def load_module(cls, fullname):
"""Load a frozen module."""
is_reload = fullname in sys.modules
try:
m = _call_with_frames_removed(_imp.init_frozen, fullname)
# Let our own module_repr() method produce a suitable repr.
del m.__file__
return m
except:
if not is_reload and fullname in sys.modules:
del sys.modules[fullname]
raise
@classmethod
@_requires_frozen
def get_code(cls, fullname):
"""Return the code object for the frozen module."""
return _imp.get_frozen_object(fullname)
@classmethod
@_requires_frozen
def get_source(cls, fullname):
"""Return None as frozen modules do not have source code."""
return None
@classmethod
@_requires_frozen
def is_package(cls, fullname):
"""Return True if the frozen module is a package."""
return _imp.is_frozen_package(fullname)
class WindowsRegistryFinder:
"""Meta path finder for modules declared in the Windows registry.
"""
REGISTRY_KEY = (
"Software\\Python\\PythonCore\\{sys_version}"
"\\Modules\\{fullname}")
REGISTRY_KEY_DEBUG = (
"Software\\Python\\PythonCore\\{sys_version}"
"\\Modules\\{fullname}\\Debug")
DEBUG_BUILD = False # Changed in _setup()
@classmethod
def _open_registry(cls, key):
try:
return _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, key)
except WindowsError:
return _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key)
@classmethod
def _search_registry(cls, fullname):
if cls.DEBUG_BUILD:
registry_key = cls.REGISTRY_KEY_DEBUG
else:
registry_key = cls.REGISTRY_KEY
key = registry_key.format(fullname=fullname,
sys_version=sys.version[:3])
try:
with cls._open_registry(key) as hkey:
filepath = _winreg.QueryValue(hkey, "")
except WindowsError:
return None
return filepath
@classmethod
def find_module(cls, fullname, path=None):
"""Find module named in the registry."""
filepath = cls._search_registry(fullname)
if filepath is None:
return None
try:
_os.stat(filepath)
except OSError:
return None
for loader, suffixes in _get_supported_file_loaders():
if filepath.endswith(tuple(suffixes)):
return loader(fullname, filepath)
class _LoaderBasics:
"""Base class of common code needed by both SourceLoader and
SourcelessFileLoader."""
def is_package(self, fullname):
"""Concrete implementation of InspectLoader.is_package by checking if
the path returned by get_filename has a filename of '__init__.py'."""
filename = _path_split(self.get_filename(fullname))[1]
filename_base = filename.rsplit('.', 1)[0]
tail_name = fullname.rpartition('.')[2]
return filename_base == '__init__' and tail_name != '__init__'
def _bytes_from_bytecode(self, fullname, data, bytecode_path, source_stats):
"""Return the marshalled bytes from bytecode, verifying the magic
number, timestamp and source size along the way.
If source_stats is None then skip the timestamp check.
"""
magic = data[:4]
raw_timestamp = data[4:8]
raw_size = data[8:12]
if magic != _MAGIC_BYTES:
msg = 'bad magic number in {!r}: {!r}'.format(fullname, magic)
_verbose_message(msg)
raise ImportError(msg, name=fullname, path=bytecode_path)
elif len(raw_timestamp) != 4:
message = 'bad timestamp in {}'.format(fullname)
_verbose_message(message)
raise EOFError(message)
elif len(raw_size) != 4:
message = 'bad size in {}'.format(fullname)
_verbose_message(message)
raise EOFError(message)
if source_stats is not None:
try:
source_mtime = int(source_stats['mtime'])
except KeyError:
pass
else:
if _r_long(raw_timestamp) != source_mtime:
message = 'bytecode is stale for {}'.format(fullname)
_verbose_message(message)
raise ImportError(message, name=fullname,
path=bytecode_path)
try:
source_size = source_stats['size'] & 0xFFFFFFFF
except KeyError:
pass
else:
if _r_long(raw_size) != source_size:
raise ImportError(
"bytecode is stale for {}".format(fullname),
name=fullname, path=bytecode_path)
# Can't return the code object as errors from marshal loading need to
# propagate even when source is available.
return data[12:]
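    # Sketch of the 12-byte header parsed above (little-endian fields, cf.
    # _r_long()):
    #   bytes 0-3    magic number, compared against _MAGIC_BYTES
    #   bytes 4-7    source mtime, checked against source_stats['mtime']
    #   bytes 8-11   source size modulo 2**32, checked against source_stats['size']
    # Everything from byte 12 onwards is the marshalled code object.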
@module_for_loader
def _load_module(self, module, *, sourceless=False):
"""Helper for load_module able to handle either source or sourceless
loading."""
name = module.__name__
code_object = self.get_code(name)
module.__file__ = self.get_filename(name)
if not sourceless:
try:
module.__cached__ = cache_from_source(module.__file__)
except NotImplementedError:
module.__cached__ = module.__file__
else:
module.__cached__ = module.__file__
module.__package__ = name
if self.is_package(name):
module.__path__ = [_path_split(module.__file__)[0]]
else:
module.__package__ = module.__package__.rpartition('.')[0]
module.__loader__ = self
_call_with_frames_removed(exec, code_object, module.__dict__)
return module
class SourceLoader(_LoaderBasics):
def path_mtime(self, path):
"""Optional method that returns the modification time (an int) for the
specified path, where path is a str.
"""
raise NotImplementedError
def path_stats(self, path):
"""Optional method returning a metadata dict for the specified path
to by the path (str).
Possible keys:
- 'mtime' (mandatory) is the numeric timestamp of last source
code modification;
- 'size' (optional) is the size in bytes of the source code.
Implementing this method allows the loader to read bytecode files.
"""
return {'mtime': self.path_mtime(path)}
def _cache_bytecode(self, source_path, cache_path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
        The source path is needed in order to correctly transfer permissions.
"""
# For backwards compatibility, we delegate to set_data()
return self.set_data(cache_path, data)
def set_data(self, path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
"""
raise NotImplementedError
def get_source(self, fullname):
"""Concrete implementation of InspectLoader.get_source."""
import tokenize
path = self.get_filename(fullname)
try:
source_bytes = self.get_data(path)
except IOError as exc:
raise ImportError("source not available through get_data()",
name=fullname) from exc
readsource = _io.BytesIO(source_bytes).readline
try:
encoding = tokenize.detect_encoding(readsource)
except SyntaxError as exc:
raise ImportError("Failed to detect encoding",
name=fullname) from exc
newline_decoder = _io.IncrementalNewlineDecoder(None, True)
try:
return newline_decoder.decode(source_bytes.decode(encoding[0]))
except UnicodeDecodeError as exc:
raise ImportError("Failed to decode source file",
name=fullname) from exc
def get_code(self, fullname):
"""Concrete implementation of InspectLoader.get_code.
Reading of bytecode requires path_stats to be implemented. To write
bytecode, set_data must also be implemented.
"""
source_path = self.get_filename(fullname)
source_mtime = None
try:
bytecode_path = cache_from_source(source_path)
except NotImplementedError:
bytecode_path = None
else:
try:
st = self.path_stats(source_path)
except NotImplementedError:
pass
else:
source_mtime = int(st['mtime'])
try:
data = self.get_data(bytecode_path)
except IOError:
pass
else:
try:
bytes_data = self._bytes_from_bytecode(fullname, data,
bytecode_path,
st)
except (ImportError, EOFError):
pass
else:
_verbose_message('{} matches {}', bytecode_path,
source_path)
found = marshal.loads(bytes_data)
if isinstance(found, _code_type):
_imp._fix_co_filename(found, source_path)
_verbose_message('code object from {}',
bytecode_path)
return found
else:
msg = "Non-code object in {}"
raise ImportError(msg.format(bytecode_path),
name=fullname, path=bytecode_path)
source_bytes = self.get_data(source_path)
code_object = _call_with_frames_removed(compile,
source_bytes, source_path, 'exec',
dont_inherit=True)
_verbose_message('code object from {}', source_path)
if (not sys.dont_write_bytecode and bytecode_path is not None and
source_mtime is not None):
data = bytearray(_MAGIC_BYTES)
data.extend(_w_long(source_mtime))
data.extend(_w_long(len(source_bytes)))
data.extend(marshal.dumps(code_object))
try:
self._cache_bytecode(source_path, bytecode_path, data)
_verbose_message('wrote {!r}', bytecode_path)
except NotImplementedError:
pass
return code_object
def load_module(self, fullname):
"""Concrete implementation of Loader.load_module.
Requires ExecutionLoader.get_filename and ResourceLoader.get_data to be
implemented to load source code. Use of bytecode is dictated by whether
get_code uses/writes bytecode.
"""
return self._load_module(fullname)
class FileLoader:
"""Base file loader class which implements the loader protocol methods that
require file system usage."""
def __init__(self, fullname, path):
"""Cache the module name and the path to the file found by the
finder."""
self.name = fullname
self.path = path
@_check_name
def load_module(self, fullname):
"""Load a module from a file."""
        # Issue #14857: Avoid the zero-argument form of super so the implementation
# of that form can be updated without breaking the frozen module
return super(FileLoader, self).load_module(fullname)
@_check_name
def get_filename(self, fullname):
"""Return the path to the source file as found by the finder."""
return self.path
def get_data(self, path):
"""Return the data from path as raw bytes."""
with _io.FileIO(path, 'r') as file:
return file.read()
class SourceFileLoader(FileLoader, SourceLoader):
"""Concrete implementation of SourceLoader using the file system."""
def path_stats(self, path):
"""Return the metadata for the path."""
st = _os.stat(path)
return {'mtime': st.st_mtime, 'size': st.st_size}
def _cache_bytecode(self, source_path, bytecode_path, data):
# Adapt between the two APIs
try:
mode = _os.stat(source_path).st_mode
except OSError:
mode = 0o666
# We always ensure write access so we can update cached files
# later even when the source files are read-only on Windows (#6074)
mode |= 0o200
return self.set_data(bytecode_path, data, _mode=mode)
def set_data(self, path, data, *, _mode=0o666):
"""Write bytes data to a file."""
parent, filename = _path_split(path)
path_parts = []
# Figure out what directories are missing.
while parent and not _path_isdir(parent):
parent, part = _path_split(parent)
path_parts.append(part)
# Create needed directories.
for part in reversed(path_parts):
parent = _path_join(parent, part)
try:
_os.mkdir(parent)
except FileExistsError:
# Probably another Python process already created the dir.
continue
except OSError as exc:
# Could be a permission error, read-only filesystem: just forget
# about writing the data.
_verbose_message('could not create {!r}: {!r}', parent, exc)
return
try:
_write_atomic(path, data, _mode)
_verbose_message('created {!r}', path)
except OSError as exc:
# Same as above: just don't write the bytecode.
_verbose_message('could not create {!r}: {!r}', path, exc)
class SourcelessFileLoader(FileLoader, _LoaderBasics):
"""Loader which handles sourceless file imports."""
def load_module(self, fullname):
return self._load_module(fullname, sourceless=True)
def get_code(self, fullname):
path = self.get_filename(fullname)
data = self.get_data(path)
bytes_data = self._bytes_from_bytecode(fullname, data, path, None)
found = marshal.loads(bytes_data)
if isinstance(found, _code_type):
_verbose_message('code object from {!r}', path)
return found
else:
raise ImportError("Non-code object in {}".format(path),
name=fullname, path=path)
def get_source(self, fullname):
"""Return None as there is no source code."""
return None
# Filled in by _setup().
EXTENSION_SUFFIXES = []
class ExtensionFileLoader:
"""Loader for extension modules.
The constructor is designed to work with FileFinder.
"""
def __init__(self, name, path):
self.name = name
self.path = path
@_check_name
@set_package
@set_loader
def load_module(self, fullname):
"""Load an extension module."""
is_reload = fullname in sys.modules
try:
module = _call_with_frames_removed(_imp.load_dynamic,
fullname, self.path)
_verbose_message('extension module loaded from {!r}', self.path)
if self.is_package(fullname) and not hasattr(module, '__path__'):
module.__path__ = [_path_split(self.path)[0]]
return module
except:
if not is_reload and fullname in sys.modules:
del sys.modules[fullname]
raise
def is_package(self, fullname):
"""Return True if the extension module is a package."""
file_name = _path_split(self.path)[1]
return any(file_name == '__init__' + suffix
for suffix in EXTENSION_SUFFIXES)
def get_code(self, fullname):
"""Return None as an extension module cannot create a code object."""
return None
def get_source(self, fullname):
"""Return None as extension modules have no source code."""
return None
class _NamespacePath:
"""Represents a namespace package's path. It uses the module name
to find its parent module, and from there it looks up the parent's
__path__. When this changes, the module's own path is recomputed,
using path_finder. For top-level modules, the parent module's path
is sys.path."""
def __init__(self, name, path, path_finder):
self._name = name
self._path = path
self._last_parent_path = tuple(self._get_parent_path())
self._path_finder = path_finder
def _find_parent_path_names(self):
"""Returns a tuple of (parent-module-name, parent-path-attr-name)"""
parent, dot, me = self._name.rpartition('.')
if dot == '':
# This is a top-level module. sys.path contains the parent path.
return 'sys', 'path'
# Not a top-level module. parent-module.__path__ contains the
# parent path.
return parent, '__path__'
def _get_parent_path(self):
parent_module_name, path_attr_name = self._find_parent_path_names()
return getattr(sys.modules[parent_module_name], path_attr_name)
def _recalculate(self):
# If the parent's path has changed, recalculate _path
parent_path = tuple(self._get_parent_path()) # Make a copy
if parent_path != self._last_parent_path:
loader, new_path = self._path_finder(self._name, parent_path)
# Note that no changes are made if a loader is returned, but we
# do remember the new parent path
if loader is None:
self._path = new_path
self._last_parent_path = parent_path # Save the copy
return self._path
def __iter__(self):
return iter(self._recalculate())
def __len__(self):
return len(self._recalculate())
def __repr__(self):
return "_NamespacePath({!r})".format(self._path)
def __contains__(self, item):
return item in self._recalculate()
def append(self, item):
self._path.append(item)
class NamespaceLoader:
def __init__(self, name, path, path_finder):
self._path = _NamespacePath(name, path, path_finder)
@classmethod
def module_repr(cls, module):
return "<module '{}' (namespace)>".format(module.__name__)
@module_for_loader
def load_module(self, module):
"""Load a namespace module."""
_verbose_message('namespace module loaded with path {!r}', self._path)
module.__path__ = self._path
return module
# Finders #####################################################################
class PathFinder:
"""Meta path finder for sys.path and package __path__ attributes."""
@classmethod
def invalidate_caches(cls):
"""Call the invalidate_caches() method on all path entry finders
        stored in sys.path_importer_cache (where implemented)."""
for finder in sys.path_importer_cache.values():
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
@classmethod
def _path_hooks(cls, path):
"""Search sequence of hooks for a finder for 'path'.
If 'hooks' is false then use sys.path_hooks.
"""
if not sys.path_hooks:
_warnings.warn('sys.path_hooks is empty', ImportWarning)
for hook in sys.path_hooks:
try:
return hook(path)
except ImportError:
continue
else:
return None
@classmethod
def _path_importer_cache(cls, path):
"""Get the finder for the path entry from sys.path_importer_cache.
If the path entry is not in the cache, find the appropriate finder
and cache it. If no finder is available, store None.
"""
if path == '':
path = '.'
try:
finder = sys.path_importer_cache[path]
except KeyError:
finder = cls._path_hooks(path)
sys.path_importer_cache[path] = finder
return finder
@classmethod
def _get_loader(cls, fullname, path):
"""Find the loader or namespace_path for this module/package name."""
# If this ends up being a namespace package, namespace_path is
# the list of paths that will become its __path__
namespace_path = []
for entry in path:
if not isinstance(entry, (str, bytes)):
continue
finder = cls._path_importer_cache(entry)
if finder is not None:
if hasattr(finder, 'find_loader'):
loader, portions = finder.find_loader(fullname)
else:
loader = finder.find_module(fullname)
portions = []
if loader is not None:
# We found a loader: return it immediately.
return loader, namespace_path
# This is possibly part of a namespace package.
# Remember these path entries (if any) for when we
# create a namespace package, and continue iterating
# on path.
namespace_path.extend(portions)
else:
return None, namespace_path
@classmethod
def find_module(cls, fullname, path=None):
"""Find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache."""
if path is None:
path = sys.path
loader, namespace_path = cls._get_loader(fullname, path)
if loader is not None:
return loader
else:
if namespace_path:
# We found at least one namespace path. Return a
# loader which can create the namespace package.
return NamespaceLoader(fullname, namespace_path, cls._get_loader)
else:
return None
class FileFinder:
"""File-based finder.
Interactions with the file system are cached for performance, being
refreshed when the directory the finder is handling has been modified.
"""
def __init__(self, path, *loader_details):
"""Initialize with the path to search on and a variable number of
2-tuples containing the loader and the file suffixes the loader
recognizes."""
loaders = []
for loader, suffixes in loader_details:
loaders.extend((suffix, loader) for suffix in suffixes)
self._loaders = loaders
# Base (directory) path
self.path = path or '.'
self._path_mtime = -1
self._path_cache = set()
self._relaxed_path_cache = set()
def invalidate_caches(self):
"""Invalidate the directory mtime."""
self._path_mtime = -1
find_module = _find_module_shim
def find_loader(self, fullname):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions)."""
is_namespace = False
tail_module = fullname.rpartition('.')[2]
try:
mtime = _os.stat(self.path).st_mtime
except OSError:
mtime = -1
if mtime != self._path_mtime:
self._fill_cache()
self._path_mtime = mtime
# tail_module keeps the original casing, for __file__ and friends
if _relax_case():
cache = self._relaxed_path_cache
cache_module = tail_module.lower()
else:
cache = self._path_cache
cache_module = tail_module
# Check if the module is the name of a directory (and thus a package).
if cache_module in cache:
base_path = _path_join(self.path, tail_module)
if _path_isdir(base_path):
for suffix, loader in self._loaders:
init_filename = '__init__' + suffix
full_path = _path_join(base_path, init_filename)
if _path_isfile(full_path):
return (loader(fullname, full_path), [base_path])
else:
# A namespace package, return the path if we don't also
# find a module in the next section.
is_namespace = True
        # Check if a file with a proper suffix exists.
for suffix, loader in self._loaders:
full_path = _path_join(self.path, tail_module + suffix)
_verbose_message('trying {}'.format(full_path), verbosity=2)
if cache_module + suffix in cache:
if _path_isfile(full_path):
return (loader(fullname, full_path), [])
if is_namespace:
_verbose_message('possible namespace for {}'.format(base_path))
return (None, [base_path])
return (None, [])
def _fill_cache(self):
"""Fill the cache of potential modules and packages for this directory."""
path = self.path
try:
contents = _os.listdir(path)
except (FileNotFoundError, PermissionError, NotADirectoryError):
# Directory has either been removed, turned into a file, or made
# unreadable.
contents = []
# We store two cached versions, to handle runtime changes of the
# PYTHONCASEOK environment variable.
if not sys.platform.startswith('win'):
self._path_cache = set(contents)
else:
# Windows users can import modules with case-insensitive file
# suffixes (for legacy reasons). Make the suffix lowercase here
# so it's done once instead of for every import. This is safe as
# the specified suffixes to check against are always specified in a
# case-sensitive manner.
lower_suffix_contents = set()
for item in contents:
name, dot, suffix = item.partition('.')
if dot:
new_name = '{}.{}'.format(name, suffix.lower())
else:
new_name = name
lower_suffix_contents.add(new_name)
self._path_cache = lower_suffix_contents
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
self._relaxed_path_cache = set(fn.lower() for fn in contents)
@classmethod
def path_hook(cls, *loader_details):
"""A class method which returns a closure to use on sys.path_hook
which will return an instance using the specified loaders and the path
called on the closure.
If the path called on the closure is not a directory, ImportError is
raised.
"""
def path_hook_for_FileFinder(path):
"""Path hook for importlib.machinery.FileFinder."""
if not _path_isdir(path):
raise ImportError("only directories are supported", path=path)
return cls(path, *loader_details)
return path_hook_for_FileFinder
def __repr__(self):
return "FileFinder(%r)" % (self.path,)
# Import itself ###############################################################
class _ImportLockContext:
"""Context manager for the import lock."""
def __enter__(self):
"""Acquire the import lock."""
_imp.acquire_lock()
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Release the import lock regardless of any raised exceptions."""
_imp.release_lock()
def _resolve_name(name, package, level):
"""Resolve a relative module name to an absolute one."""
bits = package.rsplit('.', level - 1)
if len(bits) < level:
raise ValueError('attempted relative import beyond top-level package')
base = bits[0]
return '{}.{}'.format(base, name) if name else base
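# Worked examples (package names are hypothetical):
#
#   _resolve_name('util', 'pkg.sub', 1) -> 'pkg.sub.util'   # from .util import x
#   _resolve_name('util', 'pkg.sub', 2) -> 'pkg.util'       # from ..util import x
#   _resolve_name('', 'pkg.sub', 1)     -> 'pkg.sub'        # from . import x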
def _find_module(name, path):
"""Find a module's loader."""
if not sys.meta_path:
_warnings.warn('sys.meta_path is empty', ImportWarning)
for finder in sys.meta_path:
with _ImportLockContext():
loader = finder.find_module(name, path)
if loader is not None:
# The parent import may have already imported this module.
if name not in sys.modules:
return loader
else:
return sys.modules[name].__loader__
else:
return None
def _sanity_check(name, package, level):
"""Verify arguments are "sane"."""
if not isinstance(name, str):
raise TypeError("module name must be str, not {}".format(type(name)))
if level < 0:
raise ValueError('level must be >= 0')
if package:
if not isinstance(package, str):
raise TypeError("__package__ not set to a string")
elif package not in sys.modules:
msg = ("Parent module {!r} not loaded, cannot perform relative "
"import")
raise SystemError(msg.format(package))
if not name and level == 0:
raise ValueError("Empty module name")
_ERR_MSG = 'No module named {!r}'
def _find_and_load_unlocked(name, import_):
path = None
parent = name.rpartition('.')[0]
if parent:
if parent not in sys.modules:
_call_with_frames_removed(import_, parent)
# Crazy side-effects!
if name in sys.modules:
return sys.modules[name]
# Backwards-compatibility; be nicer to skip the dict lookup.
parent_module = sys.modules[parent]
try:
path = parent_module.__path__
except AttributeError:
msg = (_ERR_MSG + '; {} is not a package').format(name, parent)
raise ImportError(msg, name=name)
loader = _find_module(name, path)
if loader is None:
exc = ImportError(_ERR_MSG.format(name), name=name)
# TODO(brett): switch to a proper ModuleNotFound exception in Python
# 3.4.
exc._not_found = True
raise exc
elif name not in sys.modules:
# The parent import may have already imported this module.
loader.load_module(name)
_verbose_message('import {!r} # {!r}', name, loader)
# Backwards-compatibility; be nicer to skip the dict lookup.
module = sys.modules[name]
if parent:
# Set the module as an attribute on its parent.
parent_module = sys.modules[parent]
setattr(parent_module, name.rpartition('.')[2], module)
# Set __package__ if the loader did not.
if getattr(module, '__package__', None) is None:
try:
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = module.__package__.rpartition('.')[0]
except AttributeError:
pass
# Set loader if need be.
if not hasattr(module, '__loader__'):
try:
module.__loader__ = loader
except AttributeError:
pass
return module
def _find_and_load(name, import_):
"""Find and load the module, and release the import lock."""
try:
lock = _get_module_lock(name)
finally:
_imp.release_lock()
lock.acquire()
try:
return _find_and_load_unlocked(name, import_)
finally:
lock.release()
def _gcd_import(name, package=None, level=0):
"""Import and return the module based on its name, the package the call is
being made from, and the level adjustment.
This function represents the greatest common denominator of functionality
between import_module and __import__. This includes setting __package__ if
the loader did not.
"""
_sanity_check(name, package, level)
if level > 0:
name = _resolve_name(name, package, level)
_imp.acquire_lock()
if name not in sys.modules:
return _find_and_load(name, _gcd_import)
module = sys.modules[name]
if module is None:
_imp.release_lock()
message = ("import of {} halted; "
"None in sys.modules".format(name))
raise ImportError(message, name=name)
_lock_unlock_module(name)
return module
def _handle_fromlist(module, fromlist, import_):
"""Figure out what __import__ should return.
The import_ parameter is a callable which takes the name of module to
import. It is required to decouple the function from assuming importlib's
import implementation is desired.
"""
# The hell that is fromlist ...
# If a package was imported, try to import stuff from fromlist.
if hasattr(module, '__path__'):
if '*' in fromlist:
fromlist = list(fromlist)
fromlist.remove('*')
if hasattr(module, '__all__'):
fromlist.extend(module.__all__)
for x in fromlist:
if not hasattr(module, x):
from_name = '{}.{}'.format(module.__name__, x)
try:
_call_with_frames_removed(import_, from_name)
except ImportError as exc:
# Backwards-compatibility dictates we ignore failed
# imports triggered by fromlist for modules that don't
# exist.
# TODO(brett): In Python 3.4, have import raise
# ModuleNotFound and catch that.
if getattr(exc, '_not_found', False):
if exc.name == from_name:
continue
raise
return module
def _calc___package__(globals):
"""Calculate what __package__ should be.
__package__ is not guaranteed to be defined or could be set to None
to represent that its proper value is unknown.
"""
package = globals.get('__package__')
if package is None:
package = globals['__name__']
if '__path__' not in globals:
package = package.rpartition('.')[0]
return package
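# Illustration with hypothetical globals dicts:
#
#   _calc___package__({'__name__': 'pkg.mod'})                  -> 'pkg'
#   _calc___package__({'__name__': 'pkg', '__path__': []})      -> 'pkg'
#   _calc___package__({'__name__': 'mod', '__package__': 'p'})  -> 'p'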
def _get_supported_file_loaders():
"""Returns a list of file-based module loaders.
Each item is a tuple (loader, suffixes).
"""
extensions = ExtensionFileLoader, _imp.extension_suffixes()
source = SourceFileLoader, SOURCE_SUFFIXES
bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES
return [extensions, source, bytecode]
def __import__(name, globals=None, locals=None, fromlist=(), level=0):
"""Import a module.
    The 'globals' argument is used to infer where the import is occurring from
to handle relative imports. The 'locals' argument is ignored. The
'fromlist' argument specifies what should exist as attributes on the module
being imported (e.g. ``from module import <fromlist>``). The 'level'
argument represents the package location to import from in a relative
import (e.g. ``from ..pkg import mod`` would have a 'level' of 2).
"""
if level == 0:
module = _gcd_import(name)
else:
globals_ = globals if globals is not None else {}
package = _calc___package__(globals_)
module = _gcd_import(name, package, level)
if not fromlist:
# Return up to the first dot in 'name'. This is complicated by the fact
# that 'name' may be relative.
if level == 0:
return _gcd_import(name.partition('.')[0])
elif not name:
return module
else:
# Figure out where to slice the module's name up to the first dot
# in 'name'.
cut_off = len(name) - len(name.partition('.')[0])
# Slice end needs to be positive to alleviate need to special-case
# when ``'.' not in name``.
return sys.modules[module.__name__[:len(module.__name__)-cut_off]]
else:
return _handle_fromlist(module, fromlist, _gcd_import)
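# Return-value sketch for the fromlist handling above (hypothetical packages):
#
#   __import__('pkg.sub')                   -> the 'pkg' module
#   __import__('pkg.sub', fromlist=('x',))  -> the 'pkg.sub' module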
def _setup(sys_module, _imp_module):
"""Setup importlib by importing needed built-in modules and injecting them
into the global namespace.
As sys is needed for sys.modules access and _imp is needed to load built-in
modules, those two modules must be explicitly passed in.
"""
global _imp, sys, BYTECODE_SUFFIXES
_imp = _imp_module
sys = sys_module
if sys.flags.optimize:
BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES
else:
BYTECODE_SUFFIXES = DEBUG_BYTECODE_SUFFIXES
module_type = type(sys)
for name, module in sys.modules.items():
if isinstance(module, module_type):
if not hasattr(module, '__loader__'):
if name in sys.builtin_module_names:
module.__loader__ = BuiltinImporter
elif _imp.is_frozen(name):
module.__loader__ = FrozenImporter
self_module = sys.modules[__name__]
for builtin_name in ('_io', '_warnings', 'builtins', 'marshal'):
if builtin_name not in sys.modules:
builtin_module = BuiltinImporter.load_module(builtin_name)
else:
builtin_module = sys.modules[builtin_name]
setattr(self_module, builtin_name, builtin_module)
os_details = ('posix', ['/']), ('nt', ['\\', '/']), ('os2', ['\\', '/'])
for builtin_os, path_separators in os_details:
# Assumption made in _path_join()
assert all(len(sep) == 1 for sep in path_separators)
path_sep = path_separators[0]
if builtin_os in sys.modules:
os_module = sys.modules[builtin_os]
break
else:
try:
os_module = BuiltinImporter.load_module(builtin_os)
# TODO: rip out os2 code after 3.3 is released as per PEP 11
if builtin_os == 'os2' and 'EMX GCC' in sys.version:
path_sep = path_separators[1]
break
except ImportError:
continue
else:
raise ImportError('importlib requires posix or nt')
try:
thread_module = BuiltinImporter.load_module('_thread')
except ImportError:
# Python was built without threads
thread_module = None
weakref_module = BuiltinImporter.load_module('_weakref')
if builtin_os == 'nt':
winreg_module = BuiltinImporter.load_module('winreg')
setattr(self_module, '_winreg', winreg_module)
setattr(self_module, '_os', os_module)
setattr(self_module, '_thread', thread_module)
setattr(self_module, '_weakref', weakref_module)
setattr(self_module, 'path_sep', path_sep)
setattr(self_module, 'path_separators', set(path_separators))
# Constants
setattr(self_module, '_relax_case', _make_relax_case())
EXTENSION_SUFFIXES.extend(_imp.extension_suffixes())
if builtin_os == 'nt':
SOURCE_SUFFIXES.append('.pyw')
if '_d.pyd' in EXTENSION_SUFFIXES:
WindowsRegistryFinder.DEBUG_BUILD = True
def _install(sys_module, _imp_module):
"""Install importlib as the implementation of import."""
_setup(sys_module, _imp_module)
supported_loaders = _get_supported_file_loaders()
sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)])
sys.meta_path.append(BuiltinImporter)
sys.meta_path.append(FrozenImporter)
if _os.__name__ == 'nt':
sys.meta_path.append(WindowsRegistryFinder)
sys.meta_path.append(PathFinder)
| 0.00044 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
from frappe.utils import flt, cstr, nowdate, nowtime
from erpnext.stock.utils import update_bin
from erpnext.stock.stock_ledger import update_entries_after
def repost(only_actual=False, allow_negative_stock=False, allow_zero_rate=False, only_bin=False):
"""
Repost everything!
"""
frappe.db.auto_commit_on_many_writes = 1
if allow_negative_stock:
existing_allow_negative_stock = frappe.db.get_value("Stock Settings", None, "allow_negative_stock")
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)
for d in frappe.db.sql("""select distinct item_code, warehouse from
(select item_code, warehouse from tabBin
union
select item_code, warehouse from `tabStock Ledger Entry`) a"""):
try:
repost_stock(d[0], d[1], allow_zero_rate, only_actual, only_bin)
frappe.db.commit()
except:
frappe.db.rollback()
if allow_negative_stock:
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", existing_allow_negative_stock)
frappe.db.auto_commit_on_many_writes = 0
def repost_stock(item_code, warehouse, allow_zero_rate=False, only_actual=False, only_bin=False):
if not only_bin:
repost_actual_qty(item_code, warehouse, allow_zero_rate)
if item_code and warehouse and not only_actual:
qty_dict = {
"reserved_qty": get_reserved_qty(item_code, warehouse),
"indented_qty": get_indented_qty(item_code, warehouse),
"ordered_qty": get_ordered_qty(item_code, warehouse),
"planned_qty": get_planned_qty(item_code, warehouse)
}
if only_bin:
qty_dict.update({
"actual_qty": get_balance_qty_from_sle(item_code, warehouse)
})
update_bin_qty(item_code, warehouse, qty_dict)
def repost_actual_qty(item_code, warehouse, allow_zero_rate=False):
try:
update_entries_after({ "item_code": item_code, "warehouse": warehouse }, allow_zero_rate)
except:
pass
def get_balance_qty_from_sle(item_code, warehouse):
balance_qty = frappe.db.sql("""select qty_after_transaction from `tabStock Ledger Entry`
where item_code=%s and warehouse=%s and is_cancelled='No'
order by posting_date desc, posting_time desc, name desc
limit 1""", (item_code, warehouse))
return flt(balance_qty[0][0]) if balance_qty else 0.0
def get_reserved_qty(item_code, warehouse):
reserved_qty = frappe.db.sql("""
select
sum(dnpi_qty * ((so_item_qty - so_item_delivered_qty) / so_item_qty))
from
(
(select
qty as dnpi_qty,
(
select qty from `tabSales Order Item`
where name = dnpi.parent_detail_docname
and (delivered_by_supplier is null or delivered_by_supplier = 0)
) as so_item_qty,
(
select delivered_qty from `tabSales Order Item`
where name = dnpi.parent_detail_docname
and delivered_by_supplier = 0
) as so_item_delivered_qty,
parent, name
from
(
select qty, parent_detail_docname, parent, name
from `tabPacked Item` dnpi_in
where item_code = %s and warehouse = %s
and parenttype="Sales Order"
and item_code != parent_item
and exists (select * from `tabSales Order` so
where name = dnpi_in.parent and docstatus = 1 and status != 'Closed')
) dnpi)
union
(select stock_qty as dnpi_qty, qty as so_item_qty,
delivered_qty as so_item_delivered_qty, parent, name
from `tabSales Order Item` so_item
where item_code = %s and warehouse = %s
and (so_item.delivered_by_supplier is null or so_item.delivered_by_supplier = 0)
and exists(select * from `tabSales Order` so
where so.name = so_item.parent and so.docstatus = 1
and so.status != 'Closed'))
) tab
where
so_item_qty >= so_item_delivered_qty
""", (item_code, warehouse, item_code, warehouse))
return flt(reserved_qty[0][0]) if reserved_qty else 0
def get_indented_qty(item_code, warehouse):
indented_qty = frappe.db.sql("""select sum(mr_item.qty - mr_item.ordered_qty)
from `tabMaterial Request Item` mr_item, `tabMaterial Request` mr
where mr_item.item_code=%s and mr_item.warehouse=%s
and mr_item.qty > mr_item.ordered_qty and mr_item.parent=mr.name
and mr.status!='Stopped' and mr.docstatus=1""", (item_code, warehouse))
return flt(indented_qty[0][0]) if indented_qty else 0
def get_ordered_qty(item_code, warehouse):
ordered_qty = frappe.db.sql("""
select sum((po_item.qty - po_item.received_qty)*po_item.conversion_factor)
from `tabPurchase Order Item` po_item, `tabPurchase Order` po
where po_item.item_code=%s and po_item.warehouse=%s
and po_item.qty > po_item.received_qty and po_item.parent=po.name
and po.status not in ('Closed', 'Delivered') and po.docstatus=1
and po_item.delivered_by_supplier = 0""", (item_code, warehouse))
return flt(ordered_qty[0][0]) if ordered_qty else 0
def get_planned_qty(item_code, warehouse):
planned_qty = frappe.db.sql("""
select sum(qty - produced_qty) from `tabProduction Order`
where production_item = %s and fg_warehouse = %s and status not in ("Stopped", "Completed")
and docstatus=1 and qty > produced_qty""", (item_code, warehouse))
return flt(planned_qty[0][0]) if planned_qty else 0
def update_bin_qty(item_code, warehouse, qty_dict=None):
from erpnext.stock.utils import get_bin
bin = get_bin(item_code, warehouse)
mismatch = False
for fld, val in qty_dict.items():
if flt(bin.get(fld)) != flt(val):
bin.set(fld, flt(val))
mismatch = True
if mismatch:
bin.projected_qty = (flt(bin.actual_qty) + flt(bin.ordered_qty) +
flt(bin.indented_qty) + flt(bin.planned_qty) - flt(bin.reserved_qty)
- flt(bin.reserved_qty_for_production)) - flt(bin.reserved_qty_for_sub_contract)
bin.save()
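# Worked example of the projected qty formula above (illustrative numbers only):
#   actual 100 + ordered 20 + indented 10 + planned 5
#   - reserved 30 - reserved_for_production 15 - reserved_for_sub_contract 5
#   = projected_qty 85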
def set_stock_balance_as_per_serial_no(item_code=None, posting_date=None, posting_time=None,
fiscal_year=None):
if not posting_date: posting_date = nowdate()
if not posting_time: posting_time = nowtime()
	condition = " and item.name='%s'" % item_code.replace("'", "\\'") if item_code else ""
bin = frappe.db.sql("""select bin.item_code, bin.warehouse, bin.actual_qty, item.stock_uom
from `tabBin` bin, tabItem item
where bin.item_code = item.name and item.has_serial_no = 1 %s""" % condition)
for d in bin:
serial_nos = frappe.db.sql("""select count(name) from `tabSerial No`
where item_code=%s and warehouse=%s and docstatus < 2""", (d[0], d[1]))
if serial_nos and flt(serial_nos[0][0]) != flt(d[2]):
print(d[0], d[1], d[2], serial_nos[0][0])
sle = frappe.db.sql("""select valuation_rate, company from `tabStock Ledger Entry`
where item_code = %s and warehouse = %s and ifnull(is_cancelled, 'No') = 'No'
order by posting_date desc limit 1""", (d[0], d[1]))
sle_dict = {
'doctype' : 'Stock Ledger Entry',
'item_code' : d[0],
'warehouse' : d[1],
'transaction_date' : nowdate(),
'posting_date' : posting_date,
'posting_time' : posting_time,
'voucher_type' : 'Stock Reconciliation (Manual)',
'voucher_no' : '',
'voucher_detail_no' : '',
'actual_qty' : flt(serial_nos[0][0]) - flt(d[2]),
'stock_uom' : d[3],
'incoming_rate' : sle and flt(serial_nos[0][0]) > flt(d[2]) and flt(sle[0][0]) or 0,
'company' : sle and cstr(sle[0][1]) or 0,
'is_cancelled' : 'No',
'batch_no' : '',
'serial_no' : ''
}
sle_doc = frappe.get_doc(sle_dict)
sle_doc.flags.ignore_validate = True
sle_doc.flags.ignore_links = True
sle_doc.insert()
args = sle_dict.copy()
args.update({
"sle_id": sle_doc.name,
"is_amended": 'No'
})
update_bin(args)
update_entries_after({
"item_code": d[0],
"warehouse": d[1],
"posting_date": posting_date,
"posting_time": posting_time
})
def reset_serial_no_status_and_warehouse(serial_nos=None):
if not serial_nos:
serial_nos = frappe.db.sql_list("""select name from `tabSerial No` where docstatus = 0""")
for serial_no in serial_nos:
try:
sr = frappe.get_doc("Serial No", serial_no)
last_sle = sr.get_last_sle()
if flt(last_sle.actual_qty) > 0:
sr.warehouse = last_sle.warehouse
sr.via_stock_ledger = True
sr.save()
except:
pass
def repost_all_stock_vouchers():
warehouses_with_account = frappe.db.sql_list("""select warehouse from tabAccount
where ifnull(account_type, '') = 'Stock' and (warehouse is not null and warehouse != '')
and is_group=0""")
vouchers = frappe.db.sql("""select distinct voucher_type, voucher_no
from `tabStock Ledger Entry` sle
where voucher_type != "Serial No" and sle.warehouse in (%s)
order by posting_date, posting_time, name""" %
', '.join(['%s']*len(warehouses_with_account)), tuple(warehouses_with_account))
rejected = []
i = 0
for voucher_type, voucher_no in vouchers:
i+=1
print(i, "/", len(vouchers), voucher_type, voucher_no)
try:
for dt in ["Stock Ledger Entry", "GL Entry"]:
frappe.db.sql("""delete from `tab%s` where voucher_type=%s and voucher_no=%s"""%
(dt, '%s', '%s'), (voucher_type, voucher_no))
doc = frappe.get_doc(voucher_type, voucher_no)
if voucher_type=="Stock Entry" and doc.purpose in ["Manufacture", "Repack"]:
doc.calculate_rate_and_amount(force=1)
elif voucher_type=="Purchase Receipt" and doc.is_subcontracted == "Yes":
doc.validate()
doc.update_stock_ledger()
doc.make_gl_entries(repost_future_gle=False)
frappe.db.commit()
except Exception as e:
print(frappe.get_traceback())
rejected.append([voucher_type, voucher_no])
frappe.db.rollback()
print(rejected)
| 0.028113 |
from bw2python import ponames
from bw2python.bwtypes import PayloadObject
from bw2python.client import Client
import smap
import msgpack
import datetime
import time
bw_client = Client()
bw_client.setEntityFromEnviron()
bw_client.overrideAutoChainTo(True)
thermostat = smap.IMT550C()
def toHandle(bw_message):
for po in bw_message.payload_objects:
if po.type_dotted == (2, 1, 1, 0):
to_process = msgpack.unpackb(po.content)
print to_process
thermostat.set_state(to_process)
bw_client.subscribe('{0}/slot/state'.format(thermostat.uri), toHandle)
while True:
msg = thermostat.get_state()
po = PayloadObject((2, 1, 1, 0), None, msgpack.packb(msg))
bw_client.publish('{0}/signal/info'.format(thermostat.uri), payload_objects=(po,), persist=True)
time.sleep(thermostat.sample_rate)
#RFC 3339 timestamp UTC
d = datetime.datetime.utcnow()
timestamp = {'ts': int(time.time()*1e9), 'val': d.isoformat('T')}
po2 = PayloadObject((2, 0, 3, 1), None, msgpack.packb(timestamp))
bw_client.publish('{0}/!meta/lastalive'.format(thermostat.uri), payload_objects=(po2,), persist=True)
| 0.016187 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
class wiki_wiki_page_open(osv.osv_memory):
""" wizard Open Page """
_name = "wiki.wiki.page.open"
_description = "wiz open page"
def open_wiki_page(self, cr, uid, ids, context=None):
""" Opens Wiki Page of Group
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of open wiki page’s IDs
@return: dictionary of the open wiki window action for the given group id
"""
if context is None:
context = {}
group_ids = context.get('active_ids', [])
for group in self.pool.get('wiki.groups').browse(cr, uid, group_ids, context=context):
value = {
'domain': "[('group_id','=',%d)]" % (group.id),
'name': 'Wiki Page',
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'wiki.wiki',
'view_id': False,
'type': 'ir.actions.act_window',
}
if group.method == 'page':
value['res_id'] = group.home.id
elif group.method == 'list':
value['view_type'] = 'form'
value['view_mode'] = 'tree,form'
elif group.method == 'tree':
view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'wiki.wiki.tree.children')])
value['view_id'] = view_id
value['domain'] = [('group_id', '=', group.id), ('parent_id', '=', False)]
value['view_type'] = 'tree'
return value
wiki_wiki_page_open()
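# Shape of the action dictionary returned by open_wiki_page for a group whose
# method is 'page' (ids below are illustrative):
#
#   {'domain': "[('group_id','=',1)]", 'name': 'Wiki Page', 'view_type': 'form',
#    'view_mode': 'form,tree', 'res_model': 'wiki.wiki', 'view_id': False,
#    'type': 'ir.actions.act_window', 'res_id': 4}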
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 0.001884 |
# -*- coding: utf-8 -*-
from re import sub
# Entities to be converted
entities = (
# ISO-8859-1 (most common)
("ä", u"ä"),
("ä", u"ä"),
("ü", u"ü"),
("ü", u"ü"),
("ö", u"ö"),
("ö", u"ö"),
("Ä", u"Ä"),
("Ä", u"Ä"),
("Ü", u"Ü"),
("Ü", u"Ü"),
("Ö", u"Ö"),
("Ö", u"Ö"),
("ß", u"ß"),
("ß", u"ß"),
# Rarely used entities
("…", u"..."),
("–", u"-"),
(" ", u" "),
(""", u"\""),
("&", u"&"),
("'", u"'"),
("<", u"<"),
(">", u">"),
# Common entities
("<", u"<"),
(">", u">"),
(" ", u" "),
("&", u"&"),
(""", u"\""),
("'", u"'"),
)
def strip_readable(html):
# Newlines are rendered as whitespace in html
html = html.replace('\n', ' ')
# Multiple whitespaces are rendered as a single one
html = sub('\s\s+', ' ', html)
# Replace <br> by newlines
html = sub('<br(\s*/)?>', '\n', html)
# Replace <p>, <ul>, <ol> and end of these tags by newline
html = sub('</?(p|ul|ol)(\s+.*?)?>', '\n', html)
# Replace <li> by - and </li> by newline
html = sub('<li(\s+.*?)?>', '-', html)
html = html.replace('</li>', '\n')
# And 'normal' stripping
return strip(html)
def strip(html):
# Strip enclosed tags
html = sub('<(.*?)>', '', html)
# Convert html entities
for escaped, unescaped in entities:
html = html.replace(escaped, unescaped)
# Return result with leading/trailing whitespaces removed
return html.strip()
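if __name__ == "__main__":
    # Minimal usage sketch for the helpers above; the sample HTML string is made up.
    sample = u"<p>Hello &amp; welcome</p><ul><li>one</li><li>two</li></ul>"
    # Prints the readable text: entities decoded, <li> items rendered as dashes.
    print(strip_readable(sample))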
| 0.039704 |
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1``
it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
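# Illustrative sketch (not part of the library): the grid above is geometric between
# alpha_max and eps * alpha_max. For a dense, already-centered problem with
# fit_intercept=False and normalize=False, it can be reproduced by hand as follows
# (data made up):
#
#   >>> import numpy as np
#   >>> X = np.array([[1., 2.], [3., 4.], [5., 6.]])
#   >>> y = np.array([1., 2., 3.])
#   >>> l1_ratio, eps, n_alphas = 1.0, 1e-3, 5
#   >>> alpha_max = np.abs(X.T.dot(y)).max() / (X.shape[0] * l1_ratio)
#   >>> alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
#   ...                      num=n_alphas)[::-1]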
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already.
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
copy=False)
if alphas is None:
# No need to normalize or fit an intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
precompute = check_array(precompute, 'csc', dtype=np.float64, order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
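# Illustrative sketch (not part of the library): computing a short elastic-net path on
# made-up data; with 2 features and 5 alphas, `coefs` has shape (n_features, n_alphas).
#
#   >>> import numpy as np
#   >>> X = np.array([[1., 2., 3.1], [2.3, 5.4, 4.3]]).T
#   >>> y = np.array([1., 2., 3.1])
#   >>> alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=5)
#   >>> coefs.shape
#   (2, 5)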
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F', copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
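# Worked sketch (not part of the library): the a * L1 + b * L2 parametrisation from the
# ElasticNet docstring. With a = 0.3 and b = 0.1, alpha = a + b = 0.4 and
# l1_ratio = a / (a + b) = 0.75, so the estimator below expresses that penalty:
#
#   >>> from sklearn.linear_model import ElasticNet
#   >>> a, b = 0.3, 0.1
#   >>> est = ElasticNet(alpha=a + b, l1_ratio=a / (a + b))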
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
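# Usage sketch (not part of the library, data made up): letting LassoCV choose alpha.
#
#   >>> import numpy as np
#   >>> from sklearn.linear_model import LassoCV
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.randn(50, 4)
#   >>> y = X[:, 0] + 0.1 * rng.randn(50)
#   >>> reg = LassoCV(cv=3).fit(X, y)
#   >>> reg.alpha_  # penalty strength selected by cross-validation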
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
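# Worked example of the penalty terms in the docstring above (numbers are
# arbitrary, for illustration only): for W = [[3, 4], [0, 1]],
#   ||W||_21    = sqrt(3**2 + 4**2) + sqrt(0**2 + 1**2) = 5 + 1 = 6
#   ||W||_Fro^2 = 9 + 16 + 0 + 1 = 26
# so with l1_ratio = 1 only the row-wise (L1/L2) term is penalized, and with
# l1_ratio = 0 only the squared Frobenius (L2) term remains.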
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
        If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
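# Minimal usage sketch for MultiTaskLassoCV (illustrative data; no claims are made
# about the fitted values):
#
#     >>> from sklearn import linear_model
#     >>> clf = linear_model.MultiTaskLassoCV(cv=3)
#     >>> clf.fit([[0, 0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])  # doctest: +SKIP
#     >>> clf.alpha_  # amount of penalization chosen by cross-validation  # doctest: +SKIP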
| 0.000257 |
from django.conf.urls import patterns, include, url
from cooppizza.cardapio import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^pizza$', views.pizza, name='pizza'),
url(r'^bebida$', views.bebida, name='bebida'),
url(r'^ingrediente$', views.ingrediente, name='ingrediente'),
url(r'^ingrediente/novo$', views.ingredienteNovo, name='ingredienteNovo'),
url(r'^ingrediente/consulta$', views.ingredienteConsulta, name='ingredienteConsulta'),
url(r'^ingrediente/cadastrar$', views.ingredienteCadastrar, name='ingredienteCadastrar'),
url(r'^ingrediente/(?P<ingrediente_id>\d+)/editar$', views.ingredienteEdita, name='ingredienteEdita'),
url(r'^ingrediente/(?P<ingrediente_id>\d+)/alterar$', views.ingredienteAltera, name='ingredienteAltera'),
url(r'^ingrediente/(?P<ingrediente_id>\d+)$', views.ingredienteDados, name='ingredienteDados'),
url(r'^ingrediente/(?P<ingrediente_id>\d+)/deletar$', views.ingredienteDeletar, name='ingredienteDeletar'),
url(r'^pizza/nova$', views.pizzaNova, name='pizzaNova'),
url(r'^pizza/consulta$', views.pizzaConsulta, name='pizzaConsulta'),
url(r'^pizza/cadastrar$', views.pizzaCadastrar, name='pizzaCadastrar'),
url(r'^pizza/(?P<pizza_id>\d+)$', views.pizzaDados, name='pizzaDados'),
url(r'^pizza/(?P<pizza_id>\d+)/editar$', views.pizzaEdita, name='pizzaEdita'),
url(r'^pizza/(?P<pizza_id>\d+)/alterar$', views.pizzaAltera, name='pizzaAltera'),
url(r'^pizza/(?P<pizza_id>\d+)/ingredienteDaPizza$', views.pizzaIngrediente, name='pizzaIngrediente'),
url(r'^pizza/(?P<pizza_id>\d+)/adicionar$', views.pizzaIngredienteAdiciona, name='pizzaIngredienteAdiciona'),
url(r'^pizza/(?P<pizza_id>\d+)/deletar$', views.pizzaDeletar, name='pizzaDeletar'),
url(r'^pizza/(?P<pizza_id>\d+)/(?P<pizzaingrediente_id>\d+)/deletarIngrediente$', views.pizzaDeletarIngrediente, name='pizzaDeletarIngrediente'),
url(r'^bebida/nova$', views.bebidaNova, name='bebidaNova'),
url(r'^bebida/consulta$', views.bebidaConsulta, name='bebidaConsulta'),
url(r'^bebida/cadastrar$', views.bebidaCadastrar, name='bebidaCadastrar'),
url(r'^bebida/(?P<bebida_id>\d+)$', views.bebidaDados, name='bebidaDados'),
url(r'^bebida/(?P<bebida_id>\d+)/editar$', views.bebidaEdita, name='bebidaEdita'),
url(r'^bebida/(?P<bebida_id>\d+)/alterar$', views.bebidaAltera, name='bebidaAltera'),
url(r'^bebida/(?P<bebida_id>\d+)/deletar$', views.bebidaDeletar, name='bebidaDeletar'),
url(r'^lista$', views.listaProduto, name='lista'),
)
| 0.017814 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow composable models used as building blocks for estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import re
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
class _ComposableModel(object):
"""ABC for building blocks that can be used to create estimators.
Subclasses need to implement the following methods:
- build_model
- _get_optimizer
See below for the required signatures.
_ComposableModel and its subclasses are not part of the public tf.learn API.
"""
def __init__(self,
num_label_columns,
optimizer,
gradient_clip_norm,
num_ps_replicas,
scope,
trainable=True):
"""Common initialization for all _ComposableModel objects.
Args:
num_label_columns: The number of label columns.
optimizer: An instance of `tf.Optimizer` used to apply gradients to
the model. If `None`, will use a FTRL optimizer.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
num_ps_replicas: The number of parameter server replicas.
scope: Scope for variables created in this model.
trainable: True if this model contains variables that can be trained.
False otherwise (in cases where the variables are used strictly for
transforming input labels for training).
"""
self._num_label_columns = num_label_columns
self._optimizer = optimizer
self._gradient_clip_norm = gradient_clip_norm
self._num_ps_replicas = num_ps_replicas
self._scope = scope
self._trainable = trainable
self._feature_columns = None
def get_scope_name(self):
"""Returns the scope name used by this model for variables."""
return self._scope
def build_model(self, features, feature_columns, is_training):
"""Builds the model that can calculate the logits.
Args:
features: A mapping from feature columns to tensors.
feature_columns: An iterable containing all the feature columns used
by the model. All items in the set should be instances of
classes derived from `FeatureColumn`.
is_training: Set to True when training, False otherwise.
Returns:
The logits for this model.
"""
raise NotImplementedError
def get_train_step(self, loss):
"""Returns the ops to run to perform a training step on this estimator.
Args:
loss: The loss to use when calculating gradients.
Returns:
The ops to run to perform a training step.
"""
my_vars = self._get_vars()
if not (self._get_feature_columns() or my_vars):
return []
grads = gradients.gradients(loss, my_vars)
if self._gradient_clip_norm:
grads, _ = clip_ops.clip_by_global_norm(grads, self._gradient_clip_norm)
return [self._get_optimizer().apply_gradients(zip(grads, my_vars))]
def _get_feature_columns(self):
if not self._feature_columns:
return None
feature_column_ops.check_feature_columns(self._feature_columns)
return sorted(set(self._feature_columns), key=lambda x: x.key)
def _get_vars(self):
if self._get_feature_columns():
return ops.get_collection(self._scope)
return []
def _get_optimizer(self):
if (self._optimizer is None or isinstance(self._optimizer,
six.string_types)):
optimizer = self._get_default_optimizer(self._optimizer)
elif callable(self._optimizer):
optimizer = self._optimizer()
else:
optimizer = self._optimizer
return optimizer
def _get_default_optimizer(self, optimizer_name=None):
raise NotImplementedError
class LinearComposableModel(_ComposableModel):
"""A _ComposableModel that implements linear regression.
Instances of this class can be used to build estimators through the use
of composition.
"""
def __init__(self,
num_label_columns,
optimizer=None,
_joint_weights=False,
gradient_clip_norm=None,
num_ps_replicas=0,
scope=None,
trainable=True):
"""Initializes LinearComposableModel objects.
Args:
num_label_columns: The number of label columns.
optimizer: An instance of `tf.Optimizer` used to apply gradients to
the model. If `None`, will use a FTRL optimizer.
_joint_weights: If True use a single (possibly partitioned) variable
to store all weights in this model. Faster, but requires that all
feature columns are sparse and have the 'sum' combiner.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
num_ps_replicas: The number of parameter server replicas.
scope: Optional scope for variables created in this model. If scope
is not supplied, it will default to 'linear'.
trainable: True if this model contains variables that can be trained.
False otherwise (in cases where the variables are used strictly for
transforming input labels for training).
"""
scope = "linear" if not scope else scope
super(LinearComposableModel, self).__init__(
num_label_columns=num_label_columns,
optimizer=optimizer,
gradient_clip_norm=gradient_clip_norm,
num_ps_replicas=num_ps_replicas,
scope=scope,
trainable=trainable)
self._joint_weights = _joint_weights
def get_weights(self, model_dir):
"""Returns weights per feature of the linear part.
Args:
model_dir: Directory where model parameters, graph and etc. are saved.
Returns:
The weights created by this model (without the optimizer weights).
"""
all_variables = [name for name, _ in list_variables(model_dir)]
values = {}
optimizer_regex = r".*/" + self._get_optimizer().get_name() + r"(_\d)?$"
for name in all_variables:
if (name.startswith(self._scope + "/") and
name != self._scope + "/bias_weight" and
not re.match(optimizer_regex, name)):
values[name] = load_variable(model_dir, name)
if len(values) == 1:
return values[list(values.keys())[0]]
return values
def get_bias(self, model_dir):
"""Returns bias of the model.
Args:
model_dir: Directory where model parameters, graph and etc. are saved.
Returns:
The bias weights created by this model.
"""
return load_variable(model_dir, name=(self._scope + "/bias_weight"))
def build_model(self, features, feature_columns, is_training):
"""See base class."""
self._feature_columns = feature_columns
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=self._num_ps_replicas, min_slice_size=64 << 20)
with variable_scope.variable_scope(
self._scope, values=features.values(),
partitioner=partitioner) as scope:
if self._joint_weights:
logits, _, _ = layers.joint_weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=self._get_feature_columns(),
num_outputs=self._num_label_columns,
weight_collections=[self._scope],
trainable=self._trainable,
scope=scope)
else:
logits, _, _ = layers.weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=self._get_feature_columns(),
num_outputs=self._num_label_columns,
weight_collections=[self._scope],
trainable=self._trainable,
scope=scope)
return logits
def _get_default_optimizer(self, optimizer_name=None):
if optimizer_name is None:
optimizer_name = "Ftrl"
default_learning_rate = 1. / math.sqrt(len(self._get_feature_columns()))
default_learning_rate = min(0.2, default_learning_rate)
return layers.OPTIMIZER_CLS_NAMES[optimizer_name](
learning_rate=default_learning_rate)
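  # Example (illustrative): with 100 feature columns the default FTRL learning rate
  # above is min(0.2, 1 / sqrt(100)) = 0.1; with 9 columns it is capped at 0.2.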
class DNNComposableModel(_ComposableModel):
"""A _ComposableModel that implements a DNN.
Instances of this class can be used to build estimators through the use
of composition.
"""
def __init__(self,
num_label_columns,
hidden_units,
optimizer=None,
activation_fn=nn.relu,
dropout=None,
gradient_clip_norm=None,
num_ps_replicas=0,
scope=None,
trainable=True):
"""Initializes DNNComposableModel objects.
Args:
num_label_columns: The number of label columns.
hidden_units: List of hidden units per layer. All layers are fully
connected.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`,
will use `tf.nn.relu`.
dropout: When not None, the probability we will drop out
a given coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If no scope
        is supplied, one is generated.
trainable: True if this model contains variables that can be trained.
False otherwise (in cases where the variables are used strictly for
transforming input labels for training).
"""
scope = "dnn" if not scope else scope
super(DNNComposableModel, self).__init__(
num_label_columns=num_label_columns,
optimizer=optimizer,
gradient_clip_norm=gradient_clip_norm,
num_ps_replicas=num_ps_replicas,
scope=scope,
trainable=trainable)
self._hidden_units = hidden_units
self._activation_fn = activation_fn
self._dropout = dropout
def get_weights(self, model_dir):
"""Returns the weights of the model.
Args:
model_dir: Directory where model parameters, graph and etc. are saved.
Returns:
The weights created by this model.
"""
return [
load_variable(
model_dir, name=(self._scope + "/hiddenlayer_%d/weights" % i))
for i, _ in enumerate(self._hidden_units)
] + [load_variable(
model_dir, name=(self._scope + "/logits/weights"))]
def get_bias(self, model_dir):
"""Returns the bias of the model.
Args:
model_dir: Directory where model parameters, graph and etc. are saved.
Returns:
The bias weights created by this model.
"""
return [
load_variable(
model_dir, name=(self._scope + "/hiddenlayer_%d/biases" % i))
for i, _ in enumerate(self._hidden_units)
] + [load_variable(
model_dir, name=(self._scope + "/logits/biases"))]
def _add_hidden_layer_summary(self, value, tag):
# TODO(zakaria): Move this code to tf.learn and add test.
summary.scalar("%s/fraction_of_zero_values" % tag, nn.zero_fraction(value))
summary.histogram("%s/activation" % tag, value)
def build_model(self, features, feature_columns, is_training):
"""See base class."""
self._feature_columns = feature_columns
input_layer_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=self._num_ps_replicas, min_slice_size=64 << 20))
with variable_scope.variable_scope(
self._scope + "/input_from_feature_columns",
values=features.values(),
partitioner=input_layer_partitioner) as scope:
net = layers.input_from_feature_columns(
features,
self._get_feature_columns(),
weight_collections=[self._scope],
trainable=self._trainable,
scope=scope)
hidden_layer_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=self._num_ps_replicas))
for layer_id, num_hidden_units in enumerate(self._hidden_units):
with variable_scope.variable_scope(
self._scope + "/hiddenlayer_%d" % layer_id,
values=[net],
partitioner=hidden_layer_partitioner) as scope:
net = layers.fully_connected(
net,
num_hidden_units,
activation_fn=self._activation_fn,
variables_collections=[self._scope],
trainable=self._trainable,
scope=scope)
if self._dropout is not None and is_training:
net = layers.dropout(net, keep_prob=(1.0 - self._dropout))
self._add_hidden_layer_summary(net, scope.name)
with variable_scope.variable_scope(
self._scope + "/logits",
values=[net],
partitioner=hidden_layer_partitioner) as scope:
logits = layers.fully_connected(
net,
self._num_label_columns,
activation_fn=None,
variables_collections=[self._scope],
trainable=self._trainable,
scope=scope)
self._add_hidden_layer_summary(logits, "logits")
return logits
def _get_default_optimizer(self, optimizer_name=None):
if optimizer_name is None:
optimizer_name = "Adagrad"
return layers.OPTIMIZER_CLS_NAMES[optimizer_name](learning_rate=0.05)
| 0.003048 |
# -*- coding: utf-8 -*-
# beagle - scrape web resources for changes and notify users by email
# Copyright (C) 2013 The Open Knowledge Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import txmongo
import datetime
from collections import defaultdict
from scrapy import log
from beaglemail import sender, template
from db.collections import Users, Checksums
from twisted.internet import defer
class UpdateChecker(object):
"""
Check site items for updates by comparing checksums to those in a
MongoDB collection. Sends out emails to users assigned with the sites
if there are any updates.
"""
def open_spider(self, spider):
"""
Called when the spider starts (but before it crawls). This method is
decorated with defer.inlineCallbacks because those are used when
accessing MongoDB.
"""
# We will need to hold all checksums and changes until the spider
# is closed (so we won't access the database for every item)
self.checksums = defaultdict(set)
self.changes = defaultdict(list)
# Open up the database connection and update the checksums
with Checksums(spider.crawler.settings) as checksums:
self.checksums.update(checksums.all())
def process_item(self, item, spider):
"""
Process each crawled page/source and check if it has changes
"""
try:
            # Try to remove the checksum from the set of known checksums for this
            # site (raises KeyError if it is not there)
checksum = self.checksums[item['site']].remove(item['checksum'])
# If this leaves us with an empty set we remove the key
# this makes handling this later much easier
if not self.checksums[item['site']]:
del self.checksums[item['site']]
except KeyError:
# If there's a key error the checksum doesn't exist (either it's
# a new site or the site has been modified)
if item['checksum'] not in self.checksums[item['site']]:
# We add the site along with the url and the checksum to
# our changes dictionary
self.changes[item['site']].append({
'url':item['url'], 'checksum': item['checksum']})
return item
def close_spider(self, spider):
"""
        Called when the spider closes (after the crawl). This goes through all
        changes, additions, and removals, updates the database, and sends out an
        email if any of the sites have changed.
"""
with Checksums(spider.crawler.settings) as checksums:
# Go through all changes and update the checksums
for site, urls in self.changes.iteritems():
for url in urls:
result = checksums.update(site, url['url'], url['checksum'])
            # Remove all sites remaining in the checksums dict because they are no
            # longer accessible (not crawled)
for site, checksum_set in self.checksums.iteritems():
checksums.remove(site, list(checksum_set))
# We loop through the sites that have been changed to
# send emails to the user watching them and update its time.
# But first we create an emailer out of our settings
emailer = sender.Emailer(spider.settings)
with Users(spider.crawler.settings) as users:
for site in set(self.changes.keys()):
# Get the user that watches this dataset
for user in users.have_url(site):
# Send an email to that user
# Get plain and html content to send with the emailer
# The scraper email uses docurl for the site url and
# appurl to show where the form is
params = {'researcher':user['name'], 'docurl':site,
'appurl':spider.settings.get('FORM_URL', '')}
# We set html to True because we want both plain
# and html versions
(plain, html) = template.render('scraper.email', params,
user.get('locale', None),
html=True)
emailer.send(user['email'], plain, html_content=html)
# Update the last_changed for that particular site in the user's
# list of sites
users.touch(site)
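if __name__ == "__main__":
    # Standalone sketch of the checksum bookkeeping used in process_item above
    # (illustrative site/checksum values, not real data): known checksums are
    # consumed as items arrive, anything unknown is recorded as a change, and
    # whatever remains afterwards was not crawled this run.
    from collections import defaultdict

    known = defaultdict(set, {"example.org": {"abc", "def"}})
    changes = defaultdict(list)
    items = [{"site": "example.org", "url": "/a", "checksum": "abc"},
             {"site": "example.org", "url": "/b", "checksum": "xyz"}]
    for item in items:
        try:
            known[item["site"]].remove(item["checksum"])
            if not known[item["site"]]:
                del known[item["site"]]
        except KeyError:
            changes[item["site"]].append({"url": item["url"],
                                          "checksum": item["checksum"]})
    # known ends up as {"example.org": {"def"}} and changes holds the new "/b" entry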
| 0.002355 |
import sys
import numpy as np
import csv
from datetime import datetime
from datetime import timedelta
def subtract_dates(date1, date2):
"""
Takes two dates %Y-%m-%d format. Returns date1 - date2, measured in days.
"""
date_format = "%Y-%m-%d"
a = datetime.strptime(date1, date_format)
b = datetime.strptime(date2, date_format)
delta = a - b
#print(date1,"-",date2,"=",delta.days)
return delta.days
def steps_to_date(steps, start_date):
date_format = "%Y-%m-%d"
date_1 = datetime.strptime(start_date, "%Y-%m-%d")
new_date = (date_1 + timedelta(days=steps)).date()
return new_date
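# Quick example for the two helpers above (illustrative dates):
# subtract_dates("2012-03-05", "2012-02-29") returns 5, and
# steps_to_date(5, "2012-02-29") maps back to datetime.date(2012, 3, 5).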
def _processEntry(row, table, data_type, date_column, count_column, start_date):
"""
Code to process a population count from a CSV file.
column <date_column> contains the corresponding date in %Y-%m-%d format.
column <count_column> contains the population size on that date.
"""
if len(row) < 2:
return table
if row[0][0] == "#":
return table
if row[1]=="":
return table
# Make sure the date column becomes an integer, which contains the offset in days relative to the start date.
row[date_column] = subtract_dates(row[date_column], start_date)
if data_type == "int":
table = np.vstack([table,[int(row[date_column]), int(row[count_column])]])
else:
table = np.vstack([table,[float(row[date_column]), float(row[count_column])]])
return table
def AddCSVTables(table1, table2):
"""
Add two time series tables. This version does not yet support interpolation between values.
(The UNHCR data website also does not do this, by the way)
"""
table = np.zeros([0,2])
offset = 0
last_c2 = np.zeros(([1,2]))
for c2 in table2:
# If table 2 date value is higher, then keep adding entries from table 1
while c2[0] > table1[offset][0]:
table = np.vstack([table,[table1[offset][0], last_c2[1]+table1[offset][1]]])
if(offset < len(table1)-1):
offset += 1
else:
break
# If the two match, add a total.
if c2[0] == table1[offset][0]:
table = np.vstack([table,[c2[0], c2[1]+table1[offset][1]]])
if(offset < len(table1)-1):
offset += 1
last_c2 = c2
continue
# If table 1 value is higher, add an aggregate entry, and go to the next iteration without increasing the offset.
if c2[0] < table1[offset][0]:
table = np.vstack([table,[c2[0], c2[1]+table1[offset][1]]])
last_c2 = c2
continue
return table
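# Illustrative merge (made-up numbers): AddCSVTables(np.array([[0, 10], [5, 20]]),
# np.array([[0, 3], [5, 4]])) returns [[0, 13], [5, 24]] -- counts for matching
# day offsets are simply summed.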
def ConvertCsvFileToNumPyTable(csv_name, data_type="int", date_column=0, count_column=1, start_date="2012-02-29"):
"""
Converts a CSV file to a table with date offsets from 29 feb 2012.
CSV format for each line is:
yyyy-mm-dd,number
Default settings:
- subtract_dates is used on column 0.
- Use # sign to comment out lines. (first line is NOT ignored by default)
"""
table = np.zeros([0,2])
with open(csv_name, newline='') as csvfile:
values = csv.reader(csvfile)
row = next(values)
if(len(row)>1):
if len(row[0])>0 and "DateTime" not in row[0]:
table = _processEntry(row, table, data_type, date_column, count_column, start_date)
for row in values:
table = _processEntry(row, table, data_type, date_column, count_column, start_date)
return table
class DataTable:
def __init__(self, data_directory="mali2012", data_layout="data_layout_refugee.csv", start_date="2012-02-29", csvformat="generic"):
"""
read in CSV data files containing refugee data.
"""
self.csvformat = csvformat
self.total_refugee_column = 1
self.days_column = 0
self.header = []
self.data_table = []
self.start_date = start_date
self.override_refugee_input = False # Use modified input data for FLEE simulations
self.override_refugee_input_file = ""
self.data_directory = data_directory
if self.csvformat=="generic":
with open("%s/%s" % (data_directory, data_layout), newline='') as csvfile:
values = csv.reader(csvfile)
for row in values:
if(len(row)>1):
if(row[0][0] == "#"):
continue
self.header.append(row[0])
#print("%s/%s" % (data_directory, row[1]))
csv_total = ConvertCsvFileToNumPyTable("%s/%s" % (data_directory, row[1]), start_date=start_date)
for added_csv in row[2:]:
csv_total = AddCSVTables(csv_total, ConvertCsvFileToNumPyTable("%s/%s" % (data_directory, added_csv), start_date=start_date))
self.data_table.append(csv_total)
#print(self.header, self.data_table)
def override_input(self, data_file_name):
"""
Do not use the total refugee count data as the input value, but instead take values from a separate file.
"""
self.override_refugee_input_file = data_file_name
self.override_refugee_input = True
self.header.append("total (modified input)")
self.data_table.append(ConvertCsvFileToNumPyTable("%s" % (data_file_name), start_date=self.start_date))
def get_daily_difference(self, day, day_column=0, count_column=1, Debug=False, FullInterpolation=True):
"""
Extrapolate count of new refugees at a given time point, based on input data.
count_column = column which contains the relevant difference.
FullInterpolation: when disabled, the function ignores any decreases in refugee count.
when enabled, the function can return negative numbers when the new total is higher than the older one.
"""
self.total_refugee_column = count_column
self.days_column = day_column
ref_table = self.data_table[0]
if self.override_refugee_input == True:
ref_table = self.data_table[self._find_headerindex("total (modified input)")]
# Refugees only come in *after* day 0.
if int(day) == 0:
ref_table = self.data_table[0]
new_refugees = 0
for i in self.header[1:]:
new_refugees += self.get_field(i, 0, FullInterpolation)
#print("Day 0 data:",i,self.get_field(i, 0, FullInterpolation))
return int(new_refugees)
else:
new_refugees = 0
for i in self.header[1:]:
new_refugees += self.get_field(i, day, FullInterpolation) - self.get_field(i, day-1, FullInterpolation)
#print self.get_field("Mbera", day), self.get_field("Mbera", day-1)
return int(new_refugees)
# If the day exceeds the validation data table, then we return 0
return 0
def get_interpolated_data(self, column, day):
"""
    Gets the value in a given column for a given day. Interpolates between days as needed.
"""
ref_table = self.data_table[column]
old_val = ref_table[0,self.total_refugee_column]
#print(ref_table[0][self.days_column])
old_day = ref_table[0,self.days_column]
if day <= old_day:
return old_val
for i in range(1, len(ref_table)):
#print(day, ref_table[i][self.days_column])
if day < ref_table[i,self.days_column]:
old_val = ref_table[i-1,self.total_refugee_column]
old_day = ref_table[i-1,self.days_column]
fraction = float(day - old_day) / float(ref_table[i,self.days_column] - old_day)
if fraction > 1.0:
print("Error with days_column: ", ref_table[i,self.days_column])
return -1
#print(day, old_day, ref_table[i][self.total_refugee_column], old_val)
return int(old_val + fraction * float(ref_table[i,self.total_refugee_column] - old_val))
#print("# warning: ref_table length exceeded for column: ",day, self.header[column], ", last ref_table values: ", ref_table[i-1][self.total_refugee_column], ref_table[i][self.days_column])
return int(ref_table[-1,self.total_refugee_column])
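  # Interpolation example (illustrative numbers): with table rows (day 0, value 100)
  # and (day 10, value 200), a request for day 4 returns 100 + 0.4 * (200 - 100) = 140.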
def get_raw_data(self, column, day):
"""
    Gets the value in a given column for a given day. Does not interpolate.
"""
ref_table = self.data_table[column]
old_val = ref_table[0][self.total_refugee_column]
old_day = 0
for i in range (0,len(ref_table)):
if day >= ref_table[i][self.days_column]:
old_val = ref_table[i][self.total_refugee_column]
old_day = ref_table[i][self.days_column]
else:
break
return int(old_val)
def _find_headerindex(self, name):
"""
Finds matching index number for a particular name in the list of headers.
"""
for i in range(0,len(self.header)):
if self.header[i] == name:
return i
print(self.header)
sys.exit("Error: can't find the header %s in the header list" % (name))
def get_field(self, name, day, FullInterpolation=True):
"""
    Gets the value in a given named column for a given day. Interpolates between days if needed.
"""
i = self._find_headerindex(name)
if FullInterpolation:
#print(name, i, day, self.get_interpolated_data(i, day))
return self.get_interpolated_data(i, day)
else:
return self.get_raw_data(i, day)
def print_data_values_for_location(self, name, last_day):
"""
print all data values for selected location.
"""
for i in range(0,last_day):
print(i, self.get_field(name,i))
def is_interpolated(self, name, day):
"""
Checks if data for a given day is inter/extrapolated or not.
"""
for i in range(0,len(self.header)):
if self.header[i] == name:
ref_table = self.data_table[i]
for j in range(0, len(ref_table)):
if int(day) == int(ref_table[j][self.days_column]):
return False
if int(day) < int(ref_table[j][self.days_column]):
return True
return True
#def d.correctLevel1Registrations(name, date):
# correct for start date.
| 0.016253 |
"""Reporter foundation for Coverage."""
import os
from codeunit import code_unit_factory
class Reporter(object):
"""A base class for all reporters."""
def __init__(self, coverage, ignore_errors=False):
"""Create a reporter.
`coverage` is the coverage instance. `ignore_errors` controls how
skittish the reporter will be during file processing.
"""
self.coverage = coverage
self.ignore_errors = ignore_errors
# The code units to report on. Set by find_code_units.
self.code_units = []
# The directory into which to place the report, used by some derived
# classes.
self.directory = None
def find_code_units(self, morfs, omit_prefixes):
"""Find the code units we'll report on.
`morfs` is a list of modules or filenames. `omit_prefixes` is a list
of prefixes to leave out of the list.
"""
morfs = morfs or self.coverage.data.executed_files()
self.code_units = code_unit_factory(
morfs, self.coverage.file_locator, omit_prefixes)
self.code_units.sort()
def report_files(self, report_fn, morfs, directory=None,
omit_prefixes=None):
"""Run a reporting function on a number of morfs.
`report_fn` is called for each relative morf in `morfs`.
"""
self.find_code_units(morfs, omit_prefixes)
self.directory = directory
if self.directory and not os.path.exists(self.directory):
os.makedirs(self.directory)
for cu in self.code_units:
try:
if not cu.relative:
continue
statements, excluded, missing, _ = self.coverage._analyze(cu)
report_fn(cu, statements, excluded, missing)
except KeyboardInterrupt:
raise
except:
if not self.ignore_errors:
raise
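# Minimal subclass sketch (illustrative, not part of the original module; assumes
# the code units from code_unit_factory expose a ``name`` attribute):
class MissingCountReporter(Reporter):
    """Collect (name, missing statement count) pairs for each relative code unit."""
    def report(self, morfs, directory=None, omit_prefixes=None):
        results = []
        def count_missing(cu, statements, excluded, missing):
            results.append((cu.name, len(missing)))
        self.report_files(count_missing, morfs, directory, omit_prefixes)
        return results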
| 0.005374 |
# -*- coding: utf-8 -*-
import pytest
from esteid.util import get_id_from_legacy_common_name, get_name_from_legacy_common_name
@pytest.mark.parametrize(
"common_name,expected_name",
[
("TESTNUMBER,SEITSMES,51001091072", "Seitsmes Testnumber"),
("TESTNUMBER\\,SEITSMES\\,51001091072", "Seitsmes Testnumber"),
("TEST-NUMBER,SEITSMES MEES,51001091072", "Seitsmes Mees Test-Number"),
("O’CONNEŽ-ŠUSLIK,MARY ÄNN,11412090004", "Mary Änn O’Connež-Šuslik"),
],
)
def test_get_name_from_legacy_common_name(common_name, expected_name):
result = get_name_from_legacy_common_name(common_name)
assert result == expected_name
@pytest.mark.parametrize(
"common_name,expected_id",
[
("TESTNUMBER,SEITSMES,51001091072", "51001091072"),
("TESTNUMBER\\,SEITSMES\\,51001091072", "51001091072"),
("TEST-NUMBER,SEITSMES MEES,51001091072", "51001091072"),
("O’CONNEŽ-ŠUSLIK,MARY ÄNN,11412090004", "11412090004"),
],
)
def test_get_id_from_legacy_common_name(common_name, expected_id):
result = get_id_from_legacy_common_name(common_name)
assert result == expected_id
| 0.000866 |
#!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: netapp_e_amg_role
short_description: Update the role of a storage array within an Asynchronous Mirror Group (AMG).
description:
- Update a storage array to become the primary or secondary instance in an asynchronous mirror group
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
description:
- The ID of the primary storage array for the async mirror action
required: yes
role:
description:
- Whether the array should be the primary or secondary array for the AMG
required: yes
choices: ['primary', 'secondary']
noSync:
description:
- Whether to avoid synchronization prior to role reversal
required: no
default: no
choices: [yes, no]
force:
description:
- Whether to force the role reversal regardless of the online-state of the primary
required: no
default: no
"""
EXAMPLES = """
- name: Update the role of a storage array
netapp_e_amg_role:
name: updating amg role
role: primary
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
"""
RETURN = """
msg:
description: Failure message
returned: failure
type: string
sample: "No Async Mirror Group with the name."
"""
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError:
err = get_exception()
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
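# Illustrative call (URL and credentials are placeholders), mirroring how the
# helpers below use this wrapper:
#
#   rc, amgs = request(api_url + 'storage-systems/%s/async-mirrors' % ssid,
#                      url_username=user, url_password=pwd, headers=HEADERS)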
def has_match(module, ssid, api_url, api_pwd, api_usr, body, name):
amg_exists = False
has_desired_role = False
amg_id = None
amg_data = None
get_amgs = 'storage-systems/%s/async-mirrors' % ssid
url = api_url + get_amgs
try:
amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd,
headers=HEADERS)
except:
module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % (ssid))
for amg in amgs:
if amg['label'] == name:
amg_exists = True
amg_id = amg['id']
amg_data = amg
if amg['localRole'] == body.get('role'):
has_desired_role = True
return amg_exists, has_desired_role, amg_id, amg_data
def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
url = api_url + endpoint
post_data = json.dumps(body)
try:
request(url, data=post_data, method='POST', url_username=api_usr,
url_password=api_pwd, headers=HEADERS)
except:
err = get_exception()
module.fail_json(
msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))
status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
status_url = api_url + status_endpoint
try:
rc, status = request(status_url, method='GET', url_username=api_usr,
url_password=api_pwd, headers=HEADERS)
except:
err = get_exception()
module.fail_json(
msg="Failed to check status of AMG after role reversal. " +
"Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))
# Here we wait for the role reversal to complete
if 'roleChangeProgress' in status:
while status['roleChangeProgress'] != "none":
try:
rc, status = request(status_url, method='GET',
url_username=api_usr, url_password=api_pwd, headers=HEADERS)
except:
err = get_exception()
module.fail_json(
msg="Failed to check status of AMG after role reversal. " +
"Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))
return status
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
name=dict(required=True, type='str'),
role=dict(required=True, choices=['primary', 'secondary']),
noSync=dict(required=False, type='bool', default=False),
force=dict(required=False, type='bool', default=False),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
))
module = AnsibleModule(argument_spec=argument_spec)
p = module.params
ssid = p.pop('ssid')
api_url = p.pop('api_url')
user = p.pop('api_username')
pwd = p.pop('api_password')
name = p.pop('name')
if not api_url.endswith('/'):
api_url += '/'
agm_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, p, name)
if not agm_exists:
module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name)
elif has_desired_role:
module.exit_json(changed=False, **amg_data)
else:
amg_data = update_amg(module, ssid, api_url, user, pwd, p, async_id)
if amg_data:
module.exit_json(changed=True, **amg_data)
else:
module.exit_json(changed=True, msg="AMG role changed.")
if __name__ == '__main__':
main()
| 0.003431 |
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--interactive", action="store_true")
args = parser.parse_args()
import trajoptpy
import openravepy as rave
import numpy as np
import json
import trajoptpy.math_utils as mu
import trajoptpy.kin_utils as ku
def move_arm_to_grasp(xyz_targ, quat_targ, link_name, manip_name):
request = {
"basic_info" : {
"n_steps" : 10,
"manip" : manip_name,
"start_fixed" : True
},
"costs" : [
{
"type" : "collision",
"params" : {"coeffs" : [10],"dist_pen" : [0.025]}
},
{
"type" : "joint_vel",
"params": {"coeffs" : [1]}
} ],
"constraints" : [
{
"type" : "pose",
"name" : "final_pose",
"params" : {
"pos_coeffs" : [1,1,1],
"rot_coeffs" : [1,1,1],
"xyz" : list(xyz_targ),
"wxyz" : list(quat_targ),
"link" : link_name,
},
},
{
"type" : "cart_vel",
"name" : "cart_vel",
"params" : {
"distance_limit" : .01,
"first_step" : 7,
"last_step" : 9, #inclusive
"link" : link_name
},
}
],
"init_info" : {
"type" : "stationary"
}
}
return request
if __name__ == "__main__":
### Parameters ###
ENV_FILE = "data/wamtest1.env.xml"
MANIP_NAME = "arm"
LINK_NAME = "wam7"
##################
### Env setup ####
env = rave.Environment()
env.StopSimulation()
env.Load(ENV_FILE)
robot = env.GetRobots()[0]
manip = robot.GetManipulator(MANIP_NAME)
##################
T_w_mug = env.GetKinBody("mug-table-cluttered").GetLinks()[0].GetTransform()
xyz_targ = (T_w_mug[:3,3] + np.array([0,0,.3])).tolist()
quat_targ = np.array([0,1/np.sqrt(2),1/np.sqrt(2),0]).tolist()
request = move_arm_to_grasp(xyz_targ, quat_targ, LINK_NAME, MANIP_NAME)
s = json.dumps(request)
print "REQUEST:",s
trajoptpy.SetInteractive(args.interactive);
prob = trajoptpy.ConstructProblem(s, env)
result = trajoptpy.OptimizeProblem(prob)
| 0.029548 |
#!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Li, Hao<[email protected]>
import sys
import commands
import subprocess
import time
reload(sys)
sys.setdefaultencoding('utf-8')
ADB_CMD = "adb"
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the calling tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
class TestApp():
device = ""
location = ""
pkgname = ""
activname = ""
def __init__(self, device, location, pkgname, activname):
self.device = device
self.location = location
self.pkgname = pkgname
self.activname = activname
def install(self):
action_status = False
if self.location.endswith(".apk"):
if not self.isInstalled():
cmd = "%s -s %s install -r %s" % (ADB_CMD, self.device, self.location)
(return_code, output) = doCMD(cmd)
if self.isInstalled():
action_status = True
else:
print "-->> %s fail to install." % self.location
else:
print "-->> %s has been installed." % self.pkgname
else:
print "-->> Invalid apk location: %s " % self.location
return action_status
def uninstall(self):
action_status = False
if self.isInstalled():
cmd = "%s -s %s uninstall %s" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
if not self.isInstalled():
action_status = True
else:
print "-->> %s fail to uninstall." % self.pkgname
else:
print "-->> %s has not been installed." % self.pkgname
return action_status
def launch(self):
action_status = False
if not self.isRunning():
cmd = "%s -s %s shell am start -n %s/.%s" % (ADB_CMD, self.device, self.pkgname, self.activname)
(return_code, output) = doCMD(cmd)
## waiting for app launch
time.sleep(5)
if self.isRunning():
action_status = True
else:
print "-->> %s fail to launch." % self.pkgname
else:
print "-->> %s has been launched." % self.pkgname
return action_status
def switch(self):
action_status = False
# If in Activity, switch to background, otherwise switch to front
if self.isActivity():
# Switch to Home
# keycode
# 3 --> "KEYCODE_HOME"
time.sleep(5)
cmd = "%s -s %s shell input keyevent 3" % (ADB_CMD, self.device)
(return_code, output) = doCMD(cmd)
## waiting for app hidden
time.sleep(5)
if not self.isActivity():
action_status = True
else:
print "-->> %s fail to switch to background." % self.pkgname
else:
cmd = "%s -s %s shell am start -n %s/.%s" % (ADB_CMD, self.device, self.pkgname, self.activname)
(return_code, output) = doCMD(cmd)
## waiting for app launch
if self.isActivity():
action_status = True
else:
print "-->> %s fail to switch to front." % self.pkgname
return action_status
def stop(self):
action_status = False
if self.isRunning():
cmd = "%s -s %s shell am force-stop %s" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
if not self.isRunning():
action_status = True
else:
print "-->> %s fail to stop." % self.pkgname
else:
print "-->> %s has been stoped." % self.pkgname
return action_status
def isInstalled(self):
action_status = False
if not self.pkgname == "":
cmd = "%s -s %s shell pm list packages |grep %s|awk -F ':' '{print $2}'" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
if self.pkgname in output:
action_status = True
return action_status
def isRunning(self):
action_status = False
if not self.pkgname == "":
cmd = "%s -s %s shell ps |grep %s|awk -F ' ' '{print $NF}'" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
if self.pkgname in output:
action_status = True
return action_status
def isActivity(self):
action_status = False
if not self.pkgname == "":
cmd = "%s -s %s shell dumpsys activity |grep \"%s\"" % (ADB_CMD, self.device, "Recent #0")
(return_code, output) = doCMD(cmd)
for line in output:
if self.pkgname in line:
action_status = True
break
return action_status
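# Illustrative usage (device serial, apk path, package and activity names below are
# placeholders):
#
#   app = TestApp("emulator-5554", "/tmp/example.apk", "com.example.app", "MainActivity")
#   if app.install() and app.launch():
#       app.switch()   # push to background (or bring back to front)
#       app.stop()
#       app.uninstall()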
| 0.002176 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-*-*- encoding: utf-8 -*-*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Licensed to the BBC under a Contributor Agreement: PO
import Axon
import feedparser
from Kamaelia.Protocol.HTTP.HTTPClient import SimpleHTTPClient
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Util.Splitter import Plug, PlugSplitter
from Kamaelia.Util.OneShot import OneShot
from Axon.Ipc import producerFinished, shutdownMicroprocess
from ForwarderComponent import Forwarder
SAVE = 'pickle'
if SAVE == 'pickle':
import pickle
FILENAME = 'feeds-control.tmp'
def reset():
pickle.dump({}, open(FILENAME, 'w'))
def started(url):
data = pickle.load(open(FILENAME))
data[url] = 'started'
pickle.dump(data, open(FILENAME, 'w'))
def stopped(url):
data = pickle.load(open(FILENAME))
data[url] = 'stopped'
pickle.dump(data, open(FILENAME, 'w'))
reset()
else:
def started(url):
pass
def stopped(url):
pass
class Feedparser(Axon.Component.component):
"""
Feedparser(feedUrl) -> Feedparser object
It receives the content of a feed and sends the parsed
content. The parsed content is in a feedparser.FeedParserDict
object. It sets the 'href' attribute to the feedUrl.
"""
def __init__(self, feedUrl):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(Feedparser, self).__init__()
self.feedUrl = feedUrl
def main(self):
while True:
if self.dataReady("inbox"):
data = self.recv("inbox")
parseddata = feedparser.parse(data)
parseddata.href = self.feedUrl
if parseddata.has_key('bozo_exception'):
self.send(producerFinished(self),"signal")
stopped(self.feedUrl)
return
else:
self.send(parseddata, "outbox")
self.send(producerFinished(self),"signal")
stopped(self.feedUrl)
return
if self.dataReady("control"):
data = self.recv("control")
self.send(data,"signal")
if not isinstance(data, producerFinished):
print data
stopped(self.feedUrl)
return
if not self.anyReady():
self.pause()
yield 1
class FeedParserFactory(Axon.Component.component):
"""
FeedParserFactory() -> FeedParserFactory object
    It receives different feed URLs through the "inbox" inbox
    and sends each parsed feed through the "outbox" outbox.
    This class can handle multiple concurrent requests: it retrieves
    the content of each feed and parses it with the feedparser library.
    The result is one feedparser.FeedParserDict per feed URL
    provided.
"""
Inboxes = {
"inbox" : "Strings representing different URLs of feeds",
"control" : "From component...",
"_parsed-feeds" : "Parsed feeds retrieved from FeedParserFactory children",
}
Outboxes = {
"outbox" : "feedparser.FeedParserDict object representing a parsed feed",
"signal" : "From component...",
"_signal" : "To the internal parsers",
}
def __init__(self, **argd):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(FeedParserFactory, self).__init__(**argd)
self.mustStop = None
self.providerFinished = False
def makeFeedParser(self, feedUrl):
"""
makeFeedParser(feedUrl) -> Pipeline
It returns a pipeline which does not expect any input except for signals and
sends the parsed data through the "outbox" outbox.
"""
started(feedUrl)
return Pipeline(
OneShot(feedUrl),
SimpleHTTPClient(), # TODO: SimpleHTTPClient doesn't seem to have proxy support
)
def checkControl(self):
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg,producerFinished):
self.providerFinished = True
elif isinstance(msg,shutdownMicroprocess):
self.mustStop = msg
return self.mustStop, self.providerFinished
def handleChildTerminations(self): #taken from Carousel.py
for child in self.childComponents():
if child._isStopped():
self.removeChild(child)
def initiateInternalSplitter(self):
self.internalSplitter = PlugSplitter()
self.link((self,'_signal'), (self.internalSplitter, 'control'))
self.addChildren(self.internalSplitter)
self.internalSplitter.activate()
def linkChildToInternalSplitter(self, child):
forwarder = Forwarder()
plug = Plug(self.internalSplitter, forwarder)
plug.activate()
plug.link((plug, 'signal'), (child, 'control'))
child.link((child, 'signal'), (plug, 'control'))
def createChild(self, feed):
child = self.makeFeedParser(feed.url)
child = Pipeline(child, Feedparser(feed.url))
self.link( (child, 'outbox'), (self, '_parsed-feeds') )
self.linkChildToInternalSplitter(child)
return child
def waitForChildren(self, signalMessage):
self.send(signalMessage,"_signal")
while len(self.childComponents()) > 0:
self.handleChildTerminations()
yield 1
def main(self):
self.initiateInternalSplitter()
yield 1
while True:
mustStop, providerFinished = self.checkControl()
if mustStop:
self.send(mustStop,"signal")
return
self.handleChildTerminations()
while self.dataReady("inbox"):
feed = self.recv("inbox")
child = self.createChild(feed)
self.addChildren(child)
child.activate()
while self.dataReady("_parsed-feeds"):
parseddata = self.recv("_parsed-feeds")
self.send(parseddata,"outbox")
if providerFinished and len(self.childComponents()) == 1:
# TODO: CHECK IF THIS IS THE PROBLEM
# It's actually only waiting for the plugsplitter
for _ in self.waitForChildren(producerFinished(self)):
yield 1
pfinished = producerFinished(self)
self.send(pfinished,"signal")
return
if not self.anyReady():
self.pause()
yield 1
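# Illustrative usage sketch (not part of the original code). FeedParserFactory only
# requires that each message arriving on its "inbox" expose a ``url`` attribute
# (see createChild above); the Feed class, feed URL and ConsoleEchoer consumer
# below are hypothetical/assumed, not taken from this module:
#
#     from Kamaelia.Util.Console import ConsoleEchoer   # assumed import path
#
#     class Feed(object):
#         def __init__(self, url):
#             self.url = url
#
#     Pipeline(
#         OneShot(Feed("http://example.com/rss.xml")),
#         FeedParserFactory(),
#         ConsoleEchoer(),
#     ).run()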
| 0.008567 |
import unittest
import argparse as ap
import subprocess as sp
from iris.tests import IrisTest
import imageservice
from imageservice import serveupimage
from imageservice import networking
from imageservice import imageproc
from imageservice import packer
from imageservice import dataproc
from imageservice import config as conf
import numpy as np
import iris
from numpy.testing import assert_array_equal
import os
import shutil
import time
fileDir = os.path.dirname(__file__)
class UnitTests(unittest.TestCase):
def setUp(self):
self.profile = ap.Namespace(**conf.profiles["default"])
self.data = serveupimage.loadCube(os.path.join(fileDir, "data", "test_input.nc"),
conf.topog_file,
self.profile.data_constraint)
self.proced_data = iris.load_cube(os.path.join(fileDir, "data", "proced_data.nc"))
self.tiled_data = iris.load_cube(os.path.join(fileDir, "data", "tiled_data.nc")).data
def test_dataproc(self):
# tidy up any problems arising from the on-the-fly altitude calc
san_data = dataproc.sanitizeAlt(self.data)
# regrid and restratify the data
rg_data = dataproc.regridData(san_data,
regrid_shape=self.profile.regrid_shape,
extent=self.profile.extent)
# do any further processing (saturation etc) and convert to 8 bit uint
proced_data = dataproc.procDataCube(rg_data)
self.assertTrue(proced_data.data.max() <= conf.max_val)
assert_array_equal(self.proced_data.data, proced_data.data)
def test_packer(self):
        self.assertEqual(packer.find_i_j(10, 20, 15, nchannels=3), [16, 128])
def test_imageproc(self):
data_tiled = imageproc.tileArray(self.proced_data.data)
assert_array_equal(self.tiled_data, data_tiled)
def test_networking(self):
networking.postImage(self.tiled_data, self.data)
class IntegrationTest(unittest.TestCase):
def test_integration(self):
inputfile = os.path.join(fileDir, "data", "test_input.nc")
sp.call(["imageservice/serveupimage.py",
"--profile=default",
inputfile])
def resetTestData(new_data_array, test_data_file):
_ = iris.cube.Cube(new_data_array)
iris.save(_, test_data_file)
if __name__ == '__main__':
unittest.main()
| 0.002035 |
""" Illustris Simulation: Public Data Release.
groupcat.py: File I/O related to the FoF and Subfind group catalogs. """
from os.path import isfile
import numpy as np
import h5py
def gcPath(basePath,snapNum,chunkNum=0):
""" Return absolute path to a group catalog HDF5 file (modify as needed). """
gcPath = basePath + '/groups_%03d/' % snapNum
filePath1 = gcPath + 'groups_%03d.%d.hdf5' % (snapNum, chunkNum)
filePath2 = gcPath + 'fof_subhalo_tab_%03d.%d.hdf5' % (snapNum, chunkNum)
if isfile(filePath1):
return filePath1
return filePath2
def offsetPath(basePath, snapNum):
""" Return absolute path to a separate offset file (modify as needed). """
offsetPath = basePath + '../postprocessing/offsets/offsets_%03d.hdf5' % snapNum
return offsetPath
def loadObjects(basePath,snapNum,gName,nName,fields):
""" Load either halo or subhalo information from the group catalog. """
result = {}
# make sure fields is not a single element
if isinstance(fields, basestring):
fields = [fields]
# load header from first chunk
with h5py.File(gcPath(basePath,snapNum),'r') as f:
header = dict( f['Header'].attrs.items() )
result['count'] = f['Header'].attrs['N'+nName+'_Total']
if not result['count']:
print('warning: zero groups, empty return (snap='+str(snapNum)+').')
return result
# if fields not specified, load everything
if not fields:
fields = f[gName].keys()
for field in fields:
# verify existence
if not field in f[gName].keys():
raise Exception("Group catalog does not have requested field ["+field+"]!")
# replace local length with global
shape = list(f[gName][field].shape)
shape[0] = result['count']
# allocate within return dict
result[field] = np.zeros( shape, dtype=f[gName][field].dtype )
# loop over chunks
wOffset = 0
    for i in range(header['NumFiles']):
        # use a context manager so each chunk file is closed even when the
        # chunk is empty and we skip it (the original left the handle open)
        with h5py.File(gcPath(basePath,snapNum,i),'r') as f:
            if not f['Header'].attrs['N'+nName+'_ThisFile']:
                continue # empty file chunk
            # loop over each requested field
            for field in fields:
                # shape and type
                shape = f[gName][field].shape
                # read data local to the current file
                if len(shape) == 1:
                    result[field][wOffset:wOffset+shape[0]] = f[gName][field][0:shape[0]]
                else:
                    result[field][wOffset:wOffset+shape[0],:] = f[gName][field][0:shape[0],:]
            wOffset += shape[0]
# only a single field? then return the array instead of a single item dict
if len(fields) == 1:
return result[fields[0]]
return result
def loadSubhalos(basePath,snapNum,fields=None):
""" Load all subhalo information from the entire group catalog for one snapshot
(optionally restrict to a subset given by fields). """
return loadObjects(basePath,snapNum,"Subhalo","subgroups",fields)
def loadHalos(basePath,snapNum,fields=None):
""" Load all halo information from the entire group catalog for one snapshot
(optionally restrict to a subset given by fields). """
return loadObjects(basePath,snapNum,"Group","groups",fields)
def loadHeader(basePath,snapNum):
""" Load the group catalog header. """
with h5py.File(gcPath(basePath,snapNum),'r') as f:
header = dict( f['Header'].attrs.items() )
return header
def load(basePath,snapNum):
""" Load complete group catalog all at once. """
r = {}
r['subhalos'] = loadSubhalos(basePath,snapNum)
r['halos'] = loadHalos(basePath,snapNum)
r['header'] = loadHeader(basePath,snapNum)
return r
def loadSingle(basePath,snapNum,haloID=-1,subhaloID=-1):
""" Return complete group catalog information for one halo or subhalo. """
if (haloID < 0 and subhaloID < 0) or (haloID >= 0 and subhaloID >= 0):
raise Exception("Must specify either haloID or subhaloID (and not both).")
gName = "Subhalo" if subhaloID >= 0 else "Group"
searchID = subhaloID if subhaloID >= 0 else haloID
# old or new format
if 'fof_subhalo' in gcPath(basePath,snapNum):
# use separate 'offsets_nnn.hdf5' files
with h5py.File(offsetPath(basePath,snapNum),'r') as f:
offsets = f['FileOffsets/'+gName][()]
else:
# use header of group catalog
with h5py.File(gcPath(basePath,snapNum),'r') as f:
offsets = f['Header'].attrs['FileOffsets_'+gName]
offsets = searchID - offsets
fileNum = np.max( np.where(offsets >= 0) )
groupOffset = offsets[fileNum]
# load halo/subhalo fields into a dict
result = {}
with h5py.File(gcPath(basePath,snapNum,fileNum),'r') as f:
for haloProp in f[gName].keys():
result[haloProp] = f[gName][haloProp][groupOffset]
return result
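# Illustrative usage sketch (not part of the original module). The base path,
# snapshot number and field name below are hypothetical; note that loadHalos()
# with a single requested field returns the bare array, as implemented in
# loadObjects() above.
if __name__ == '__main__':
    basePath = './Illustris-1/output'  # assumed location of the simulation output
    snapNum = 135                      # assumed snapshot number
    halo_masses = loadHalos(basePath, snapNum, fields=['GroupMass'])
    first_halo = loadSingle(basePath, snapNum, haloID=0)
    print('loaded %d halo masses; first halo has %d fields'
          % (len(halo_masses), len(first_halo)))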
| 0.019267 |
"""Python part of the warnings subsystem."""
import sys
__all__ = ["warn", "warn_explicit", "showwarning",
"formatwarning", "filterwarnings", "simplefilter",
"resetwarnings", "catch_warnings"]
def showwarning(message, category, filename, lineno, file=None, line=None):
"""Hook to write a warning to a file; replace if you like."""
if file is None:
file = sys.stderr
if file is None:
# sys.stderr is None when run with pythonw.exe - warnings get lost
return
try:
file.write(formatwarning(message, category, filename, lineno, line))
except OSError:
pass # the file (probably stderr) is invalid - this warning gets lost.
def formatwarning(message, category, filename, lineno, line=None):
"""Function to format a warning the standard way."""
import linecache
s = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
line = linecache.getline(filename, lineno) if line is None else line
if line:
line = line.strip()
s += " %s\n" % line
return s
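# Example of the formatted output (illustrative), for a warning raised on line 12
# of a hypothetical spam.py whose source line is "frobnicate(ham)":
#
#     spam.py:12: DeprecationWarning: don't do that
#       frobnicate(ham)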
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
append=False):
"""Insert an entry into the list of warnings filters (at the front).
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'message' -- a regex that the warning message must match
'category' -- a class that the warning must be a subclass of
'module' -- a regex that the module name must match
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
import re
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(message, str), "message must be a string"
assert isinstance(category, type), "category must be a class"
assert issubclass(category, Warning), "category must be a Warning subclass"
assert isinstance(module, str), "module must be a string"
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, re.compile(message, re.I), category,
re.compile(module), lineno)
if append:
filters.append(item)
else:
filters.insert(0, item)
_filters_mutated()
def simplefilter(action, category=Warning, lineno=0, append=False):
"""Insert a simple entry into the list of warnings filters (at the front).
A simple filter matches all modules and messages.
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'category' -- a class that the warning must be a subclass of
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, None, category, None, lineno)
if append:
filters.append(item)
else:
filters.insert(0, item)
_filters_mutated()
def resetwarnings():
"""Clear the list of warning filters, so that no filters are active."""
filters[:] = []
_filters_mutated()
class _OptionError(Exception):
"""Exception used by option processing helpers."""
pass
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
for arg in args:
try:
_setoption(arg)
except _OptionError as msg:
print("Invalid -W option ignored:", msg, file=sys.stderr)
# Helper for _processoptions()
def _setoption(arg):
import re
parts = arg.split(':')
if len(parts) > 5:
raise _OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append('')
action, message, category, module, lineno = [s.strip()
for s in parts]
action = _getaction(action)
message = re.escape(message)
category = _getcategory(category)
module = re.escape(module)
if module:
module = module + '$'
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise _OptionError("invalid lineno %r" % (lineno,))
else:
lineno = 0
filterwarnings(action, message, category, module, lineno)
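# Illustrative example: a -W option string such as
# "error::DeprecationWarning:mymodule:10" (hypothetical) is split into the five
# fields above and ends up as roughly
#     filterwarnings("error", message="", category=DeprecationWarning,
#                    module="mymodule$", lineno=10)
# with the message and module fields passed through re.escape() first.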
# Helper for _setoption()
def _getaction(action):
if not action:
return "default"
if action == "all": return "always" # Alias
for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
if a.startswith(action):
return a
raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
import re
if not category:
return Warning
if re.match("^[a-zA-Z0-9_]+$", category):
try:
cat = eval(category)
except NameError:
raise _OptionError("unknown warning category: %r" % (category,))
else:
i = category.rfind(".")
module = category[:i]
klass = category[i+1:]
try:
m = __import__(module, None, None, [klass])
except ImportError:
raise _OptionError("invalid module name: %r" % (module,))
try:
cat = getattr(m, klass)
except AttributeError:
raise _OptionError("unknown warning category: %r" % (category,))
if not issubclass(cat, Warning):
raise _OptionError("invalid warning category: %r" % (category,))
return cat
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
if isinstance(message, Warning):
category = message.__class__
# Check category argument
if category is None:
category = UserWarning
assert issubclass(category, Warning)
# Get context information
try:
caller = sys._getframe(stacklevel)
except ValueError:
globals = sys.__dict__
lineno = 1
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__name__' in globals:
module = globals['__name__']
else:
module = "<string>"
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith((".pyc", ".pyo")):
filename = filename[:-1]
else:
if module == "__main__":
try:
filename = sys.argv[0]
except AttributeError:
# embedded interpreters don't have sys.argv, see bug #839151
filename = '__main__'
if not filename:
filename = module
registry = globals.setdefault("__warningregistry__", {})
warn_explicit(message, category, filename, lineno, module, registry,
globals)
def warn_explicit(message, category, filename, lineno,
module=None, registry=None, module_globals=None):
lineno = int(lineno)
if module is None:
module = filename or "<unknown>"
if module[-3:].lower() == ".py":
module = module[:-3] # XXX What about leading pathname?
if registry is None:
registry = {}
if registry.get('version', 0) != _filters_version:
registry.clear()
registry['version'] = _filters_version
if isinstance(message, Warning):
text = str(message)
category = message.__class__
else:
text = message
message = category(message)
key = (text, category, lineno)
# Quick test for common case
if registry.get(key):
return
# Search the filters
for item in filters:
action, msg, cat, mod, ln = item
if ((msg is None or msg.match(text)) and
issubclass(category, cat) and
(mod is None or mod.match(module)) and
(ln == 0 or lineno == ln)):
break
else:
action = defaultaction
# Early exit actions
if action == "ignore":
registry[key] = 1
return
# Prime the linecache for formatting, in case the
# "file" is actually in a zipfile or something.
import linecache
linecache.getlines(filename, module_globals)
if action == "error":
raise message
# Other actions
if action == "once":
registry[key] = 1
oncekey = (text, category)
if onceregistry.get(oncekey):
return
onceregistry[oncekey] = 1
elif action == "always":
pass
elif action == "module":
registry[key] = 1
altkey = (text, category, 0)
if registry.get(altkey):
return
registry[altkey] = 1
elif action == "default":
registry[key] = 1
else:
# Unrecognized actions are errors
raise RuntimeError(
"Unrecognized action (%r) in warnings.filters:\n %s" %
(action, item))
if not callable(showwarning):
raise TypeError("warnings.showwarning() must be set to a "
"function or method")
# Print message and context
showwarning(message, category, filename, lineno)
class WarningMessage(object):
"""Holds the result of a single showwarning() call."""
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line")
def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
self._category_name = category.__name__ if category else None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class catch_warnings(object):
"""A context manager that copies and restores the warnings filter upon
exiting the context.
The 'record' argument specifies whether warnings should be captured by a
custom implementation of warnings.showwarning() and be appended to a list
returned by the context manager. Otherwise None is returned by the context
manager. The objects appended to the list are arguments whose attributes
mirror the arguments to showwarning().
The 'module' argument is to specify an alternative module to the module
named 'warnings' and imported under that name. This argument is only useful
when testing the warnings module itself.
"""
def __init__(self, *, record=False, module=None):
"""Specify whether to record warnings and if an alternative module
should be used other than sys.modules['warnings'].
For compatibility with Python 3.0, please consider all arguments to be
keyword-only.
"""
self._record = record
self._module = sys.modules['warnings'] if module is None else module
self._entered = False
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._module._filters_mutated()
self._showwarning = self._module.showwarning
if self._record:
log = []
def showwarning(*args, **kwargs):
log.append(WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module._filters_mutated()
self._module.showwarning = self._showwarning
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warned about, or 0 to mean any line
# If either of the compiled regexes is None, it matches anything.
_warnings_defaults = False
try:
from _warnings import (filters, _defaultaction, _onceregistry,
warn, warn_explicit, _filters_mutated)
defaultaction = _defaultaction
onceregistry = _onceregistry
_warnings_defaults = True
except ImportError:
filters = []
defaultaction = "default"
onceregistry = {}
_filters_version = 1
def _filters_mutated():
global _filters_version
_filters_version += 1
# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
silence = [ImportWarning, PendingDeprecationWarning]
silence.append(DeprecationWarning)
for cls in silence:
simplefilter("ignore", category=cls)
bytes_warning = sys.flags.bytes_warning
if bytes_warning > 1:
bytes_action = "error"
elif bytes_warning:
bytes_action = "default"
else:
bytes_action = "ignore"
simplefilter(bytes_action, category=BytesWarning, append=1)
# resource usage warnings are enabled by default in pydebug mode
if hasattr(sys, 'gettotalrefcount'):
resource_action = "always"
else:
resource_action = "ignore"
simplefilter(resource_action, category=ResourceWarning, append=1)
del _warnings_defaults
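# Illustrative usage sketch (not part of the module): recording warnings in a test.
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         warnings.warn("old API", DeprecationWarning)
#     assert len(caught) == 1 and caught[0].category is DeprecationWarning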
| 0.001328 |
# coding: utf-8
"""
Grumpy uses `pythonparser` as its AST parser. This module contains an augmented
(extended) parser from it, letting us accept special Grumpy-only syntax, like
the `import '__go__/...'` syntax for importing Go code.
"""
import logging
import pythonparser.parser
from pythonparser.parser import Parser, Seq, Loc, Opt, Tok, List, Alt, Rule, action
from pythonparser import ast
logger = logging.getLogger(__name__)
PYTHONPARSER_PATCHED = False
def patch_pythonparser():
    global PYTHONPARSER_PATCHED
    if PYTHONPARSER_PATCHED:
        return False
    logger.info('Monkeypatching pythonparser.parser.Parser with Grumpy extensions')
    pythonparser.parser.Parser = GrumpyParser
    PYTHONPARSER_PATCHED = True
    return True
class GrumpyParser(Parser):
# From: https://github.com/google/grumpy/commit/9d80504e8d42c4a03ece9ed983b0ca160d170969#diff-c46e216e8423951b5f41dde139575b68R1038
@action(Rule("atom_5"))
def import_from_7(self, string):
return (None, 0), (string.loc, string.s)
# From: https://github.com/google/grumpy/commit/9d80504e8d42c4a03ece9ed983b0ca160d170969#diff-c46e216e8423951b5f41dde139575b68R1046
@action(Seq(Loc("from"), Alt(Parser.import_from_3, Parser.import_from_4, import_from_7),
Loc("import"), Alt(Parser.import_from_5,
Seq(Loc("("), Rule("import_as_names"), Loc(")")),
Parser.import_from_6)))
def import_from(self, from_loc, module_name, import_loc, names):
"""
(2.6, 2.7)
import_from: ('from' ('.'* dotted_name | '.'+)
'import' ('*' | '(' import_as_names ')' | import_as_names))
(3.0-)
# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
'import' ('*' | '(' import_as_names ')' | import_as_names))
"""
(dots_loc, dots_count), dotted_name_opt = module_name
module_loc = module = None
if dotted_name_opt:
module_loc, module = dotted_name_opt
lparen_loc, names, rparen_loc = names
loc = from_loc.join(names[-1].loc)
if rparen_loc:
loc = loc.join(rparen_loc)
if module == "__future__":
self.add_flags([x.name for x in names])
return ast.ImportFrom(names=names, module=module, level=dots_count,
keyword_loc=from_loc, dots_loc=dots_loc, module_loc=module_loc,
import_loc=import_loc, lparen_loc=lparen_loc, rparen_loc=rparen_loc,
loc=loc)
@action(Seq(Rule("atom_5"), Opt(Seq(Loc("as"), Tok("ident")))))
def str_as_name(self, string, as_name_opt):
asname_name = asname_loc = as_loc = None
loc = string.loc
if as_name_opt:
as_loc, asname = as_name_opt
asname_name = asname.value
asname_loc = asname.loc
loc = loc.join(asname.loc)
return ast.alias(name=string.s, asname=asname_name,
loc=loc, name_loc=string.loc, as_loc=as_loc, asname_loc=asname_loc)
dotted_as_names = List(Alt(Rule("dotted_as_name"), Rule("str_as_name")), ",", trailing=False)
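# Illustrative sketch (not part of the original module): after calling
# patch_pythonparser(), the parser should accept Grumpy's string-import form,
# e.g. source such as
#
#     from '__go__/fmt' import Println
#
# Whether a given entry point (for example pythonparser.parse()) picks up the
# patched Parser class depends on how it instantiates the parser; treat this as
# an assumption rather than verified behaviour.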
| 0.003617 |
#
# CanvasRenderAgg.py -- for rendering into Ginga widget with aggdraw
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import math
from itertools import chain
import numpy as np
import aggdraw as agg
from . import AggHelp
from ginga.canvas import render
# force registration of all canvas types
import ginga.canvas.types.all # noqa
from ginga import trcalc
class RenderContext(render.RenderContextBase):
def __init__(self, renderer, viewer, surface):
render.RenderContextBase.__init__(self, renderer, viewer)
# TODO: encapsulate this drawable
self.cr = AggHelp.AggContext(surface)
self.pen = None
self.brush = None
self.font = None
def set_line_from_shape(self, shape):
# TODO: support style
alpha = getattr(shape, 'alpha', 1.0)
linewidth = getattr(shape, 'linewidth', 1.0)
self.pen = self.cr.get_pen(shape.color, linewidth=linewidth,
alpha=alpha)
def set_fill_from_shape(self, shape):
fill = getattr(shape, 'fill', False)
if fill:
if hasattr(shape, 'fillcolor') and shape.fillcolor:
color = shape.fillcolor
else:
color = shape.color
alpha = getattr(shape, 'alpha', 1.0)
alpha = getattr(shape, 'fillalpha', alpha)
self.brush = self.cr.get_brush(color, alpha=alpha)
else:
self.brush = None
def set_font_from_shape(self, shape):
if hasattr(shape, 'font'):
if (hasattr(shape, 'fontsize') and shape.fontsize is not None and
not getattr(shape, 'fontscale', False)):
fontsize = shape.fontsize
else:
fontsize = shape.scale_font(self.viewer)
fontsize = self.scale_fontsize(fontsize)
alpha = getattr(shape, 'alpha', 1.0)
self.font = self.cr.get_font(shape.font, fontsize, shape.color,
alpha=alpha)
else:
self.font = None
def initialize_from_shape(self, shape, line=True, fill=True, font=True):
if line:
self.set_line_from_shape(shape)
if fill:
self.set_fill_from_shape(shape)
if font:
self.set_font_from_shape(shape)
def set_line(self, color, alpha=1.0, linewidth=1, style='solid'):
# TODO: support line width and style
self.pen = self.cr.get_pen(color, alpha=alpha)
def set_fill(self, color, alpha=1.0):
if color is None:
self.brush = None
else:
self.brush = self.cr.get_brush(color, alpha=alpha)
def set_font(self, fontname, fontsize, color='black', alpha=1.0):
fontsize = self.scale_fontsize(fontsize)
self.font = self.cr.get_font(fontname, fontsize, color,
alpha=alpha)
def text_extents(self, text):
return self.cr.text_extents(text, self.font)
def get_affine_transform(self, cx, cy, rot_deg):
x, y = cx, cy # old center
nx, ny = cx, cy # new center
sx = sy = 1.0 # new scale
cosine = math.cos(math.radians(rot_deg))
sine = math.sin(math.radians(rot_deg))
a = cosine / sx
b = sine / sx
c = x - nx * a - ny * b
d = -sine / sy
e = cosine / sy
f = y - nx * d - ny * e
return (a, b, c, d, e, f)
##### DRAWING OPERATIONS #####
def draw_image(self, cvs_img, cpoints, rgb_arr, whence, order='RGBA'):
# no-op for this renderer
pass
def draw_text(self, cx, cy, text, rot_deg=0.0):
wd, ht = self.cr.text_extents(text, self.font)
affine = self.get_affine_transform(cx, cy, rot_deg)
self.cr.canvas.settransform(affine)
try:
self.cr.canvas.text((cx, cy - ht), text, self.font)
finally:
# reset default transform
self.cr.canvas.settransform()
def draw_polygon(self, cpoints):
cpoints = trcalc.strip_z(cpoints)
self.cr.canvas.polygon(list(chain.from_iterable(cpoints)),
self.pen, self.brush)
def draw_circle(self, cx, cy, cradius):
self.cr.canvas.ellipse(
(cx - cradius, cy - cradius, cx + cradius, cy + cradius),
self.pen, self.brush)
def draw_bezier_curve(self, cp):
# there is a bug in path handling of some versions of aggdraw--
# aggdraw here is ok:
path = agg.Path()
path.moveto(cp[0][0], cp[0][1])
path.curveto(cp[1][0], cp[1][1], cp[2][0], cp[2][1], cp[3][0], cp[3][1])
self.cr.canvas.path(path, self.pen, self.brush)
def draw_ellipse_bezier(self, cp):
# draw 4 bezier curves to make the ellipse because there seems
# to be a bug in aggdraw ellipse drawing function
path = agg.Path()
path.moveto(cp[0][0], cp[0][1])
path.curveto(cp[1][0], cp[1][1], cp[2][0], cp[2][1], cp[3][0], cp[3][1])
path.curveto(cp[4][0], cp[4][1], cp[5][0], cp[5][1], cp[6][0], cp[6][1])
path.curveto(cp[7][0], cp[7][1], cp[8][0], cp[8][1], cp[9][0], cp[9][1])
path.curveto(cp[10][0], cp[10][1], cp[11][0], cp[11][1], cp[12][0], cp[12][1])
self.cr.canvas.path(path, self.pen, self.brush)
def draw_line(self, cx1, cy1, cx2, cy2):
self.cr.canvas.line((cx1, cy1, cx2, cy2), self.pen)
def draw_path(self, cpoints):
cp = trcalc.strip_z(cpoints)
# TODO: is there a more efficient way in aggdraw to do this?
path = agg.Path()
path.moveto(cp[0][0], cp[0][1])
for pt in cp[1:]:
path.lineto(pt[0], pt[1])
self.cr.canvas.path(path, self.pen, self.brush)
class CanvasRenderer(render.StandardPixelRenderer):
def __init__(self, viewer):
render.StandardPixelRenderer.__init__(self, viewer)
self.kind = 'agg'
self.rgb_order = 'RGBA'
self.surface = None
self.dims = ()
def resize(self, dims):
"""Resize our drawing area to encompass a space defined by the
given dimensions.
"""
width, height = dims[:2]
self.logger.debug("renderer reconfigured to %dx%d" % (
width, height))
# create agg surface the size of the window
self.surface = agg.Draw(self.rgb_order, (width, height), 'black')
super(CanvasRenderer, self).resize(dims)
def render_image(self, data, order, win_coord):
"""Render the image represented by (data) at (win_coord)
in the pixel space.
*** internal method-- do not use ***
"""
if self.surface is None:
return
self.logger.debug("redraw surface")
# get window contents as a buffer and load it into the AGG surface
rgb_buf = data.tobytes(order='C')
self.surface.frombytes(rgb_buf)
# for debugging
# import os.path, tempfile
# self.save_rgb_image_as_file(os.path.join(tempfile.gettempdir(),
# 'agg_out.png', format='png'))
def get_surface_as_array(self, order=None):
if self.surface is None:
raise render.RenderError("No AGG surface defined")
# TODO: could these have changed between the time that self.surface
# was last updated?
wd, ht = self.dims
# Get agg surface as a numpy array
arr8 = np.frombuffer(self.surface.tobytes(), dtype=np.uint8)
arr8 = arr8.reshape((ht, wd, len(self.rgb_order)))
# adjust according to viewer's needed order
return self.reorder(order, arr8)
def setup_cr(self, shape):
cr = RenderContext(self, self.viewer, self.surface)
cr.initialize_from_shape(shape, font=False)
return cr
def get_dimensions(self, shape):
cr = self.setup_cr(shape)
cr.set_font_from_shape(shape)
return cr.text_extents(shape.text)
def text_extents(self, text, font):
cr = RenderContext(self, self.viewer, self.surface)
cr.set_font(font.fontname, font.fontsize, color=font.color,
alpha=font.alpha)
return cr.text_extents(text)
#END
| 0.001082 |
"""
A release-automation toolkit.
"""
import sys, os, re
from twisted.python import failure, usage
#errors
class DirectoryExists(OSError):
"""Some directory exists when it shouldn't."""
pass
class DirectoryDoesntExist(OSError):
"""Some directory doesn't exist when it should."""
pass
class CommandFailed(OSError):
pass
# utilities
def sh(command, null=True, prompt=False):
"""
I'll try to execute `command', and if `prompt' is true, I'll
ask before running it. If the command returns something other
than 0, I'll raise CommandFailed(command).
"""
print "--$", command
if prompt:
if raw_input("run ?? ").startswith('n'):
return
if null:
command = "%s > /dev/null" % command
if os.system(command) != 0:
raise CommandFailed(command)
def replaceInFile(filename, oldToNew):
"""
I replace the text `oldstr' with `newstr' in `filename' using sed
and mv.
"""
os.rename(filename, filename+'.bak')
f = open(filename+'.bak')
d = f.read()
f.close()
for k,v in oldToNew.items():
d = d.replace(k, v)
f = open(filename + '.new', 'w')
f.write(d)
f.close()
os.rename(filename+'.new', filename)
os.unlink(filename+'.bak')
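# Illustrative example (hypothetical file name and version strings):
#     replaceInFile("twisted/_version.py", {"10.0.0": "10.1.0"})
# rewrites the file in place, replacing every occurrence of each key with its value.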
def runChdirSafe(f, *args, **kw):
origdir = os.path.abspath('.')
try:
return f(*args, **kw)
finally:
os.chdir(origdir)
| 0.004916 |
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
from ilastik.applets.base.appletSerializer import AppletSerializer, getOrCreateGroup, deleteIfPresent
import h5py
import numpy
import os
from watershed_segmentor import WatershedSegmentor
class PreprocessingSerializer( AppletSerializer ):
def __init__(self, preprocessingTopLevelOperator, *args, **kwargs):
super(PreprocessingSerializer, self).__init__(*args, **kwargs)
self._o = preprocessingTopLevelOperator
self.caresOfHeadless = True
def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
preproc = topGroup
for opPre in self._o.innerOperators:
mst = opPre._prepData[0]
if mst is not None:
#The values to be saved for sigma and filter are the
#values of the last valid preprocess
#!These may differ from the current settings!
deleteIfPresent(preproc, "sigma")
deleteIfPresent(preproc, "filter")
deleteIfPresent(preproc, "watershed_source")
deleteIfPresent(preproc, "invert_watershed_source")
deleteIfPresent(preproc, "graph")
preproc.create_dataset("sigma",data= opPre.initialSigma)
preproc.create_dataset("filter",data= opPre.initialFilter)
ws_source = str(opPre.WatershedSource.value)
assert isinstance( ws_source, str ), "WatershedSource was {}, but it should be a string.".format( ws_source )
preproc.create_dataset("watershed_source", data=ws_source)
preproc.create_dataset("invert_watershed_source", data=opPre.InvertWatershedSource.value)
preprocgraph = getOrCreateGroup(preproc, "graph")
mst.saveH5G(preprocgraph)
opPre._unsavedData = False
def _deserializeFromHdf5(self, topGroup, groupVersion, hdf5File, projectFilePath,headless = False):
assert "sigma" in topGroup.keys()
assert "filter" in topGroup.keys()
sigma = topGroup["sigma"].value
sfilter = topGroup["filter"].value
try:
watershed_source = str(topGroup["watershed_source"].value)
invert_watershed_source = bool(topGroup["invert_watershed_source"].value)
except KeyError:
watershed_source = None
invert_watershed_source = False
if "graph" in topGroup.keys():
graphgroup = topGroup["graph"]
else:
assert "graphfile" in topGroup.keys()
#feature: load preprocessed graph from file
filePath = topGroup["graphfile"].value
if not os.path.exists(filePath):
if headless:
raise RuntimeError("Could not find data at " + filePath)
filePath = self.repairFile(filePath,"*.h5")
graphgroup = h5py.File(filePath,"r")["graph"]
for opPre in self._o.innerOperators:
opPre.initialSigma = sigma
opPre.Sigma.setValue(sigma)
if watershed_source:
opPre.WatershedSource.setValue( watershed_source )
opPre.InvertWatershedSource.setValue( invert_watershed_source )
opPre.initialFilter = sfilter
opPre.Filter.setValue(sfilter)
mst = WatershedSegmentor(h5file=graphgroup)
opPre._prepData = numpy.array([mst])
opPre._dirty = False
opPre.applet.writeprotected = True
opPre.PreprocessedData.setDirty()
opPre.enableDownstream(True)
def isDirty(self):
for opPre in self._o.innerOperators:
if opPre._unsavedData:
return True
return False
#this is present only for the serializer AppletInterface
def unload(self):
pass
| 0.011202 |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from shlex import split
from subprocess import call
from subprocess import check_call
from subprocess import check_output
from charms.docker.compose import Compose
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import when
from charms.reactive import when_any
from charms.reactive import when_not
from charmhelpers.core import hookenv
from charmhelpers.core.hookenv import is_leader
from charmhelpers.core.hookenv import leader_set
from charmhelpers.core.hookenv import leader_get
from charmhelpers.core.templating import render
from charmhelpers.core import unitdata
from charmhelpers.core.host import chdir
import tlslib
@when('leadership.is_leader')
def i_am_leader():
'''The leader is the Kubernetes master node. '''
leader_set({'master-address': hookenv.unit_private_ip()})
@when_not('tls.client.authorization.required')
def configure_easrsa():
'''Require the tls layer to generate certificates with "clientAuth". '''
# By default easyrsa generates the server certificates without clientAuth
# Setting this state before easyrsa is configured ensures the tls layer is
# configured to generate certificates with client authentication.
set_state('tls.client.authorization.required')
domain = hookenv.config().get('dns_domain')
cidr = hookenv.config().get('cidr')
sdn_ip = get_sdn_ip(cidr)
# Create extra sans that the tls layer will add to the server cert.
extra_sans = [
sdn_ip,
'kubernetes',
'kubernetes.{0}'.format(domain),
'kubernetes.default',
'kubernetes.default.svc',
'kubernetes.default.svc.{0}'.format(domain)
]
unitdata.kv().set('extra_sans', extra_sans)
@hook('config-changed')
def config_changed():
'''If the configuration values change, remove the available states.'''
config = hookenv.config()
if any(config.changed(key) for key in config.keys()):
hookenv.log('The configuration options have changed.')
# Use the Compose class that encapsulates the docker-compose commands.
compose = Compose('files/kubernetes')
if is_leader():
hookenv.log('Removing master container and kubelet.available state.') # noqa
# Stop and remove the Kubernetes kubelet container.
compose.kill('master')
compose.rm('master')
compose.kill('proxy')
compose.rm('proxy')
# Remove the state so the code can react to restarting kubelet.
remove_state('kubelet.available')
else:
hookenv.log('Removing kubelet container and kubelet.available state.') # noqa
# Stop and remove the Kubernetes kubelet container.
compose.kill('kubelet')
compose.rm('kubelet')
# Remove the state so the code can react to restarting kubelet.
remove_state('kubelet.available')
hookenv.log('Removing proxy container and proxy.available state.')
# Stop and remove the Kubernetes proxy container.
compose.kill('proxy')
compose.rm('proxy')
# Remove the state so the code can react to restarting proxy.
remove_state('proxy.available')
if config.changed('version'):
hookenv.log('The version changed removing the states so the new '
'version of kubectl will be downloaded.')
remove_state('kubectl.downloaded')
remove_state('kubeconfig.created')
@when('tls.server.certificate available')
@when_not('k8s.server.certificate available')
def server_cert():
'''When the server certificate is available, get the server certificate
from the charm unitdata and write it to the kubernetes directory. '''
server_cert = '/srv/kubernetes/server.crt'
server_key = '/srv/kubernetes/server.key'
# Save the server certificate from unit data to the destination.
tlslib.server_cert(None, server_cert, user='ubuntu', group='ubuntu')
# Copy the server key from the default location to the destination.
tlslib.server_key(None, server_key, user='ubuntu', group='ubuntu')
set_state('k8s.server.certificate available')
@when('tls.client.certificate available')
@when_not('k8s.client.certficate available')
def client_cert():
'''When the client certificate is available, get the client certificate
from the charm unitdata and write it to the kubernetes directory. '''
client_cert = '/srv/kubernetes/client.crt'
client_key = '/srv/kubernetes/client.key'
# Save the client certificate from the default location to the destination.
tlslib.client_cert(None, client_cert, user='ubuntu', group='ubuntu')
# Copy the client key from the default location to the destination.
tlslib.client_key(None, client_key, user='ubuntu', group='ubuntu')
set_state('k8s.client.certficate available')
@when('tls.certificate.authority available')
@when_not('k8s.certificate.authority available')
def ca():
'''When the Certificate Authority is available, copy the CA from the
default location to the /srv/kubernetes directory. '''
ca_crt = '/srv/kubernetes/ca.crt'
# Copy the Certificate Authority to the destination directory.
tlslib.ca(None, ca_crt, user='ubuntu', group='ubuntu')
set_state('k8s.certificate.authority available')
@when('kubelet.available', 'leadership.is_leader')
@when_not('kubedns.available', 'skydns.available')
def launch_dns():
'''Create the "kube-system" namespace, the kubedns resource controller,
and the kubedns service. '''
hookenv.log('Creating kubernetes kubedns on the master node.')
# Only launch and track this state on the leader.
# Launching duplicate kubeDNS rc will raise an error
# Run a command to check if the apiserver is responding.
return_code = call(split('kubectl cluster-info'))
if return_code != 0:
hookenv.log('kubectl command failed, waiting for apiserver to start.')
remove_state('kubedns.available')
# Return without setting kubedns.available so this method will retry.
return
# Check for the "kube-system" namespace.
return_code = call(split('kubectl get namespace kube-system'))
if return_code != 0:
# Create the kube-system namespace that is used by the kubedns files.
check_call(split('kubectl create namespace kube-system'))
# Check for the kubedns replication controller.
return_code = call(split('kubectl get -f files/manifests/kubedns-rc.yaml'))
if return_code != 0:
# Create the kubedns replication controller from the rendered file.
check_call(split('kubectl create -f files/manifests/kubedns-rc.yaml'))
# Check for the kubedns service.
return_code = call(split('kubectl get -f files/manifests/kubedns-svc.yaml'))
if return_code != 0:
# Create the kubedns service from the rendered file.
check_call(split('kubectl create -f files/manifests/kubedns-svc.yaml'))
set_state('kubedns.available')
@when('skydns.available', 'leadership.is_leader')
def convert_to_kubedns():
'''Delete the skydns containers to make way for the kubedns containers.'''
    hookenv.log('Deleting the old skydns deployment.')
# Delete the skydns replication controller.
return_code = call(split('kubectl delete rc kube-dns-v11'))
# Delete the skydns service.
return_code = call(split('kubectl delete svc kube-dns'))
remove_state('skydns.available')
@when('docker.available')
@when_not('etcd.available')
def relation_message():
    '''Take over messaging to let the user know the charm is waiting on a
    relation to the ETCD cluster before going any further. '''
status_set('waiting', 'Waiting for relation to ETCD')
@when('kubeconfig.created')
@when('etcd.available')
@when_not('kubelet.available', 'proxy.available')
def start_kubelet(etcd):
'''Run the hyperkube container that starts the kubernetes services.
When the leader, run the master services (apiserver, controller, scheduler,
proxy)
using the master.json from the rendered manifest directory.
When a follower, start the node services (kubelet, and proxy). '''
render_files(etcd)
# Use the Compose class that encapsulates the docker-compose commands.
compose = Compose('files/kubernetes')
status_set('maintenance', 'Starting the Kubernetes services.')
if is_leader():
compose.up('master')
compose.up('proxy')
set_state('kubelet.available')
# Open the secure port for api-server.
hookenv.open_port(6443)
else:
# Start the Kubernetes kubelet container using docker-compose.
compose.up('kubelet')
set_state('kubelet.available')
# Start the Kubernetes proxy container using docker-compose.
compose.up('proxy')
set_state('proxy.available')
status_set('active', 'Kubernetes services started')
@when('docker.available')
@when_not('kubectl.downloaded')
def download_kubectl():
'''Download the kubectl binary to test and interact with the cluster.'''
status_set('maintenance', 'Downloading the kubectl binary')
version = hookenv.config()['version']
cmd = 'wget -nv -O /usr/local/bin/kubectl https://storage.googleapis.com' \
'/kubernetes-release/release/{0}/bin/linux/{1}/kubectl'
cmd = cmd.format(version, arch())
    hookenv.log('Downloading kubectl: {0}'.format(cmd))
check_call(split(cmd))
cmd = 'chmod +x /usr/local/bin/kubectl'
check_call(split(cmd))
set_state('kubectl.downloaded')
@when('kubectl.downloaded', 'leadership.is_leader', 'k8s.certificate.authority available', 'k8s.client.certficate available') # noqa
@when_not('kubeconfig.created')
def master_kubeconfig():
'''Create the kubernetes configuration for the master unit. The master
should create a package with the client credentials so the user can
interact securely with the apiserver.'''
hookenv.log('Creating Kubernetes configuration for master node.')
directory = '/srv/kubernetes'
ca = '/srv/kubernetes/ca.crt'
key = '/srv/kubernetes/client.key'
cert = '/srv/kubernetes/client.crt'
# Get the public address of the apiserver so users can access the master.
server = 'https://{0}:{1}'.format(hookenv.unit_public_ip(), '6443')
# Create the client kubeconfig so users can access the master node.
create_kubeconfig(directory, server, ca, key, cert)
# Copy the kubectl binary to this directory.
cmd = 'cp -v /usr/local/bin/kubectl {0}'.format(directory)
check_call(split(cmd))
# Use a context manager to run the tar command in a specific directory.
with chdir(directory):
# Create a package with kubectl and the files to use it externally.
cmd = 'tar -cvzf /home/ubuntu/kubectl_package.tar.gz ca.crt ' \
'client.key client.crt kubectl kubeconfig'
check_call(split(cmd))
# This sets up the client workspace consistently on the leader and nodes.
node_kubeconfig()
set_state('kubeconfig.created')
@when('kubectl.downloaded', 'k8s.certificate.authority available', 'k8s.server.certificate available') # noqa
@when_not('kubeconfig.created', 'leadership.is_leader')
def node_kubeconfig():
'''Create the kubernetes configuration (kubeconfig) for this unit.
    The nodes will create a kubeconfig with the server credentials so
the services can interact securely with the apiserver.'''
hookenv.log('Creating Kubernetes configuration for worker node.')
directory = '/var/lib/kubelet'
ca = '/srv/kubernetes/ca.crt'
cert = '/srv/kubernetes/server.crt'
key = '/srv/kubernetes/server.key'
# Get the private address of the apiserver for communication between units.
server = 'https://{0}:{1}'.format(leader_get('master-address'), '6443')
# Create the kubeconfig for the other services.
kubeconfig = create_kubeconfig(directory, server, ca, key, cert)
# Install the kubeconfig in the root user's home directory.
install_kubeconfig(kubeconfig, '/root/.kube', 'root')
    # Install the kubeconfig in the ubuntu user's home directory.
install_kubeconfig(kubeconfig, '/home/ubuntu/.kube', 'ubuntu')
set_state('kubeconfig.created')
@when('proxy.available')
@when_not('cadvisor.available')
def start_cadvisor():
'''Start the cAdvisor container that gives metrics about the other
application containers on this system. '''
compose = Compose('files/kubernetes')
compose.up('cadvisor')
hookenv.open_port(8088)
status_set('active', 'cadvisor running on port 8088')
set_state('cadvisor.available')
@when('kubelet.available', 'kubeconfig.created')
@when_any('proxy.available', 'cadvisor.available', 'kubedns.available')
def final_message():
'''Issue some final messages when the services are started. '''
    # TODO: Run a simple/quick health check before issuing this message.
status_set('active', 'Kubernetes running.')
def gather_sdn_data():
'''Get the Software Defined Network (SDN) information and return it as a
dictionary. '''
sdn_data = {}
# The dictionary named 'pillar' is a construct of the k8s template files.
pillar = {}
# SDN Providers pass data via the unitdata.kv module
db = unitdata.kv()
# Ideally the DNS address should come from the sdn cidr.
subnet = db.get('sdn_subnet')
if subnet:
# Generate the DNS ip address on the SDN cidr (this is desired).
pillar['dns_server'] = get_dns_ip(subnet)
else:
        # There is no SDN cidr; fall back to the kubernetes config cidr option.
pillar['dns_server'] = get_dns_ip(hookenv.config().get('cidr'))
# The pillar['dns_domain'] value is used in the kubedns-rc.yaml
pillar['dns_domain'] = hookenv.config().get('dns_domain')
# Use a 'pillar' dictionary so we can reuse the upstream kubedns templates.
sdn_data['pillar'] = pillar
return sdn_data
def install_kubeconfig(kubeconfig, directory, user):
    '''Copy the kubeconfig file into a new directory, creating directories
    if necessary. '''
# The file and directory must be owned by the correct user.
chown = 'chown {0}:{0} {1}'
if not os.path.isdir(directory):
os.makedirs(directory)
# Change the ownership of the config file to the right user.
check_call(split(chown.format(user, directory)))
# kubectl looks for a file named "config" in the ~/.kube directory.
config = os.path.join(directory, 'config')
# Copy the kubeconfig file to the directory renaming it to "config".
cmd = 'cp -v {0} {1}'.format(kubeconfig, config)
check_call(split(cmd))
# Change the ownership of the config file to the right user.
check_call(split(chown.format(user, config)))
def create_kubeconfig(directory, server, ca, key, cert, user='ubuntu'):
'''Create a configuration for kubernetes in a specific directory using
the supplied arguments, return the path to the file.'''
context = 'default-context'
cluster_name = 'kubernetes'
# Ensure the destination directory exists.
if not os.path.isdir(directory):
os.makedirs(directory)
# The configuration file should be in this directory named kubeconfig.
kubeconfig = os.path.join(directory, 'kubeconfig')
# Create the config file with the address of the master server.
cmd = 'kubectl config set-cluster --kubeconfig={0} {1} ' \
'--server={2} --certificate-authority={3}'
check_call(split(cmd.format(kubeconfig, cluster_name, server, ca)))
# Create the credentials using the client flags.
cmd = 'kubectl config set-credentials --kubeconfig={0} {1} ' \
'--client-key={2} --client-certificate={3}'
check_call(split(cmd.format(kubeconfig, user, key, cert)))
# Create a default context with the cluster.
cmd = 'kubectl config set-context --kubeconfig={0} {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster_name, user)))
# Make the config use this new context.
cmd = 'kubectl config use-context --kubeconfig={0} {1}'
check_call(split(cmd.format(kubeconfig, context)))
hookenv.log('kubectl configuration created at {0}.'.format(kubeconfig))
return kubeconfig
def get_dns_ip(cidr):
'''Get an IP address for the DNS server on the provided cidr.'''
# Remove the range from the cidr.
ip = cidr.split('/')[0]
# Take the last octet off the IP address and replace it with 10.
return '.'.join(ip.split('.')[0:-1]) + '.10'
def get_sdn_ip(cidr):
'''Get the IP address for the SDN gateway based on the provided cidr.'''
# Remove the range from the cidr.
ip = cidr.split('/')[0]
# Remove the last octet and replace it with 1.
return '.'.join(ip.split('.')[0:-1]) + '.1'
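# Worked examples (illustrative, hypothetical cidr value):
#     get_dns_ip('10.1.0.0/16')  -> '10.1.0.10'
#     get_sdn_ip('10.1.0.0/16')  -> '10.1.0.1'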
def render_files(reldata=None):
'''Use jinja templating to render the docker-compose.yml and master.json
file to contain the dynamic data for the configuration files.'''
context = {}
# Load the context data with SDN data.
context.update(gather_sdn_data())
# Add the charm configuration data to the context.
context.update(hookenv.config())
if reldata:
connection_string = reldata.get_connection_string()
# Define where the etcd tls files will be kept.
etcd_dir = '/etc/ssl/etcd'
# Create paths to the etcd client ca, key, and cert file locations.
ca = os.path.join(etcd_dir, 'client-ca.pem')
key = os.path.join(etcd_dir, 'client-key.pem')
cert = os.path.join(etcd_dir, 'client-cert.pem')
# Save the client credentials (in relation data) to the paths provided.
reldata.save_client_credentials(key, cert, ca)
# Update the context so the template has the etcd information.
context.update({'etcd_dir': etcd_dir,
'connection_string': connection_string,
'etcd_ca': ca,
'etcd_key': key,
'etcd_cert': cert})
charm_dir = hookenv.charm_dir()
rendered_kube_dir = os.path.join(charm_dir, 'files/kubernetes')
if not os.path.exists(rendered_kube_dir):
os.makedirs(rendered_kube_dir)
rendered_manifest_dir = os.path.join(charm_dir, 'files/manifests')
if not os.path.exists(rendered_manifest_dir):
os.makedirs(rendered_manifest_dir)
# Update the context with extra values, arch, manifest dir, and private IP.
context.update({'arch': arch(),
'master_address': leader_get('master-address'),
'manifest_directory': rendered_manifest_dir,
'public_address': hookenv.unit_get('public-address'),
'private_address': hookenv.unit_get('private-address')})
# Adapted from: http://kubernetes.io/docs/getting-started-guides/docker/
target = os.path.join(rendered_kube_dir, 'docker-compose.yml')
# Render the files/kubernetes/docker-compose.yml file that contains the
# definition for kubelet and proxy.
render('docker-compose.yml', target, context)
if is_leader():
# Source: https://github.com/kubernetes/...master/cluster/images/hyperkube # noqa
target = os.path.join(rendered_manifest_dir, 'master.json')
# Render the files/manifests/master.json that contains parameters for
# the apiserver, controller, and controller-manager
render('master.json', target, context)
# Source: ...cluster/addons/dns/skydns-svc.yaml.in
target = os.path.join(rendered_manifest_dir, 'kubedns-svc.yaml')
# Render files/kubernetes/kubedns-svc.yaml for the DNS service.
render('kubedns-svc.yaml', target, context)
# Source: ...cluster/addons/dns/skydns-rc.yaml.in
target = os.path.join(rendered_manifest_dir, 'kubedns-rc.yaml')
# Render files/kubernetes/kubedns-rc.yaml for the DNS pod.
render('kubedns-rc.yaml', target, context)
def status_set(level, message):
'''Output status message with leadership information.'''
if is_leader():
message = '{0} (master) '.format(message)
hookenv.status_set(level, message)
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
# Validate the architecture is supported by kubernetes.
if architecture not in ['amd64', 'arm', 'arm64', 'ppc64le']:
message = 'Unsupported machine architecture: {0}'.format(architecture)
status_set('blocked', message)
raise Exception(message)
return architecture
| 0.000047 |
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Chris Dekter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__all__ = ["XRecordInterface", "AtSpiInterface"]
import os, threading, re, time, socket, select, logging, queue, subprocess
try:
import pyatspi
HAS_ATSPI = True
except ImportError:
HAS_ATSPI = False
from Xlib import X, XK, display, error
try:
from Xlib.ext import record, xtest
HAS_RECORD = True
except ImportError:
HAS_RECORD = False
from Xlib.protocol import rq, event
from . import common
if common.USING_QT:
from PyQt4.QtGui import QClipboard, QApplication
else:
from gi.repository import Gtk, Gdk
logger = logging.getLogger("interface")
MASK_INDEXES = [
(X.ShiftMapIndex, X.ShiftMask),
(X.ControlMapIndex, X.ControlMask),
(X.LockMapIndex, X.LockMask),
(X.Mod1MapIndex, X.Mod1Mask),
(X.Mod2MapIndex, X.Mod2Mask),
(X.Mod3MapIndex, X.Mod3Mask),
(X.Mod4MapIndex, X.Mod4Mask),
(X.Mod5MapIndex, X.Mod5Mask),
]
CAPSLOCK_LEDMASK = 1<<0
NUMLOCK_LEDMASK = 1<<1
class XInterfaceBase(threading.Thread):
"""
Encapsulates the common functionality for the two X interface classes.
"""
def __init__(self, mediator, app):
threading.Thread.__init__(self)
self.setDaemon(True)
self.setName("XInterface-thread")
self.mediator = mediator
self.app = app
self.lastChars = [] # QT4 Workaround
self.__enableQT4Workaround = False # QT4 Workaround
self.shutdown = False
# Event loop
self.eventThread = threading.Thread(target=self.__eventLoop)
self.queue = queue.Queue()
# Event listener
self.listenerThread = threading.Thread(target=self.__flushEvents)
if common.USING_QT:
self.clipBoard = QApplication.clipboard()
else:
self.clipBoard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
self.selection = Gtk.Clipboard.get(Gdk.SELECTION_PRIMARY)
self.__initMappings()
# Set initial lock state
ledMask = self.localDisplay.get_keyboard_control().led_mask
mediator.set_modifier_state(Key.CAPSLOCK, (ledMask & CAPSLOCK_LEDMASK) != 0)
mediator.set_modifier_state(Key.NUMLOCK, (ledMask & NUMLOCK_LEDMASK) != 0)
# Window name atoms
self.__NameAtom = self.localDisplay.intern_atom("_NET_WM_NAME", True)
self.__VisibleNameAtom = self.localDisplay.intern_atom("_NET_WM_VISIBLE_NAME", True)
if not common.USING_QT:
self.keyMap = Gdk.Keymap.get_default()
self.keyMap.connect("keys-changed", self.on_keys_changed)
self.__ignoreRemap = False
self.eventThread.start()
self.listenerThread.start()
def __eventLoop(self):
while True:
method, args = self.queue.get()
if method is None and args is None:
break
try:
method(*args)
except Exception as e:
logger.exception("Error in X event loop thread")
self.queue.task_done()
def __enqueue(self, method, *args):
self.queue.put_nowait((method, args))
def on_keys_changed(self, data=None):
if not self.__ignoreRemap:
logger.debug("Recorded keymap change event")
self.__ignoreRemap = True
time.sleep(0.2)
self.__enqueue(self.__ungrabAllHotkeys)
self.__enqueue(self.__delayedInitMappings)
else:
logger.debug("Ignored keymap change event")
def __delayedInitMappings(self):
self.__initMappings()
self.__ignoreRemap = False
def __initMappings(self):
self.localDisplay = display.Display()
self.rootWindow = self.localDisplay.screen().root
self.rootWindow.change_attributes(event_mask=X.SubstructureNotifyMask|X.StructureNotifyMask)
altList = self.localDisplay.keysym_to_keycodes(XK.XK_ISO_Level3_Shift)
self.__usableOffsets = (0, 1)
for code, offset in altList:
if code == 108 and offset == 0:
self.__usableOffsets += (4, 5)
logger.debug("Enabling sending using Alt-Grid")
break
# Build modifier mask mapping
self.modMasks = {}
mapping = self.localDisplay.get_modifier_mapping()
for keySym, ak in XK_TO_AK_MAP.items():
if ak in MODIFIERS:
keyCodeList = self.localDisplay.keysym_to_keycodes(keySym)
found = False
for keyCode, lvl in keyCodeList:
for index, mask in MASK_INDEXES:
if keyCode in mapping[index]:
self.modMasks[ak] = mask
found = True
break
if found: break
logger.debug("Modifier masks: %r", self.modMasks)
self.__grabHotkeys()
self.localDisplay.flush()
# --- get list of keycodes that are unused in the current keyboard mapping
keyCode = 8
avail = []
for keyCodeMapping in self.localDisplay.get_keyboard_mapping(keyCode, 200):
codeAvail = True
for offset in keyCodeMapping:
if offset != 0:
codeAvail = False
break
if codeAvail:
avail.append(keyCode)
keyCode += 1
self.__availableKeycodes = avail
self.remappedChars = {}
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
self.keymap_test()
def keymap_test(self):
code = self.localDisplay.keycode_to_keysym(108, 0)
for attr in XK.__dict__.items():
if attr[0].startswith("XK"):
if attr[1] == code:
logger.debug("Alt-Grid: %s, %s", attr[0], attr[1])
logger.debug(repr(self.localDisplay.keysym_to_keycodes(XK.XK_ISO_Level3_Shift)))
logger.debug("X Server Keymap")
for char in "\\|`1234567890-=~!@#$%^&*()qwertyuiop[]asdfghjkl;'zxcvbnm,./QWERTYUIOP{}ASDFGHJKL:\"ZXCVBNM<>?":
keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
keyCodeList = list(keyCodeList)
if len(keyCodeList) > 0:
logger.debug("[%s] : %s", char, keyCodeList)
else:
logger.debug("No mapping for [%s]", char)
def __needsMutterWorkaround(self, item):
if Key.SUPER not in item.modifiers:
return False
try:
output = subprocess.check_output(["ps", "-eo", "command"]).decode()
except subprocess.CalledProcessError:
pass # since this is just a nasty workaround, if anything goes wrong just disable it
else:
lines = output.splitlines()
for line in lines:
if "gnome-shell" in line or "cinnamon" in line or "unity" in line:
return True
return False
def __grabHotkeys(self):
"""
Run during startup to grab global and specific hotkeys in all open windows
"""
c = self.app.configManager
hotkeys = c.hotKeys + c.hotKeyFolders
# Grab global hotkeys in root window
for item in c.globalHotkeys:
if item.enabled:
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
# Grab hotkeys without a filter in root window
for item in hotkeys:
if item.get_applicable_regex() is None:
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
self.__enqueue(self.__recurseTree, self.rootWindow, hotkeys)
def __recurseTree(self, parent, hotkeys):
# Grab matching hotkeys in all open child windows
try:
children = parent.query_tree().children
except:
return # window has been destroyed
for window in children:
try:
title = self.get_window_title(window, False)
klass = self.get_window_class(window, False)
if title or klass:
for item in hotkeys:
if item.get_applicable_regex() is not None and item._should_trigger_window_title((title, klass)):
self.__grabHotkey(item.hotKey, item.modifiers, window)
self.__grabRecurse(item, window, False)
self.__enqueue(self.__recurseTree, window, hotkeys)
except:
logger.exception("grab on window failed")
def __ungrabAllHotkeys(self):
"""
Ungrab all hotkeys in preparation for keymap change
"""
c = self.app.configManager
hotkeys = c.hotKeys + c.hotKeyFolders
# Ungrab global hotkeys in root window, recursively
for item in c.globalHotkeys:
if item.enabled:
self.__ungrabHotkey(item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__ungrabRecurse(item, self.rootWindow, False)
# Ungrab hotkeys without a filter in root window, recursively
for item in hotkeys:
if item.get_applicable_regex() is None:
self.__ungrabHotkey(item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__ungrabRecurse(item, self.rootWindow, False)
self.__recurseTreeUngrab(self.rootWindow, hotkeys)
def __recurseTreeUngrab(self, parent, hotkeys):
# Ungrab matching hotkeys in all open child windows
try:
children = parent.query_tree().children
except:
return # window has been destroyed
for window in children:
try:
title = self.get_window_title(window, False)
klass = self.get_window_class(window, False)
if title or klass:
for item in hotkeys:
if item.get_applicable_regex() is not None and item._should_trigger_window_title((title, klass)):
self.__ungrabHotkey(item.hotKey, item.modifiers, window)
self.__ungrabRecurse(item, window, False)
self.__enqueue(self.__recurseTreeUngrab, window, hotkeys)
except:
logger.exception("ungrab on window failed")
def __grabHotkeysForWindow(self, window):
"""
Grab all hotkeys relevant to the window
Used when a new window is created
"""
c = self.app.configManager
hotkeys = c.hotKeys + c.hotKeyFolders
title = self.get_window_title(window)
klass = self.get_window_class(window)
for item in hotkeys:
if item.get_applicable_regex() is not None and item._should_trigger_window_title((title, klass)):
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, window)
elif self.__needsMutterWorkaround(item):
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, window)
def __grabHotkey(self, key, modifiers, window):
"""
Grab a specific hotkey in the given window
"""
logger.debug("Grabbing hotkey: %r %r", modifiers, key)
try:
keycode = self.__lookupKeyCode(key)
mask = 0
for mod in modifiers:
mask |= self.modMasks[mod]
window.grab_key(keycode, mask, True, X.GrabModeAsync, X.GrabModeAsync)
if Key.NUMLOCK in self.modMasks:
window.grab_key(keycode, mask|self.modMasks[Key.NUMLOCK], True, X.GrabModeAsync, X.GrabModeAsync)
if Key.CAPSLOCK in self.modMasks:
window.grab_key(keycode, mask|self.modMasks[Key.CAPSLOCK], True, X.GrabModeAsync, X.GrabModeAsync)
if Key.CAPSLOCK in self.modMasks and Key.NUMLOCK in self.modMasks:
window.grab_key(keycode, mask|self.modMasks[Key.CAPSLOCK]|self.modMasks[Key.NUMLOCK], True, X.GrabModeAsync, X.GrabModeAsync)
except Exception as e:
logger.warn("Failed to grab hotkey %r %r: %s", modifiers, key, str(e))
def grab_hotkey(self, item):
"""
Grab a hotkey.
If the hotkey has no filter regex, it is global and is grabbed recursively from the root window
If it has a filter regex, iterate over all children of the root and grab from matching windows
"""
if item.get_applicable_regex() is None:
self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
else:
self.__enqueue(self.__grabRecurse, item, self.rootWindow)
def __grabRecurse(self, item, parent, checkWinInfo=True):
try:
children = parent.query_tree().children
except:
return # window has been destroyed
for window in children:
shouldTrigger = False
if checkWinInfo:
title = self.get_window_title(window, False)
klass = self.get_window_class(window, False)
shouldTrigger = item._should_trigger_window_title((title, klass))
if shouldTrigger or not checkWinInfo:
self.__grabHotkey(item.hotKey, item.modifiers, window)
self.__grabRecurse(item, window, False)
else:
self.__grabRecurse(item, window)
def ungrab_hotkey(self, item):
"""
Ungrab a hotkey.
If the hotkey has no filter regex, it is global and is ungrabbed recursively from the root window
If it has a filter regex, iterate over all children of the root and ungrab from matching windows
"""
import copy
newItem = copy.copy(item)
if item.get_applicable_regex() is None:
self.__enqueue(self.__ungrabHotkey, newItem.hotKey, newItem.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__enqueue(self.__ungrabRecurse, newItem, self.rootWindow, False)
else:
self.__enqueue(self.__ungrabRecurse, newItem, self.rootWindow)
def __ungrabRecurse(self, item, parent, checkWinInfo=True):
try:
children = parent.query_tree().children
except:
return # window has been destroyed
for window in children:
shouldTrigger = False
if checkWinInfo:
title = self.get_window_title(window, False)
klass = self.get_window_class(window, False)
shouldTrigger = item._should_trigger_window_title((title, klass))
if shouldTrigger or not checkWinInfo:
self.__ungrabHotkey(item.hotKey, item.modifiers, window)
self.__ungrabRecurse(item, window, False)
else:
self.__ungrabRecurse(item, window)
def __ungrabHotkey(self, key, modifiers, window):
"""
Ungrab a specific hotkey in the given window
"""
logger.debug("Ungrabbing hotkey: %r %r", modifiers, key)
try:
keycode = self.__lookupKeyCode(key)
mask = 0
for mod in modifiers:
mask |= self.modMasks[mod]
window.ungrab_key(keycode, mask)
if Key.NUMLOCK in self.modMasks:
window.ungrab_key(keycode, mask|self.modMasks[Key.NUMLOCK])
if Key.CAPSLOCK in self.modMasks:
window.ungrab_key(keycode, mask|self.modMasks[Key.CAPSLOCK])
if Key.CAPSLOCK in self.modMasks and Key.NUMLOCK in self.modMasks:
window.ungrab_key(keycode, mask|self.modMasks[Key.CAPSLOCK]|self.modMasks[Key.NUMLOCK])
except Exception as e:
logger.warn("Failed to ungrab hotkey %r %r: %s", modifiers, key, str(e))
def lookup_string(self, keyCode, shifted, numlock, altGrid):
if keyCode == 0:
return "<unknown>"
keySym = self.localDisplay.keycode_to_keysym(keyCode, 0)
if keySym in XK_TO_AK_NUMLOCKED and numlock and not (numlock and shifted):
return XK_TO_AK_NUMLOCKED[keySym]
elif keySym in XK_TO_AK_MAP:
return XK_TO_AK_MAP[keySym]
else:
index = 0
if shifted: index += 1
if altGrid: index += 4
try:
return chr(self.localDisplay.keycode_to_keysym(keyCode, index))
except ValueError:
return "<code%d>" % keyCode
def send_string_clipboard(self, string, pasteCommand):
self.__enqueue(self.__sendStringClipboard, string, pasteCommand)
def __sendStringClipboard(self, string, pasteCommand):
logger.debug("Sending string: %r", string)
if pasteCommand is None:
if common.USING_QT:
self.sem = threading.Semaphore(0)
self.app.exec_in_main(self.__fillSelection, string)
self.sem.acquire()
else:
self.__fillSelection(string)
focus = self.localDisplay.get_input_focus().focus
xtest.fake_input(focus, X.ButtonPress, X.Button2)
xtest.fake_input(focus, X.ButtonRelease, X.Button2)
else:
if common.USING_QT:
self.sem = threading.Semaphore(0)
self.app.exec_in_main(self.__fillClipboard, string)
self.sem.acquire()
else:
self.__fillClipboard(string)
self.mediator.send_string(pasteCommand)
if common.USING_QT:
self.app.exec_in_main(self.__restoreClipboard)
logger.debug("Send via clipboard done")
def __restoreClipboard(self):
if self.__savedClipboard != "":
if common.USING_QT:
self.clipBoard.setText(self.__savedClipboard, QClipboard.Clipboard)
else:
Gdk.threads_enter()
self.clipBoard.set_text(self.__savedClipboard)
Gdk.threads_leave()
def __fillSelection(self, string):
if common.USING_QT:
self.clipBoard.setText(string, QClipboard.Selection)
self.sem.release()
else:
Gdk.threads_enter()
self.selection.set_text(string)
# self.selection.set_text(string.encode("utf-8"))
Gdk.threads_leave()
def __fillClipboard(self, string):
if common.USING_QT:
self.__savedClipboard = self.clipBoard.text()
self.clipBoard.setText(string, QClipboard.Clipboard)
self.sem.release()
else:
Gdk.threads_enter()
text = self.clipBoard.wait_for_text()
self.__savedClipboard = ''
if text is not None: self.__savedClipboard = text
if Gtk.get_major_version() >= 3:
self.clipBoard.set_text(string, -1)
else:
self.clipBoard.set_text(string)
# self.clipBoard.set_text(string.encode("utf-8"))
Gdk.threads_leave()
def begin_send(self):
self.__enqueue(self.__grab_keyboard)
def finish_send(self):
self.__enqueue(self.__ungrabKeyboard)
def grab_keyboard(self):
self.__enqueue(self.__grab_keyboard)
def __grab_keyboard(self):
focus = self.localDisplay.get_input_focus().focus
focus.grab_keyboard(True, X.GrabModeAsync, X.GrabModeAsync, X.CurrentTime)
self.localDisplay.flush()
def ungrab_keyboard(self):
self.__enqueue(self.__ungrabKeyboard)
def __ungrabKeyboard(self):
self.localDisplay.ungrab_keyboard(X.CurrentTime)
self.localDisplay.flush()
def __findUsableKeycode(self, codeList):
for code, offset in codeList:
if offset in self.__usableOffsets:
return code, offset
return None, None
def send_string(self, string):
self.__enqueue(self.__sendString, string)
def __sendString(self, string):
"""
Send a string of printable characters.
"""
logger.debug("Sending string: %r", string)
# Determine if workaround is needed
if not ConfigManager.SETTINGS[ENABLE_QT4_WORKAROUND]:
self.__checkWorkaroundNeeded()
# First find out if any chars need remapping
remapNeeded = False
for char in string:
keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
usableCode, offset = self.__findUsableKeycode(keyCodeList)
if usableCode is None and char not in self.remappedChars:
remapNeeded = True
break
# Now we know chars need remapping, do it
if remapNeeded:
self.__ignoreRemap = True
self.remappedChars = {}
remapChars = []
for char in string:
keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
usableCode, offset = self.__findUsableKeycode(keyCodeList)
if usableCode is None:
remapChars.append(char)
logger.debug("Characters requiring remapping: %r", remapChars)
availCodes = self.__availableKeycodes
logger.debug("Remapping with keycodes in the range: %r", availCodes)
mapping = self.localDisplay.get_keyboard_mapping(8, 200)
firstCode = 8
for i in range(len(availCodes) - 1):
code = availCodes[i]
sym1 = 0
sym2 = 0
if len(remapChars) > 0:
char = remapChars.pop(0)
self.remappedChars[char] = (code, 0)
sym1 = ord(char)
if len(remapChars) > 0:
char = remapChars.pop(0)
self.remappedChars[char] = (code, 1)
sym2 = ord(char)
if sym1 != 0:
mapping[code - firstCode][0] = sym1
mapping[code - firstCode][1] = sym2
mapping = [tuple(l) for l in mapping]
self.localDisplay.change_keyboard_mapping(firstCode, mapping)
self.localDisplay.flush()
focus = self.localDisplay.get_input_focus().focus
for char in string:
try:
keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
keyCode, offset = self.__findUsableKeycode(keyCodeList)
if keyCode is not None:
if offset == 0:
self.__sendKeyCode(keyCode, theWindow=focus)
if offset == 1:
self.__pressKey(Key.SHIFT)
self.__sendKeyCode(keyCode, self.modMasks[Key.SHIFT], focus)
self.__releaseKey(Key.SHIFT)
if offset == 4:
self.__pressKey(Key.ALT_GR)
self.__sendKeyCode(keyCode, self.modMasks[Key.ALT_GR], focus)
self.__releaseKey(Key.ALT_GR)
if offset == 5:
self.__pressKey(Key.ALT_GR)
self.__pressKey(Key.SHIFT)
self.__sendKeyCode(keyCode, self.modMasks[Key.ALT_GR]|self.modMasks[Key.SHIFT], focus)
self.__releaseKey(Key.SHIFT)
self.__releaseKey(Key.ALT_GR)
elif char in self.remappedChars:
keyCode, offset = self.remappedChars[char]
if offset == 0:
self.__sendKeyCode(keyCode, theWindow=focus)
if offset == 1:
self.__pressKey(Key.SHIFT)
self.__sendKeyCode(keyCode, self.modMasks[Key.SHIFT], focus)
self.__releaseKey(Key.SHIFT)
else:
logger.warn("Unable to send character %r", char)
except Exception as e:
logger.exception("Error sending char %r: %s", char, str(e))
self.__ignoreRemap = False
def send_key(self, keyName):
"""
Send a specific non-printing key, eg Up, Left, etc
"""
self.__enqueue(self.__sendKey, keyName)
def __sendKey(self, keyName):
logger.debug("Send special key: [%r]", keyName)
self.__sendKeyCode(self.__lookupKeyCode(keyName))
def fake_keypress(self, keyName):
self.__enqueue(self.__fakeKeypress, keyName)
def __fakeKeypress(self, keyName):
keyCode = self.__lookupKeyCode(keyName)
xtest.fake_input(self.rootWindow, X.KeyPress, keyCode)
xtest.fake_input(self.rootWindow, X.KeyRelease, keyCode)
def fake_keydown(self, keyName):
self.__enqueue(self.__fakeKeydown, keyName)
def __fakeKeydown(self, keyName):
keyCode = self.__lookupKeyCode(keyName)
xtest.fake_input(self.rootWindow, X.KeyPress, keyCode)
def fake_keyup(self, keyName):
self.__enqueue(self.__fakeKeyup, keyName)
def __fakeKeyup(self, keyName):
keyCode = self.__lookupKeyCode(keyName)
xtest.fake_input(self.rootWindow, X.KeyRelease, keyCode)
def send_modified_key(self, keyName, modifiers):
"""
Send a modified key (e.g. when emulating a hotkey)
"""
self.__enqueue(self.__sendModifiedKey, keyName, modifiers)
def __sendModifiedKey(self, keyName, modifiers):
logger.debug("Send modified key: modifiers: %s key: %s", modifiers, keyName)
try:
mask = 0
for mod in modifiers:
mask |= self.modMasks[mod]
keyCode = self.__lookupKeyCode(keyName)
for mod in modifiers: self.__pressKey(mod)
self.__sendKeyCode(keyCode, mask)
for mod in modifiers: self.__releaseKey(mod)
except Exception as e:
logger.warn("Error sending modified key %r %r: %s", modifiers, keyName, str(e))
def send_mouse_click(self, xCoord, yCoord, button, relative):
self.__enqueue(self.__sendMouseClick, xCoord, yCoord, button, relative)
def __sendMouseClick(self, xCoord, yCoord, button, relative):
# Get current pointer position so we can return it there
pos = self.rootWindow.query_pointer()
if relative:
focus = self.localDisplay.get_input_focus().focus
focus.warp_pointer(xCoord, yCoord)
xtest.fake_input(focus, X.ButtonPress, button, x=xCoord, y=yCoord)
xtest.fake_input(focus, X.ButtonRelease, button, x=xCoord, y=yCoord)
else:
self.rootWindow.warp_pointer(xCoord, yCoord)
xtest.fake_input(self.rootWindow, X.ButtonPress, button, x=xCoord, y=yCoord)
xtest.fake_input(self.rootWindow, X.ButtonRelease, button, x=xCoord, y=yCoord)
self.rootWindow.warp_pointer(pos.root_x, pos.root_y)
self.__flush()
def send_mouse_click_relative(self, xoff, yoff, button):
self.__enqueue(self.__sendMouseClickRelative, xoff, yoff, button)
def __sendMouseClickRelative(self, xoff, yoff, button):
# Get current pointer position
pos = self.rootWindow.query_pointer()
xCoord = pos.root_x + xoff
yCoord = pos.root_y + yoff
self.rootWindow.warp_pointer(xCoord, yCoord)
xtest.fake_input(self.rootWindow, X.ButtonPress, button, x=xCoord, y=yCoord)
xtest.fake_input(self.rootWindow, X.ButtonRelease, button, x=xCoord, y=yCoord)
self.rootWindow.warp_pointer(pos.root_x, pos.root_y)
self.__flush()
def flush(self):
self.__enqueue(self.__flush)
def __flush(self):
self.localDisplay.flush()
self.lastChars = []
def press_key(self, keyName):
self.__enqueue(self.__pressKey, keyName)
def __pressKey(self, keyName):
self.__sendKeyPressEvent(self.__lookupKeyCode(keyName), 0)
def release_key(self, keyName):
self.__enqueue(self.__releaseKey, keyName)
def __releaseKey(self, keyName):
self.__sendKeyReleaseEvent(self.__lookupKeyCode(keyName), 0)
def __flushEvents(self):
while True:
try:
readable, w, e = select.select([self.localDisplay], [], [], 1)
time.sleep(1)
if self.localDisplay in readable:
createdWindows = []
destroyedWindows = []
for x in range(self.localDisplay.pending_events()):
event = self.localDisplay.next_event()
if event.type == X.CreateNotify:
createdWindows.append(event.window)
if event.type == X.DestroyNotify:
destroyedWindows.append(event.window)
for window in createdWindows:
if window not in destroyedWindows:
self.__enqueue(self.__grabHotkeysForWindow, window)
if self.shutdown:
break
except:
pass
def handle_keypress(self, keyCode):
self.__enqueue(self.__handleKeyPress, keyCode)
def __handleKeyPress(self, keyCode):
focus = self.localDisplay.get_input_focus().focus
modifier = self.__decodeModifier(keyCode)
if modifier is not None:
self.mediator.handle_modifier_down(modifier)
else:
self.mediator.handle_keypress(keyCode, self.get_window_title(focus), self.get_window_class(focus))
def handle_keyrelease(self, keyCode):
self.__enqueue(self.__handleKeyrelease, keyCode)
def __handleKeyrelease(self, keyCode):
modifier = self.__decodeModifier(keyCode)
if modifier is not None:
self.mediator.handle_modifier_up(modifier)
def handle_mouseclick(self, button, x, y):
self.__enqueue(self.__handleMouseclick, button, x, y)
def __handleMouseclick(self, button, x, y):
title = self.get_window_title()
klass = self.get_window_class()
info = (title, klass)
if x is None and y is None:
ret = self.localDisplay.get_input_focus().focus.query_pointer()
self.mediator.handle_mouse_click(ret.root_x, ret.root_y, ret.win_x, ret.win_y, button, info)
else:
focus = self.localDisplay.get_input_focus().focus
try:
rel = focus.translate_coords(self.rootWindow, x, y)
self.mediator.handle_mouse_click(x, y, rel.x, rel.y, button, info)
except:
self.mediator.handle_mouse_click(x, y, 0, 0, button, info)
def __decodeModifier(self, keyCode):
"""
Checks if the given keyCode is a modifier key. If it is, returns the modifier name
constant as defined in the iomediator module. If not, returns C{None}
"""
keyName = self.lookup_string(keyCode, False, False, False)
if keyName in MODIFIERS:
return keyName
return None
def __sendKeyCode(self, keyCode, modifiers=0, theWindow=None):
if ConfigManager.SETTINGS[ENABLE_QT4_WORKAROUND] or self.__enableQT4Workaround:
self.__doQT4Workaround(keyCode)
self.__sendKeyPressEvent(keyCode, modifiers, theWindow)
self.__sendKeyReleaseEvent(keyCode, modifiers, theWindow)
def __checkWorkaroundNeeded(self):
focus = self.localDisplay.get_input_focus().focus
windowName = self.get_window_title(focus)
windowClass = self.get_window_class(focus)
w = self.app.configManager.workAroundApps
if w.match(windowName) or w.match(windowClass):
self.__enableQT4Workaround = True
else:
self.__enableQT4Workaround = False
def __doQT4Workaround(self, keyCode):
if len(self.lastChars) > 0:
if keyCode in self.lastChars:
self.localDisplay.flush()
time.sleep(0.0125)
self.lastChars.append(keyCode)
if len(self.lastChars) > 10:
self.lastChars.pop(0)
def __sendKeyPressEvent(self, keyCode, modifiers, theWindow=None):
if theWindow is None:
focus = self.localDisplay.get_input_focus().focus
else:
focus = theWindow
keyEvent = event.KeyPress(
detail=keyCode,
time=X.CurrentTime,
root=self.rootWindow,
window=focus,
child=X.NONE,
root_x=1,
root_y=1,
event_x=1,
event_y=1,
state=modifiers,
same_screen=1
)
focus.send_event(keyEvent)
def __sendKeyReleaseEvent(self, keyCode, modifiers, theWindow=None):
if theWindow is None:
focus = self.localDisplay.get_input_focus().focus
else:
focus = theWindow
keyEvent = event.KeyRelease(
detail=keyCode,
time=X.CurrentTime,
root=self.rootWindow,
window=focus,
child=X.NONE,
root_x=1,
root_y=1,
event_x=1,
event_y=1,
state=modifiers,
same_screen=1
)
focus.send_event(keyEvent)
def __lookupKeyCode(self, char):
if char in AK_TO_XK_MAP:
return self.localDisplay.keysym_to_keycode(AK_TO_XK_MAP[char])
elif char.startswith("<code"):
return int(char[5:-1])
else:
try:
return self.localDisplay.keysym_to_keycode(ord(char))
except Exception as e:
logger.error("Unknown key name: %s", char)
raise
def get_window_title(self, window=None, traverse=True):
try:
if window is None:
windowvar = self.localDisplay.get_input_focus().focus
else:
windowvar = window
return self.__getWinTitle(windowvar, traverse)
except AttributeError as e:
if str(e)=="'int' object has no attribute 'get_property'":
return ""
raise
except error.BadWindow as e:#TODO_PY3
print(__name__, repr(e))
return ""
except: # Default handler
return ""
def __getWinTitle(self, windowvar, traverse):
atom = windowvar.get_property(self.__VisibleNameAtom, 0, 0, 255)
if atom is None:
atom = windowvar.get_property(self.__NameAtom, 0, 0, 255)
if atom:
return atom.value #.decode("utf-8")
elif traverse:
return self.__getWinTitle(windowvar.query_tree().parent, True)
else:
return ""
def get_window_class(self, window=None, traverse=True):
try:
if window is None:
windowvar = self.localDisplay.get_input_focus().focus
else:
windowvar = window
return self.__getWinClass(windowvar, traverse)
except AttributeError as e:
if str(e)=="'int' object has no attribute 'get_wm_class'":
return ""
raise
except error.BadWindow as e:#TODO_PY3
print(__name__, repr(e))
return ""
# except:
# return ""
def __getWinClass(self, windowvar, traverse):
wmclass = windowvar.get_wm_class()
if (wmclass == None or wmclass == ""):
if traverse:
return self.__getWinClass(windowvar.query_tree().parent, True)
else:
return ""
return wmclass[0] + '.' + wmclass[1]
def cancel(self):
self.queue.put_nowait((None, None))
self.shutdown = True
self.listenerThread.join()
self.eventThread.join()
self.localDisplay.flush()
self.localDisplay.close()
self.join()
class XRecordInterface(XInterfaceBase):
def initialise(self):
self.recordDisplay = display.Display()
self.__locksChecked = False
# Check for record extension
if not self.recordDisplay.has_extension("RECORD"):
raise Exception("Your X-Server does not have the RECORD extension available/enabled.")
def run(self):
# Create a recording context; we only want key and mouse events
self.ctx = self.recordDisplay.record_create_context(
0,
[record.AllClients],
[{
'core_requests': (0, 0),
'core_replies': (0, 0),
'ext_requests': (0, 0, 0, 0),
'ext_replies': (0, 0, 0, 0),
'delivered_events': (0, 0),
'device_events': (X.KeyPress, X.ButtonPress), #X.KeyRelease,
'errors': (0, 0),
'client_started': False,
'client_died': False,
}])
# Enable the context; this only returns after a call to record_disable_context,
# while calling the callback function in the meantime
logger.info("XRecord interface thread starting")
self.recordDisplay.record_enable_context(self.ctx, self.__processEvent)
# Finally free the context
self.recordDisplay.record_free_context(self.ctx)
self.recordDisplay.close()
def cancel(self):
self.localDisplay.record_disable_context(self.ctx)
XInterfaceBase.cancel(self)
def __processEvent(self, reply):
if reply.category != record.FromServer:
return
if reply.client_swapped:
return
if not len(reply.data) or reply.data[0] < 2:
# not an event
return
data = reply.data
while len(data):
event, data = rq.EventField(None).parse_binary_value(data, self.recordDisplay.display, None, None)
if event.type == X.KeyPress:
self.handle_keypress(event.detail)
elif event.type == X.KeyRelease:
self.handle_keyrelease(event.detail)
elif event.type == X.ButtonPress:
self.handle_mouseclick(event.detail, event.root_x, event.root_y)
class AtSpiInterface(XInterfaceBase):
def initialise(self):
self.registry = pyatspi.Registry
def start(self):
logger.info("AT-SPI interface thread starting")
self.registry.registerKeystrokeListener(self.__processKeyEvent, mask=pyatspi.allModifiers())
self.registry.registerEventListener(self.__processMouseEvent, 'mouse:button')
def cancel(self):
self.registry.deregisterKeystrokeListener(self.__processKeyEvent, mask=pyatspi.allModifiers())
self.registry.deregisterEventListener(self.__processMouseEvent, 'mouse:button')
self.registry.stop()
XInterfaceBase.cancel(self)
def __processKeyEvent(self, event):
if event.type == pyatspi.KEY_PRESSED_EVENT:
self.handle_keypress(event.hw_code)
else:
self.handle_keyrelease(event.hw_code)
def __processMouseEvent(self, event):
if event.type[-1] == 'p':
button = int(event.type[-2])
self.handle_mouseclick(button, event.detail1, event.detail2)
def __pumpEvents(self):
pyatspi.Registry.pumpQueuedEvents()
return True
from .iomediator_constants import MODIFIERS
from .iomediator_Key import Key
# from .iomediator import Key, MODIFIERS
from .configmanager import *
XK.load_keysym_group('xkb')
XK_TO_AK_MAP = {
XK.XK_Shift_L : Key.SHIFT,
XK.XK_Shift_R : Key.SHIFT,
XK.XK_Caps_Lock : Key.CAPSLOCK,
XK.XK_Control_L : Key.CONTROL,
XK.XK_Control_R : Key.CONTROL,
XK.XK_Alt_L : Key.ALT,
XK.XK_Alt_R : Key.ALT,
XK.XK_ISO_Level3_Shift : Key.ALT_GR,
XK.XK_Super_L : Key.SUPER,
XK.XK_Super_R : Key.SUPER,
XK.XK_Hyper_L : Key.HYPER,
XK.XK_Hyper_R : Key.HYPER,
XK.XK_Meta_L : Key.META,
XK.XK_Meta_R : Key.META,
XK.XK_Num_Lock : Key.NUMLOCK,
#SPACE : Key.SPACE,
XK.XK_Tab : Key.TAB,
XK.XK_Left : Key.LEFT,
XK.XK_Right : Key.RIGHT,
XK.XK_Up : Key.UP,
XK.XK_Down : Key.DOWN,
XK.XK_Return : Key.ENTER,
XK.XK_BackSpace : Key.BACKSPACE,
XK.XK_Scroll_Lock : Key.SCROLL_LOCK,
XK.XK_Print : Key.PRINT_SCREEN,
XK.XK_Pause : Key.PAUSE,
XK.XK_Menu : Key.MENU,
XK.XK_F1 : Key.F1,
XK.XK_F2 : Key.F2,
XK.XK_F3 : Key.F3,
XK.XK_F4 : Key.F4,
XK.XK_F5 : Key.F5,
XK.XK_F6 : Key.F6,
XK.XK_F7 : Key.F7,
XK.XK_F8 : Key.F8,
XK.XK_F9 : Key.F9,
XK.XK_F10 : Key.F10,
XK.XK_F11 : Key.F11,
XK.XK_F12 : Key.F12,
XK.XK_Escape : Key.ESCAPE,
XK.XK_Insert : Key.INSERT,
XK.XK_Delete : Key.DELETE,
XK.XK_Home : Key.HOME,
XK.XK_End : Key.END,
XK.XK_Page_Up : Key.PAGE_UP,
XK.XK_Page_Down : Key.PAGE_DOWN,
XK.XK_KP_Insert : Key.NP_INSERT,
XK.XK_KP_Delete : Key.NP_DELETE,
XK.XK_KP_End : Key.NP_END,
XK.XK_KP_Down : Key.NP_DOWN,
XK.XK_KP_Page_Down : Key.NP_PAGE_DOWN,
XK.XK_KP_Left : Key.NP_LEFT,
XK.XK_KP_Begin : Key.NP_5,
XK.XK_KP_Right : Key.NP_RIGHT,
XK.XK_KP_Home : Key.NP_HOME,
XK.XK_KP_Up: Key.NP_UP,
XK.XK_KP_Page_Up : Key.NP_PAGE_UP,
XK.XK_KP_Divide : Key.NP_DIVIDE,
XK.XK_KP_Multiply : Key.NP_MULTIPLY,
XK.XK_KP_Add : Key.NP_ADD,
XK.XK_KP_Subtract : Key.NP_SUBTRACT,
XK.XK_KP_Enter : Key.ENTER,
XK.XK_space : ' '
}
AK_TO_XK_MAP = dict((v,k) for k, v in XK_TO_AK_MAP.items())
XK_TO_AK_NUMLOCKED = {
XK.XK_KP_Insert : "0",
XK.XK_KP_Delete : ".",
XK.XK_KP_End : "1",
XK.XK_KP_Down : "2",
XK.XK_KP_Page_Down : "3",
XK.XK_KP_Left : "4",
XK.XK_KP_Begin : "5",
XK.XK_KP_Right : "6",
XK.XK_KP_Home : "7",
XK.XK_KP_Up: "8",
XK.XK_KP_Page_Up : "9",
XK.XK_KP_Divide : "/",
XK.XK_KP_Multiply : "*",
XK.XK_KP_Add : "+",
XK.XK_KP_Subtract : "-",
XK.XK_KP_Enter : Key.ENTER
}
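# Illustrative note (not part of the original module): with Num Lock on and
# Shift released, lookup_string() resolves keypad keycodes through this map,
# so a key press whose keysym is XK.XK_KP_End is reported as the string "1".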
class MockMediator:
"""
Mock IoMediator for testing purposes.
"""
def handle_modifier_down(self, modifier):
pass
def handle_modifier_up(self, modifier):
pass
def handle_keypress(self, keyCode, windowName):
pass
def handle_mouse_click(self):
pass
if __name__ == "__main__":
import time
x = XLibInterface(MockMediator(), True)
x.start()
x.keymap_test()
time.sleep(10.0)
#time.sleep(4.0)
#x.send_unicode_key([0, 3, 9, 4])
x.cancel()
print("Test completed. Thank you for your assistance in improving AutoKey!")
| 0.005877 |
# encoding: utf-8
"""
@author: monitor1379
@contact: [email protected]
@site: www.monitor1379.com
@version: 1.0
@license: GNU General Public License (Version 3)
@file: example1_or_nn.py
@time: 2016/10/11 0:07
Use Hamaa to build an MLP to solve the "or" problem.
This example covers all the necessary steps
for creating, training and testing a neural network.
"""
from hamaa.datasets import datasets
from hamaa.layers import Dense, Activation
from hamaa.models import Sequential
from hamaa.optimizers import SGD
# 1. create a model
model = Sequential()
# 2. add a fully connected layer to the model
model.add(Dense(input_dim=2, output_dim=2, init='uniform'))
# 3. add an activation layer to the model
model.add(Activation('sigmoid'))
# 4. use "mean square error" as the objective of the model
model.set_objective('mse')
# 5. use "stochastic gradient descent" as the optimizer of the model
model.set_optimizer(SGD(lr=0.9, momentum=0.9, decay=1e-6))
# 6. print the summary of the model
print model.summary()
# 7. load the "or" data; note that the label "y" is one-hot encoded
# x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
# y = np.array([[1, 0], [0, 1], [0, 1], [0, 1]])
x, y = datasets.load_or_data()
# 8. train the neural network
model.train(training_data=(x, y), nb_epochs=10)
# 9. evaluate the accuracy on data
print 'test accuracy: ', model.evaluate_accuracy(x, y)
| 0.001471 |
#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import sys
sys.path.append("/usr/share/rhn")
import string
import types
import commands
def create_crontab_line(minute = None,\
hour = None,\
dom = None,\
month = None,\
dow = None,
command = "python /usr/share/rhn/virtualization/poller.py"):
user = "root"
if minute == None:
minute = "*"
if hour == None:
hour = "*"
if dom == None:
dom = "*"
if month == None:
month = "*"
if dow == None:
dow = "*"
if type(minute) != types.StringType:
minute = string.strip(str(minute))
if type(hour) != types.StringType:
hour = string.strip(str(hour))
if type(dom) != types.StringType:
dom = string.strip(str(dom))
if type(month) != types.StringType:
month = string.strip(str(month))
if type(dow) != types.StringType:
dow = string.strip(str(dow))
str_template = "%s %s %s %s %s %s %s\n"
output_string = str_template % (minute, hour, dom, month, dow, user, command)
return output_string
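# Illustrative example (values shown are just what the template above produces):
# create_crontab_line(minute="0-59/2") returns
# "0-59/2 * * * * root python /usr/share/rhn/virtualization/poller.py\n"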
def schedule_poller(minute=None, hour=None, dom=None, month=None, dow=None):
try:
#create a crontab file
filename = "/etc/cron.d/rhn-virtualization.cron"
cronfile = open(filename, "w")
#create a crontab line
cron_line = create_crontab_line(minute, hour, dom, month, dow)
#write the crontab line to the cron file
cronfile.write(cron_line)
#close the cron file
cronfile.close()
except Exception, e:
return (1, str(e))
#restart crond so it picks up the new cron file
status, output = commands.getstatusoutput("/sbin/service crond restart")
if status != 0:
return (1, "Attempt to schedule poller failed: %s, %s" % (str(status), str(output)))
else:
return (0, "Scheduling of poller succeeded!")
if __name__ == "__main__":
schedule_poller(minute="0-59/2")
| 0.019326 |
"""Constants for the Awair component."""
from dataclasses import dataclass
from datetime import timedelta
import logging
from python_awair.devices import AwairDevice
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ICON,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
LIGHT_LUX,
PERCENTAGE,
TEMP_CELSIUS,
)
API_CO2 = "carbon_dioxide"
API_DUST = "dust"
API_HUMID = "humidity"
API_LUX = "illuminance"
API_PM10 = "particulate_matter_10"
API_PM25 = "particulate_matter_2_5"
API_SCORE = "score"
API_SPL_A = "sound_pressure_level"
API_TEMP = "temperature"
API_TIMEOUT = 20
API_VOC = "volatile_organic_compounds"
ATTRIBUTION = "Awair air quality sensor"
ATTR_LABEL = "label"
ATTR_UNIT = "unit"
ATTR_UNIQUE_ID = "unique_id"
DOMAIN = "awair"
DUST_ALIASES = [API_PM25, API_PM10]
LOGGER = logging.getLogger(__package__)
UPDATE_INTERVAL = timedelta(minutes=5)
SENSOR_TYPES = {
API_SCORE: {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:blur",
ATTR_UNIT: PERCENTAGE,
ATTR_LABEL: "Awair score",
ATTR_UNIQUE_ID: "score", # matches legacy format
},
API_HUMID: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,
ATTR_ICON: None,
ATTR_UNIT: PERCENTAGE,
ATTR_LABEL: "Humidity",
ATTR_UNIQUE_ID: "HUMID", # matches legacy format
},
API_LUX: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_ILLUMINANCE,
ATTR_ICON: None,
ATTR_UNIT: LIGHT_LUX,
ATTR_LABEL: "Illuminance",
ATTR_UNIQUE_ID: "illuminance",
},
API_SPL_A: {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:ear-hearing",
ATTR_UNIT: "dBa",
ATTR_LABEL: "Sound level",
ATTR_UNIQUE_ID: "sound_level",
},
API_VOC: {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:cloud",
ATTR_UNIT: CONCENTRATION_PARTS_PER_BILLION,
ATTR_LABEL: "Volatile organic compounds",
ATTR_UNIQUE_ID: "VOC", # matches legacy format
},
API_TEMP: {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ICON: None,
ATTR_UNIT: TEMP_CELSIUS,
ATTR_LABEL: "Temperature",
ATTR_UNIQUE_ID: "TEMP", # matches legacy format
},
API_PM25: {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:blur",
ATTR_UNIT: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
ATTR_LABEL: "PM2.5",
ATTR_UNIQUE_ID: "PM25", # matches legacy format
},
API_PM10: {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:blur",
ATTR_UNIT: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
ATTR_LABEL: "PM10",
ATTR_UNIQUE_ID: "PM10", # matches legacy format
},
API_CO2: {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:cloud",
ATTR_UNIT: CONCENTRATION_PARTS_PER_MILLION,
ATTR_LABEL: "Carbon dioxide",
ATTR_UNIQUE_ID: "CO2", # matches legacy format
},
}
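# Illustrative lookup sketch (not part of the integration): a sensor entity can
# pull its presentation metadata straight from this map, e.g.
# SENSOR_TYPES[API_TEMP][ATTR_LABEL] -> "Temperature"
# SENSOR_TYPES[API_TEMP][ATTR_UNIT] -> TEMP_CELSIUS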
@dataclass
class AwairResult:
"""Wrapper class to hold an awair device and set of air data."""
device: AwairDevice
air_data: dict
| 0 |
# type: ignore
# Tests for aiohttp/worker.py
import asyncio
import os
import socket
import ssl
from unittest import mock
import pytest
from aiohttp import web
base_worker = pytest.importorskip("aiohttp.worker")
try:
import uvloop
except ImportError:
uvloop = None
WRONG_LOG_FORMAT = '%a "%{Referrer}i" %(h)s %(l)s %s'
ACCEPTABLE_LOG_FORMAT = '%a "%{Referrer}i" %s'
# the tokio event loop does not allow overriding attributes
def skip_if_no_dict(loop):
if not hasattr(loop, "__dict__"):
pytest.skip("can not override loop attributes")
class BaseTestWorker:
def __init__(self):
self.servers = {}
self.exit_code = 0
self._notify_waiter = None
self.cfg = mock.Mock()
self.cfg.graceful_timeout = 100
self.pid = "pid"
self.wsgi = web.Application()
class AsyncioWorker(BaseTestWorker, base_worker.GunicornWebWorker):
pass
PARAMS = [AsyncioWorker]
if uvloop is not None:
class UvloopWorker(BaseTestWorker, base_worker.GunicornUVLoopWebWorker):
pass
PARAMS.append(UvloopWorker)
@pytest.fixture(params=PARAMS)
def worker(request, loop):
asyncio.set_event_loop(loop)
ret = request.param()
ret.notify = mock.Mock()
return ret
def test_init_process(worker) -> None:
with mock.patch("aiohttp.worker.asyncio") as m_asyncio:
try:
worker.init_process()
except TypeError:
pass
assert m_asyncio.new_event_loop.called
assert m_asyncio.set_event_loop.called
def test_run(worker, loop) -> None:
worker.log = mock.Mock()
worker.cfg = mock.Mock()
worker.cfg.access_log_format = ACCEPTABLE_LOG_FORMAT
worker.cfg.is_ssl = False
worker.sockets = []
worker.loop = loop
with pytest.raises(SystemExit):
worker.run()
worker.log.exception.assert_not_called()
assert loop.is_closed()
def test_run_async_factory(worker, loop) -> None:
worker.log = mock.Mock()
worker.cfg = mock.Mock()
worker.cfg.access_log_format = ACCEPTABLE_LOG_FORMAT
worker.cfg.is_ssl = False
worker.sockets = []
app = worker.wsgi
async def make_app():
return app
worker.wsgi = make_app
worker.loop = loop
worker.alive = False
with pytest.raises(SystemExit):
worker.run()
worker.log.exception.assert_not_called()
assert loop.is_closed()
def test_run_not_app(worker, loop) -> None:
worker.log = mock.Mock()
worker.cfg = mock.Mock()
worker.cfg.access_log_format = ACCEPTABLE_LOG_FORMAT
worker.loop = loop
worker.wsgi = "not-app"
worker.alive = False
with pytest.raises(SystemExit):
worker.run()
worker.log.exception.assert_called_with("Exception in gunicorn worker")
assert loop.is_closed()
def test_handle_quit(worker, loop) -> None:
worker.loop = mock.Mock()
worker.handle_quit(object(), object())
assert not worker.alive
assert worker.exit_code == 0
worker.loop.call_later.assert_called_with(0.1, worker._notify_waiter_done)
def test_handle_abort(worker) -> None:
with mock.patch("aiohttp.worker.sys") as m_sys:
worker.handle_abort(object(), object())
assert not worker.alive
assert worker.exit_code == 1
m_sys.exit.assert_called_with(1)
def test__wait_next_notify(worker) -> None:
worker.loop = mock.Mock()
worker._notify_waiter_done = mock.Mock()
fut = worker._wait_next_notify()
assert worker._notify_waiter == fut
worker.loop.call_later.assert_called_with(1.0, worker._notify_waiter_done, fut)
def test__notify_waiter_done(worker) -> None:
worker._notify_waiter = None
worker._notify_waiter_done()
assert worker._notify_waiter is None
waiter = worker._notify_waiter = mock.Mock()
worker._notify_waiter.done.return_value = False
worker._notify_waiter_done()
assert worker._notify_waiter is None
waiter.set_result.assert_called_with(True)
def test__notify_waiter_done_explicit_waiter(worker) -> None:
worker._notify_waiter = None
assert worker._notify_waiter is None
waiter = worker._notify_waiter = mock.Mock()
waiter.done.return_value = False
waiter2 = worker._notify_waiter = mock.Mock()
worker._notify_waiter_done(waiter)
assert worker._notify_waiter is waiter2
waiter.set_result.assert_called_with(True)
assert not waiter2.set_result.called
def test_init_signals(worker) -> None:
worker.loop = mock.Mock()
worker.init_signals()
assert worker.loop.add_signal_handler.called
@pytest.mark.parametrize(
"source,result",
[
(ACCEPTABLE_LOG_FORMAT, ACCEPTABLE_LOG_FORMAT),
(
AsyncioWorker.DEFAULT_GUNICORN_LOG_FORMAT,
AsyncioWorker.DEFAULT_AIOHTTP_LOG_FORMAT,
),
],
)
def test__get_valid_log_format_ok(worker, source, result) -> None:
assert result == worker._get_valid_log_format(source)
def test__get_valid_log_format_exc(worker) -> None:
with pytest.raises(ValueError) as exc:
worker._get_valid_log_format(WRONG_LOG_FORMAT)
assert "%(name)s" in str(exc.value)
async def test__run_ok_parent_changed(worker, loop, aiohttp_unused_port) -> None:
skip_if_no_dict(loop)
worker.ppid = 0
worker.alive = True
sock = socket.socket()
addr = ("localhost", aiohttp_unused_port())
sock.bind(addr)
worker.sockets = [sock]
worker.log = mock.Mock()
worker.loop = loop
worker.cfg.access_log_format = ACCEPTABLE_LOG_FORMAT
worker.cfg.max_requests = 0
worker.cfg.is_ssl = False
await worker._run()
worker.notify.assert_called_with()
worker.log.info.assert_called_with("Parent changed, shutting down: %s", worker)
async def test__run_exc(worker, loop, aiohttp_unused_port) -> None:
skip_if_no_dict(loop)
worker.ppid = os.getppid()
worker.alive = True
sock = socket.socket()
addr = ("localhost", aiohttp_unused_port())
sock.bind(addr)
worker.sockets = [sock]
worker.log = mock.Mock()
worker.loop = loop
worker.cfg.access_log_format = ACCEPTABLE_LOG_FORMAT
worker.cfg.max_requests = 0
worker.cfg.is_ssl = False
def raiser():
waiter = worker._notify_waiter
worker.alive = False
waiter.set_exception(RuntimeError())
loop.call_later(0.1, raiser)
await worker._run()
worker.notify.assert_called_with()
def test__create_ssl_context_without_certs_and_ciphers(
worker,
tls_certificate_pem_path,
) -> None:
worker.cfg.ssl_version = ssl.PROTOCOL_SSLv23
worker.cfg.cert_reqs = ssl.CERT_OPTIONAL
worker.cfg.certfile = tls_certificate_pem_path
worker.cfg.keyfile = tls_certificate_pem_path
worker.cfg.ca_certs = None
worker.cfg.ciphers = None
ctx = worker._create_ssl_context(worker.cfg)
assert isinstance(ctx, ssl.SSLContext)
def test__create_ssl_context_with_ciphers(
worker,
tls_certificate_pem_path,
) -> None:
worker.cfg.ssl_version = ssl.PROTOCOL_SSLv23
worker.cfg.cert_reqs = ssl.CERT_OPTIONAL
worker.cfg.certfile = tls_certificate_pem_path
worker.cfg.keyfile = tls_certificate_pem_path
worker.cfg.ca_certs = None
worker.cfg.ciphers = "3DES PSK"
ctx = worker._create_ssl_context(worker.cfg)
assert isinstance(ctx, ssl.SSLContext)
def test__create_ssl_context_with_ca_certs(
worker,
tls_ca_certificate_pem_path,
tls_certificate_pem_path,
) -> None:
worker.cfg.ssl_version = ssl.PROTOCOL_SSLv23
worker.cfg.cert_reqs = ssl.CERT_OPTIONAL
worker.cfg.certfile = tls_certificate_pem_path
worker.cfg.keyfile = tls_certificate_pem_path
worker.cfg.ca_certs = tls_ca_certificate_pem_path
worker.cfg.ciphers = None
ctx = worker._create_ssl_context(worker.cfg)
assert isinstance(ctx, ssl.SSLContext)
| 0.000384 |
#
# Secret Labs' Regular Expression Engine
#
# re-compatible interface for the sre matching engine
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB ([email protected]).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.
Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the newline at
the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?iLmsux) Set the I, L, M, S, U, or X flag for the RE (see below).
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ... (must be fixed length).
(?<!...) Matches if not preceded by ... (must be fixed length).
(?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
the (optional) no pattern otherwise.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\number Matches the contents of the group of the same number.
\A Matches only at the start of the string.
\Z Matches only at the end of the string.
\b Matches the empty string, but only at the start or end of a word.
\B Matches the empty string, but not at the start or end of a word.
\d Matches any decimal digit; equivalent to the set [0-9].
\D Matches any non-digit character; equivalent to the set [^0-9].
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v].
\S Matches any non-whitespace character; equiv. to [^ \t\n\r\f\v].
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_].
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\W Matches the complement of \w.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a RegexObject.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
Some of the functions in this module take flags as optional parameters:
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string.
"$" matches the end of lines (before a newline) as well
as the end of the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
U UNICODE Make \w, \W, \b, \B, dependent on the Unicode locale.
This module also defines an exception 'error'.
"""
import sys
import sre_compile
import sre_parse
# public symbols
__all__ = [ "match", "search", "sub", "subn", "split", "findall",
"compile", "purge", "template", "escape", "I", "L", "M", "S", "X",
"U", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
"UNICODE", "error" ]
__version__ = "2.2.1"
# flags
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode locale
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments
# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation
# sre exception
error = sre_compile.error
# --------------------------------------------------------------------
# public interface
def match(pattern, string, flags=0):
"""Try to apply the pattern at the start of the string, returning
a match object, or None if no match was found."""
return _compile(pattern, flags).match(string)
def search(pattern, string, flags=0):
"""Scan through string looking for a match to the pattern, returning
a match object, or None if no match was found."""
return _compile(pattern, flags).search(string)
def sub(pattern, repl, string, count=0):
"""Return the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in string by the
replacement repl. repl can be either a string or a callable;
if a callable, it's passed the match object and must return
a replacement string to be used."""
return _compile(pattern, 0).sub(repl, string, count)
def subn(pattern, repl, string, count=0):
"""Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in the source
string by the replacement repl. number is the number of
substitutions that were made. repl can be either a string or a
callable; if a callable, it's passed the match object and must
return a replacement string to be used."""
return _compile(pattern, 0).subn(repl, string, count)
def split(pattern, string, maxsplit=0):
"""Split the source string by the occurrences of the pattern,
returning a list containing the resulting substrings."""
return _compile(pattern, 0).split(string, maxsplit)
def findall(pattern, string, flags=0):
"""Return a list of all non-overlapping matches in the string.
If one or more groups are present in the pattern, return a
list of groups; this will be a list of tuples if the pattern
has more than one group.
Empty matches are included in the result."""
return _compile(pattern, flags).findall(string)
if sys.hexversion >= 0x02020000:
__all__.append("finditer")
def finditer(pattern, string, flags=0):
"""Return an iterator over all non-overlapping matches in the
string. For each match, the iterator returns a match object.
Empty matches are included in the result."""
return _compile(pattern, flags).finditer(string)
def compile(pattern, flags=0):
"Compile a regular expression pattern, returning a pattern object."
return _compile(pattern, flags)
def purge():
"Clear the regular expression cache"
_cache.clear()
_cache_repl.clear()
def template(pattern, flags=0):
"Compile a template pattern, returning a pattern object"
return _compile(pattern, flags|T)
_alphanum = {}
for c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890':
_alphanum[c] = 1
del c
def escape(pattern):
"Escape all non-alphanumeric characters in pattern."
s = list(pattern)
alphanum = _alphanum
for i in range(len(pattern)):
c = pattern[i]
if c not in alphanum:
if c == "\000":
s[i] = "\\000"
else:
s[i] = "\\" + c
return pattern[:0].join(s)
# --------------------------------------------------------------------
# internals
_cache = {}
_cache_repl = {}
_pattern_type = type(sre_compile.compile("", 0))
_MAXCACHE = 100
def _compile(*key):
# internal: compile pattern
cachekey = (type(key[0]),) + key
p = _cache.get(cachekey)
if p is not None:
return p
pattern, flags = key
if isinstance(pattern, _pattern_type):
return pattern
if not sre_compile.isstring(pattern):
raise TypeError, "first argument must be string or compiled pattern"
try:
p = sre_compile.compile(pattern, flags)
except error, v:
raise error, v # invalid expression
if len(_cache) >= _MAXCACHE:
_cache.clear()
_cache[cachekey] = p
return p
def _compile_repl(*key):
# internal: compile replacement pattern
p = _cache_repl.get(key)
if p is not None:
return p
repl, pattern = key
try:
p = sre_parse.parse_template(repl, pattern)
except error, v:
raise error, v # invalid expression
if len(_cache_repl) >= _MAXCACHE:
_cache_repl.clear()
_cache_repl[key] = p
return p
def _expand(pattern, match, template):
# internal: match.expand implementation hook
template = sre_parse.parse_template(template, pattern)
return sre_parse.expand_template(template, match)
def _subx(pattern, template):
# internal: pattern.sub/subn implementation helper
template = _compile_repl(template, pattern)
if not template[0] and len(template[1]) == 1:
# literal replacement
return template[1][0]
def filter(match, template=template):
return sre_parse.expand_template(template, match)
return filter
# register myself for pickling
import copy_reg
def _pickle(p):
return _compile, (p.pattern, p.flags)
copy_reg.pickle(_pattern_type, _pickle, _compile)
# --------------------------------------------------------------------
# experimental stuff (see python-dev discussions for details)
class Scanner:
def __init__(self, lexicon, flags=0):
from sre_constants import BRANCH, SUBPATTERN
self.lexicon = lexicon
# combine phrases into a compound pattern
p = []
s = sre_parse.Pattern()
s.flags = flags
for phrase, action in lexicon:
p.append(sre_parse.SubPattern(s, [
(SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
]))
s.groups = len(p)+1
p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
self.scanner = sre_compile.compile(p)
def scan(self, string):
result = []
append = result.append
match = self.scanner.scanner(string).match
i = 0
while 1:
m = match()
if not m:
break
j = m.end()
if i == j:
break
action = self.lexicon[m.lastindex-1][1]
if callable(action):
self.match = m
action = action(self, m.group())
if action is not None:
append(action)
i = j
return result, string[i:]
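# Illustrative Scanner usage sketch (not part of the original module); the
# lexicon and the input string are arbitrary example values:
# scanner = Scanner([(r"[a-z]+", lambda s, tok: ("WORD", tok)),
#                    (r"\d+", lambda s, tok: ("INT", tok)),
#                    (r"\s+", None)])
# scanner.scan("abc 123") -> ([('WORD', 'abc'), ('INT', '123')], '')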
| 0.003536 |
from __future__ import print_function
import sys
from math import sqrt
class Edge:
def __init__(self, fdo, n1, n2, eqlen):
self.fdo = fdo
self.n1 = n1
self.n2 = n2
self.eqlen = eqlen
def applySpringForces(self):
f12 = [ r - l for l,r in zip(self.n1.p, self.n2.p) ]
f12 = self.fdo.normalize(*f12)
f12 = [ f * self._dLength() * self.fdo.getSpringConst() for f in f12]
self.n2.f = [ a + b for a, b in zip(f12, self.n2.f) ]
self.n1.f = [ a - b for a, b in zip(f12, self.n1.f) ]
def _dLength(self):
l = self.fdo.dist(self.n1.p, self.n2.p)
return self.eqlen - l
class Node:
def __init__(self, fdo, p, i, mass=1):
self.fdo = fdo
self.m = mass
self.i = i
self.p = p
self.v = [ 0 for x in range(self.fdo.dims) ]
self.zeroForce()
def zeroForce(self):
self.f = [ 0 for x in range(self.fdo.dims) ]
def applyCoulombForceTo(self, other):
f12 = [ r - l for l,r in zip(self.p, other.p) ]
f12 = self.fdo.normalize(*f12)
r = self.fdo.dist(self.p, other.p)
r = max(r, self.fdo.getZeroDist())
# mass * const / r^2
other.f = [ f + fon * (self.m * self.fdo.getCoulombConst() / (r*r)) for f, fon in zip(other.f, f12)]
def update(self, t):
self.f = [ x * self.fdo.getDampConst() for x in self.f ]
accel = [ x / self.m for x in self.f ]
# vt
vv = [ v * t for v in self.v ]
# .5a^2
aa = [ .5 * t * t * a for a in accel ]
# p = p + vt + .5at^2
self.p = [ p + v + a for p,v,a in zip(self.p, vv, aa) ]
# v = v + a*t
self.v = [ v + a*t for v,a in zip(self.v, accel)]
def calcKE(self):
return .5 * self.m * sum([x*x for x in self.v])
class FDO:
# maybe all these should be @property
# but static methods can be overridden by more interesting
# derived FDO classes
@staticmethod
def getSpringConst():
return 2
@staticmethod
def getCoulombConst():
return .2
@staticmethod
def getDampConst():
return .8
@staticmethod
def getTimeStep():
return .01
@staticmethod
def getCutoffKE():
return 1000
@staticmethod
def getZeroDist():
return 1
@staticmethod
def normalize(*vector):
mag = FDO.mag(*vector)
return [ v/mag for v in vector ]
@staticmethod
def mag(*vector):
return FDO.dist([0 for i in range(len(vector))], vector)
@staticmethod
def dist(a, b):
d = 0
for l,r in zip(a,b):
d += pow(r - l, 2)
return sqrt(d)
def __init__(self, dimensions):
self.dims = dimensions
self._nodes = list()
self._edges = list()
self.size = 0
self.totalKE = 0
def addNode(self, *point):
if len(point) != self.dims: return
try:
p = [ float(f) for f in point ]
n = Node(self, p, self.size)
self._nodes.append(n)
self.size += 1
except ValueError:
print("Bad node format, not added", file=sys.stderr)
return None
return n
def addSpring(self, n1, n2, eqlen=None):
e = Edge(self, n1, n2, eqlen if eqlen is not None else self.dist(n1.p,n2.p))
self._edges.append(e)
def zeroNodeForces(self):
for n in self._nodes:
n.zeroForce()
def applyForces(self):
# attraction forces
for n1 in self._nodes:
for n2 in self._nodes:
if n1.i is not n2.i:
n1.applyCoulombForceTo(n2)
# spring forces
for e in self._edges:
e.applySpringForces()
def _step(self, t=None):
if t is None: t = FDO.getTimeStep()
self.zeroNodeForces()
self.applyForces()
for n in self._nodes:
n.update(t)
self.totalKE += n.calcKE()
def run(self, iterations=None, timestep=None):
i = 0
self.totalKE = 0
iters = 50 if iterations is None else iterations
t = self.getTimeStep() if timestep is None else timestep
while (i < iters or self.totalKE < self.getCutoffKE()):
self._step(t)
i += 1
def nodes(self):
for n in self._nodes:
yield n.p
def results(self):
return list(self.nodes())
def print(self):
for n in self.nodes():
print(n)
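# Minimal usage sketch (assumption: the tiny triangle graph, coordinates and
# step count below are made up for illustration). It builds a 2-D layout,
# connects three nodes with springs and steps the simulation a fixed number
# of times; run() stops on an accumulated kinetic-energy cutoff, so manual
# stepping keeps the sketch deterministic.
if __name__ == "__main__":
    fdo = FDO(2)                      # 2-dimensional layout
    a = fdo.addNode(0.0, 0.0)
    b = fdo.addNode(1.0, 0.0)
    c = fdo.addNode(0.0, 1.0)
    fdo.addSpring(a, b)               # eqlen defaults to the current distance
    fdo.addSpring(b, c)
    fdo.addSpring(c, a)
    for _ in range(200):              # fixed number of simulation steps
        fdo._step()
    fdo.print()                       # final node positions, one per line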
| 0.013813 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Chris Houseknecht <@chouseknecht>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s
short_description: Manage Kubernetes (K8s) objects
version_added: "2.6"
author:
- "Chris Houseknecht (@chouseknecht)"
- "Fabian von Feilitzsch (@fabianvf)"
description:
- Use the OpenShift Python client to perform CRUD operations on K8s objects.
- Pass the object definition from a source file or inline. See examples for reading
files and using Jinja templates.
- Access to the full range of K8s APIs.
- Authenticate using either a config file, certificates, password or token.
- Supports check mode.
extends_documentation_fragment:
- k8s_state_options
- k8s_name_options
- k8s_resource_options
- k8s_auth_options
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Create a k8s namespace
k8s:
name: testing
api_version: v1
kind: Namespace
state: present
- name: Create a Service object from an inline definition
k8s:
state: present
definition:
apiVersion: v1
kind: Service
metadata:
name: web
namespace: testing
labels:
app: galaxy
service: web
spec:
selector:
app: galaxy
service: web
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000
- name: Create a Service object by reading the definition from a file
k8s:
state: present
src: /testing/service.yml
- name: Get an existing Service object
k8s:
api_version: v1
kind: Service
name: web
namespace: testing
register: web_service
- name: Get a list of all service objects
k8s:
api_version: v1
kind: ServiceList
namespace: testing
register: service_list
- name: Remove an existing Service object
k8s:
state: absent
api_version: v1
kind: Service
namespace: testing
name: web
# Passing the object definition from a file
- name: Create a Deployment by reading the definition from a local file
k8s:
state: present
src: /testing/deployment.yml
- name: Read definition file from the Ansible controller file system
k8s:
state: present
definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}"
- name: Read definition file from the Ansible controller file system after Jinja templating
k8s:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
'''
RETURN = '''
result:
description:
- The created, patched, or otherwise present object. Will be empty in the case of a deletion.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
items:
description: Returned only when the I(kind) is a List type resource. Contains a set of objects.
returned: when resource is a List
type: list
'''
from ansible.module_utils.k8s.raw import KubernetesRawModule
def main():
KubernetesRawModule().execute_module()
if __name__ == '__main__':
main()
| 0.002196 |
#!/usr/bin/env python
# encoding: utf-8
import npyscreen
#npyscreen.disableColor()
class TestApp(npyscreen.NPSApp):
def main(self):
# These lines create the form and populate it with widgets.
# A fairly complex screen in only 8 or so lines of code - a line for each control.
F = npyscreen.ActionFormExpanded(name = "Welcome to Npyscreen",)
t = F.add(npyscreen.TitleText, name = "Text:",)
fn = F.add(npyscreen.TitleFilename, name = "Filename:")
dt = F.add(npyscreen.TitleDateCombo, name = "Date:")
s = F.add(npyscreen.TitleSlider, out_of=12, name = "Slider")
ml= F.add(npyscreen.MultiLineEdit,
value = """try typing here!\nMutiline text, press ^R to reformat.\n""",
max_height=5, rely=9)
ms= F.add(npyscreen.TitleSelectOne, max_height=4, value = [1,], name="Pick One",
values = ["Option1","Option2","Option3"], scroll_exit=True)
ms2= F.add(npyscreen.TitleMultiSelect, max_height =-2, value = [1,], name="Pick Several",
values = ["Option1","Option2","Option3"], scroll_exit=True)
# This lets the user play with the Form.
F.edit()
if __name__ == "__main__":
App = TestApp()
App.run()
| 0.036392 |
# -*- coding: utf-8 -*-
"""
eve.flaskapp
~~~~~~~~~~~~
This module implements the central WSGI application object as a Flask
subclass.
:copyright: (c) 2015 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import eve
import sys
import os
import copy
from flask import Flask
from werkzeug.routing import BaseConverter
from werkzeug.serving import WSGIRequestHandler
from eve.io.mongo import Mongo, Validator, GridFSMediaStorage
from eve.exceptions import ConfigException, SchemaException
from eve.endpoints import collections_endpoint, item_endpoint, home_endpoint, \
error_endpoint, media_endpoint
from eve.defaults import build_defaults
from eve.utils import api_prefix, extract_key_values
from events import Events
class EveWSGIRequestHandler(WSGIRequestHandler):
""" Extend werkzeug request handler to include current Eve version in all
responses, which is super-handy for debugging.
"""
@property
def server_version(self):
return 'Eve/%s ' % eve.__version__ + super(EveWSGIRequestHandler,
self).server_version
class RegexConverter(BaseConverter):
""" Extend werkzeug routing by supporting regex for urls/API endpoints """
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
class Eve(Flask, Events):
""" The main Eve object. On initialization it will load Eve settings, then
configure and enable the API endpoints. The API is launched by executing
the code below:::
app = Eve()
app.run()
:param import_name: the name of the application package
:param settings: the name of the settings file. Defaults to `settings.py`.
:param validator: custom validation class. Must be a
:class:`~cerberus.Validator` subclass. Defaults to
:class:`eve.io.mongo.Validator`.
:param data: the data layer class. Must be a :class:`~eve.io.DataLayer`
subclass. Defaults to :class:`~eve.io.Mongo`.
:param auth: the authentication class used to authenticate incoming
requests. Must be a :class: `eve.auth.BasicAuth` subclass.
:param redis: the redis (pyredis) instance used by the Rate-Limiting
feature, if enabled.
:param url_converters: dictionary of Flask url_converters to add to
supported ones (int, float, path, regex).
:param json_encoder: custom json encoder class. Must be a
JSONEncoder subclass. You probably want it to be
as eve.io.base.BaseJSONEncoder subclass.
:param media: the media storage class. Must be a
:class:`~eve.io.media.MediaStorage` subclass.
:param kwargs: optional, standard, Flask parameters.
.. versionchanged:: 0.4
Ensure all errors returns a parseable body. Closes #365.
'auth' argument can be either an instance or a callable. Closes #248.
Made resource setup more DRY by calling register_resource.
.. versionchanged:: 0.3
Support for optional media storage system. Defaults to
GridFSMediaStorage.
.. versionchanged:: 0.2
Support for additional Flask url converters.
Support for optional, custom json encoder class.
    Support for endpoint-level authentication classes.
New method Eve.register_resource() for registering new resource after
initialization of Eve object. This is needed for simpler initialization
API of all ORM/ODM extensions.
.. versionchanged:: 0.1.0
Now supporting both "trailing slashes" and "no-trailing slashes" URLs.
.. versionchanged:: 0.0.7
'redis' argument added to handle an accessory Redis server (currently
used by the Rate-Limiting feature).
.. versionchanged:: 0.0.6
'Events' added to the list of super classes, allowing for the arbitrary
raising of events within the application.
.. versionchanged:: 0.0.4
'auth' argument added to handle authentication classes
"""
#: Allowed methods for resource endpoints
supported_resource_methods = ['GET', 'POST', 'DELETE']
#: Allowed methods for item endpoints
supported_item_methods = ['GET', 'PATCH', 'DELETE', 'PUT']
def __init__(self, import_name=__package__, settings='settings.py',
validator=Validator, data=Mongo, auth=None, redis=None,
url_converters=None, json_encoder=None,
media=GridFSMediaStorage, **kwargs):
""" Eve main WSGI app is implemented as a Flask subclass. Since we want
to be able to launch our API by simply invoking Flask's run() method,
we need to enhance our super-class a little bit.
"""
super(Eve, self).__init__(import_name, **kwargs)
self.validator = validator
self.settings = settings
self.load_config()
self.validate_domain_struct()
# enable regex routing
self.url_map.converters['regex'] = RegexConverter
# optional url_converters and json encoder
if url_converters:
self.url_map.converters.update(url_converters)
self.data = data(self)
if json_encoder:
self.data.json_encoder_class = json_encoder
self.media = media(self) if media else None
self.redis = redis
if auth:
self.auth = auth() if callable(auth) else auth
else:
self.auth = None
self._init_url_rules()
self._init_media_endpoint()
self._init_oplog()
# validate and set defaults for each resource
# Use a snapshot of the DOMAIN setup for iteration so
# further insertion of versioned resources do not
# cause a RuntimeError due to the change of size of
# the dict
domain_copy = copy.deepcopy(self.config['DOMAIN'])
for resource, settings in domain_copy.items():
self.register_resource(resource, settings)
# it seems like both domain_copy and config['DOMAIN']
# suffered changes at this point, so merge them
# self.config['DOMAIN'].update(domain_copy)
self.register_error_handlers()
def run(self, host=None, port=None, debug=None, **options):
"""
Pass our own subclass of :class:`werkzeug.serving.WSGIRequestHandler
to Flask.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'``.
:param port: the port of the webserver. Defaults to ``5000``.
:param debug: if given, enable or disable debug mode.
See :attr:`debug`.
:param options: the options to be forwarded to the underlying
Werkzeug server. See
:func:`werkzeug.serving.run_simple` for more
information. """
options.setdefault('request_handler', EveWSGIRequestHandler)
super(Eve, self).run(host, port, debug, **options)
def load_config(self):
""" API settings are loaded from standard python modules. First from
        `settings.py` (or alternative name/path passed as an argument) and
then, when defined, from the file specified in the
`EVE_SETTINGS` environment variable.
Since we are a Flask subclass, any configuration value supported by
Flask itself is available (besides Eve's proper settings).
.. versionchanged:: 0.5
Allow EVE_SETTINGS envvar to be used exclusively. Closes #461.
.. versionchanged:: 0.2
Allow use of a dict object as settings.
"""
# load defaults
self.config.from_object('eve.default_settings')
# overwrite the defaults with custom user settings
if isinstance(self.settings, dict):
self.config.update(self.settings)
else:
if os.path.isabs(self.settings):
pyfile = self.settings
else:
abspath = os.path.abspath(os.path.dirname(sys.argv[0]))
pyfile = os.path.join(abspath, self.settings)
try:
self.config.from_pyfile(pyfile)
except:
# assume an envvar is going to be used exclusively
pass
# overwrite settings with custom environment variable
envvar = 'EVE_SETTINGS'
if os.environ.get(envvar):
self.config.from_envvar(envvar)
def validate_domain_struct(self):
""" Validates that Eve configuration settings conform to the
requirements.
"""
try:
domain = self.config['DOMAIN']
except:
raise ConfigException('DOMAIN dictionary missing or wrong.')
if not isinstance(domain, dict):
raise ConfigException('DOMAIN must be a dict.')
def validate_config(self):
""" Makes sure that REST methods expressed in the configuration
settings are supported.
.. versionchanged:: 0.2.0
Default supported methods are now class-level attributes.
Resource validation delegated to _validate_resource_settings().
.. versionchanged:: 0.1.0
Support for PUT method.
.. versionchanged:: 0.0.4
Support for 'allowed_roles' and 'allowed_item_roles'
.. versionchanged:: 0.0.2
Support for DELETE resource method.
"""
# make sure that global resource methods are supported.
self.validate_methods(self.supported_resource_methods,
self.config.get('RESOURCE_METHODS'),
'resource')
# make sure that global item methods are supported.
self.validate_methods(self.supported_item_methods,
self.config.get('ITEM_METHODS'),
'item')
# make sure that individual resource/item methods are supported.
for resource, settings in self.config['DOMAIN'].items():
self._validate_resource_settings(resource, settings)
def _validate_resource_settings(self, resource, settings):
""" Validates one resource in configuration settings.
:param resource: name of the resource which settings refer to.
:param settings: settings of resource to be validated.
.. versionchanged:: 0.4
validate that auth_field is not set to ID_FIELD. See #266.
.. versionadded:: 0.2
"""
self.validate_methods(self.supported_resource_methods,
settings['resource_methods'],
'[%s] resource ' % resource)
self.validate_methods(self.supported_item_methods,
settings['item_methods'],
'[%s] item ' % resource)
# while a resource schema is optional for read-only access,
# it is mandatory for write-access to resource/items.
if 'POST' in settings['resource_methods'] or \
'PATCH' in settings['item_methods']:
if len(settings['schema']) == 0:
raise ConfigException('A resource schema must be provided '
'when POST or PATCH methods are allowed '
'for a resource [%s].' % resource)
self.validate_roles('allowed_roles', settings, resource)
self.validate_roles('allowed_read_roles', settings, resource)
self.validate_roles('allowed_write_roles', settings, resource)
self.validate_roles('allowed_item_roles', settings, resource)
self.validate_roles('allowed_item_read_roles', settings, resource)
self.validate_roles('allowed_item_write_roles', settings, resource)
if settings['auth_field'] == self.config['ID_FIELD']:
raise ConfigException('"%s": auth_field cannot be set to ID_FIELD '
'(%s)' % (resource, self.config['ID_FIELD']))
self.validate_schema(resource, settings['schema'])
def validate_roles(self, directive, candidate, resource):
""" Validates that user role directives are syntactically and formally
        adequate.
:param directive: either 'allowed_[read_|write_]roles' or
'allow_item_[read_|write_]roles'.
:param candidate: the candidate setting to be validated.
:param resource: name of the resource to which the candidate settings
refer to.
.. versionadded:: 0.0.4
"""
roles = candidate[directive]
if not isinstance(roles, list):
raise ConfigException("'%s' must be list"
"[%s]." % (directive, resource))
def validate_methods(self, allowed, proposed, item):
""" Compares allowed and proposed methods, raising a `ConfigException`
when they don't match.
:param allowed: a list of supported (allowed) methods.
:param proposed: a list of proposed methods.
:param item: name of the item to which the methods would be applied.
Used when raising the exception.
"""
diff = set(proposed) - set(allowed)
if diff:
raise ConfigException('Unallowed %s method(s): %s. '
'Supported: %s' %
(item, ', '.join(diff),
', '.join(allowed)))
def validate_schema(self, resource, schema):
""" Validates a resource schema.
:param resource: resource name.
:param schema: schema definition for the resource.
.. versionchanged:: 0.5
Add ETAG to automatic fields check.
.. versionchanged:: 0.4
Checks against offending document versioning fields.
Supports embedded data_relation with version.
.. versionchanged:: 0.2
Allow ID_FIELD in resource schema if not of 'objectid' type.
.. versionchanged:: 0.1.1
'collection' setting renamed to 'resource' (data_relation).
Fix order of string arguments in exception message.
.. versionchanged:: 0.1.0
Validation for 'embeddable' fields.
.. versionchanged:: 0.0.5
Validation of the 'data_relation' field rule.
Now collecting offending items in a list and inserting results into
the exception message.
"""
# ensure automatically handled fields aren't defined
fields = [eve.DATE_CREATED, eve.LAST_UPDATED, eve.ETAG]
# TODO: only add the following checks if settings['versioning'] == True
fields += [
self.config['VERSION'],
self.config['LATEST_VERSION'],
self.config['ID_FIELD'] + self.config['VERSION_ID_SUFFIX']]
offenders = []
for field in fields:
if field in schema:
offenders.append(field)
if eve.ID_FIELD in schema and \
schema[eve.ID_FIELD]['type'] == 'objectid':
offenders.append(eve.ID_FIELD)
if offenders:
raise SchemaException('field(s) "%s" not allowed in "%s" schema '
'(they will be handled automatically).'
% (', '.join(offenders), resource))
# check data_relation rules
for field, ruleset in schema.items():
if 'data_relation' in ruleset:
if 'resource' not in ruleset['data_relation']:
raise SchemaException("'resource' key is mandatory for "
"the 'data_relation' rule in "
"'%s: %s'" % (resource, field))
if ruleset['data_relation'].get('embeddable', False):
# special care for data_relations with a version
value_field = ruleset['data_relation']['field']
if ruleset['data_relation'].get('version', False):
if 'schema' not in ruleset or \
value_field not in ruleset['schema'] or \
'type' not in ruleset['schema'][value_field]:
raise SchemaException(
"Must defined type for '%s' in schema when "
"declaring an embedded data_relation with"
" version." % value_field
)
# TODO are there other mandatory settings? Validate them here
def set_defaults(self):
""" When not provided, fills individual resource settings with default
or global configuration settings.
.. versionchanged:: 0.4
`versioning`
`VERSION` added to automatic projection (when applicable)
.. versionchanged:: 0.2
Setting of actual resource defaults is delegated to
_set_resource_defaults().
.. versionchanged:: 0.1.1
'default' values that could be assimilated to None (0, None, "")
would be ignored.
'dates' helper removed as datetime conversion is now handled by
the eve.methods.common.data_parse function.
.. versionchanged:: 0.1.0
'embedding'.
Support for optional HATEOAS.
.. versionchanged:: 0.0.9
'auth_username_field' renamed to 'auth_field'.
Always include automatic fields despite of datasource projections.
.. versionchanged:: 0.0.8
'mongo_write_concern'
.. versionchanged:: 0.0.7
'extra_response_fields'
.. versionchanged:: 0.0.6
'datasource[projection]'
'projection',
'allow_unknown'
.. versionchanged:: 0.0.5
'auth_username_field'
'filters',
'sorting',
'pagination'.
.. versionchanged:: 0.0.4
'defaults',
'datasource',
'public_methods',
'public_item_methods',
'allowed_roles',
'allowed_item_roles'.
.. versionchanged:: 0.0.3
`item_title` default value.
"""
for resource, settings in self.config['DOMAIN'].items():
self._set_resource_defaults(resource, settings)
def _set_resource_defaults(self, resource, settings):
""" Low-level method which sets default values for one resource.
.. versionchanged:: 0.5
Don't set default projection if 'allow_unknown' is active (#497).
'internal_resource'
.. versionchanged:: 0.3
Set projection to None when schema is not provided for the resource.
Support for '_media' helper.
.. versionchanged:: 0.2
'resource_title',
'default_sort',
'embedded_fields'.
        Support for endpoint-level authentication classes.
"""
settings.setdefault('url', resource)
settings.setdefault('resource_methods',
self.config['RESOURCE_METHODS'])
settings.setdefault('public_methods',
self.config['PUBLIC_METHODS'])
settings.setdefault('allowed_roles', self.config['ALLOWED_ROLES'])
settings.setdefault('allowed_read_roles',
self.config['ALLOWED_READ_ROLES'])
settings.setdefault('allowed_write_roles',
self.config['ALLOWED_WRITE_ROLES'])
settings.setdefault('cache_control', self.config['CACHE_CONTROL'])
settings.setdefault('cache_expires', self.config['CACHE_EXPIRES'])
settings.setdefault('item_lookup_field',
self.config['ITEM_LOOKUP_FIELD'])
settings.setdefault('item_url', self.config['ITEM_URL'])
settings.setdefault('resource_title', settings['url'])
settings.setdefault('item_title',
resource.rstrip('s').capitalize())
settings.setdefault('item_lookup', self.config['ITEM_LOOKUP'])
settings.setdefault('public_item_methods',
self.config['PUBLIC_ITEM_METHODS'])
settings.setdefault('allowed_item_roles',
self.config['ALLOWED_ITEM_ROLES'])
settings.setdefault('allowed_item_read_roles',
self.config['ALLOWED_ITEM_READ_ROLES'])
settings.setdefault('allowed_item_write_roles',
self.config['ALLOWED_ITEM_WRITE_ROLES'])
settings.setdefault('allowed_filters',
self.config['ALLOWED_FILTERS'])
settings.setdefault('sorting', self.config['SORTING'])
settings.setdefault('embedding', self.config['EMBEDDING'])
settings.setdefault('embedded_fields', [])
settings.setdefault('pagination', self.config['PAGINATION'])
settings.setdefault('projection', self.config['PROJECTION'])
settings.setdefault('versioning', self.config['VERSIONING'])
settings.setdefault('internal_resource',
self.config['INTERNAL_RESOURCE'])
settings.setdefault('etag_ignore_fields', None)
        # TODO make sure that we really need the test below
if settings['item_lookup']:
item_methods = self.config['ITEM_METHODS']
else:
item_methods = eve.ITEM_METHODS
settings.setdefault('item_methods', item_methods)
settings.setdefault('auth_field',
self.config['AUTH_FIELD'])
settings.setdefault('allow_unknown', self.config['ALLOW_UNKNOWN'])
settings.setdefault('extra_response_fields',
self.config['EXTRA_RESPONSE_FIELDS'])
settings.setdefault('mongo_write_concern',
self.config['MONGO_WRITE_CONCERN'])
settings.setdefault('hateoas',
self.config['HATEOAS'])
settings.setdefault('authentication', self.auth if self.auth else None)
# empty schemas are allowed for read-only access to resources
schema = settings.setdefault('schema', {})
self.set_schema_defaults(schema)
datasource = {}
settings.setdefault('datasource', datasource)
settings['datasource'].setdefault('source', resource)
settings['datasource'].setdefault('filter', None)
settings['datasource'].setdefault('default_sort', None)
if len(schema) and settings['allow_unknown'] is False:
# enable retrieval of actual schema fields only. Eventual db
# fields not included in the schema won't be returned.
projection = {}
# despite projection, automatic fields are always included.
projection[self.config['ID_FIELD']] = 1
projection[self.config['LAST_UPDATED']] = 1
projection[self.config['DATE_CREATED']] = 1
projection[self.config['ETAG']] = 1
if settings['versioning'] is True:
projection[self.config['VERSION']] = 1
projection[
self.config['ID_FIELD'] +
self.config['VERSION_ID_SUFFIX']] = 1
projection.update(dict((field, 1) for (field) in schema))
else:
# all fields are returned.
projection = None
settings['datasource'].setdefault('projection', projection)
# 'defaults' helper set contains the names of fields with default
# values in their schema definition.
# TODO support default values for embedded documents.
settings['defaults'] = build_defaults(schema)
# list of all media fields for the resource
settings['_media'] = [field for field, definition in schema.items() if
definition.get('type') == 'media']
if settings['_media'] and not self.media:
raise ConfigException('A media storage class of type '
                                  'eve.io.media.MediaStorage must be defined '
'for "media" fields to be properly stored.')
def set_schema_defaults(self, schema):
""" When not provided, fills individual schema settings with default
or global configuration settings.
:param schema: the resource schema to be initialized with default
values
.. versionchanged: 0.0.7
Setting the default 'field' value would not happen if the
'data_relation' was nested deeper than the first schema level (#60).
.. versionadded: 0.0.5
"""
# TODO fill schema{} defaults, like field type, etc.
# set default 'field' value for all 'data_relation' rulesets, however
# nested
for data_relation in list(extract_key_values('data_relation', schema)):
data_relation.setdefault('field', self.config['ID_FIELD'])
# TODO: find a way to autofill "self.app.config['VERSION']: \
# {'type': 'integer'}" for data_relations
@property
def api_prefix(self):
""" Prefix to API endpoints.
.. versionadded:: 0.2
"""
return api_prefix(self.config['URL_PREFIX'],
self.config['API_VERSION'])
def _add_resource_url_rules(self, resource, settings):
""" Builds the API url map for one resource. Methods are enabled for
each mapped endpoint, as configured in the settings.
.. versionchanged:: 0.5
Don't add resource to url rules if it's flagged as internal.
Strip regexes out of config.URLS helper. Closes #466.
.. versionadded:: 0.2
"""
self.config['SOURCES'][resource] = settings['datasource']
if settings['internal_resource']:
return
url = '%s/%s' % (self.api_prefix, settings['url'])
pretty_url = settings['url']
if '<' in pretty_url:
pretty_url = pretty_url[:pretty_url.index('<') + 1] + \
pretty_url[pretty_url.rindex(':') + 1:]
self.config['URLS'][resource] = pretty_url
# resource endpoint
endpoint = resource + "|resource"
self.add_url_rule(url, endpoint, view_func=collections_endpoint,
methods=settings['resource_methods'] + ['OPTIONS'])
# item endpoint
if settings['item_lookup']:
item_url = '%s/<%s:%s>' % (url, settings['item_url'],
settings['item_lookup_field'])
endpoint = resource + "|item_lookup"
self.add_url_rule(item_url, endpoint,
view_func=item_endpoint,
methods=settings['item_methods'] + ['OPTIONS'])
if 'PATCH' in settings['item_methods']:
                # support for POST with X-HTTP-Method-Override header for
# clients not supporting PATCH. Also see item_endpoint() in
# endpoints.py
endpoint = resource + "|item_post_override"
self.add_url_rule(item_url, endpoint, view_func=item_endpoint,
methods=['POST'])
# also enable an alternative lookup/endpoint if allowed
lookup = settings.get('additional_lookup')
if lookup:
l_type = settings['schema'][lookup['field']]['type']
if l_type == 'integer':
item_url = '%s/<int:%s>' % (url, lookup['field'])
else:
item_url = '%s/<%s:%s>' % (url, lookup['url'],
lookup['field'])
endpoint = resource + "|item_additional_lookup"
self.add_url_rule(item_url, endpoint, view_func=item_endpoint,
methods=['GET', 'OPTIONS'])
def _init_url_rules(self):
""" Builds the API url map. Methods are enabled for each mapped
endpoint, as configured in the settings.
.. versionchanged:: 0.4
Renamed from '_add_url_rules' to '_init_url_rules' to make code more
DRY. Individual resource rules get built from register_resource now.
.. versionchanged:: 0.2
Delegate adding of resource rules to _add_resource_rules().
.. versionchanged:: 0.1.1
Simplified URL rules. Not using regexes anymore to return the
endpoint URL to the endpoint function. This allows for nested
endpoints to function properly.
.. versionchanged:: 0.0.9
Handle the case of 'additional_lookup' field being an integer.
.. versionchanged:: 0.0.5
Support for Cross-Origin Resource Sharing. 'OPTIONS' method is
explicitly routed to standard endpoints to allow for proper CORS
processing.
.. versionchanged:: 0.0.4
config.SOURCES. Maps resources to their datasources.
.. versionchanged:: 0.0.3
Support for API_VERSION as an endpoint prefix.
"""
# helpers
self.config['URLS'] = {} # maps resources to urls
self.config['SOURCES'] = {} # maps resources to their datasources
# we choose not to care about trailing slashes at all.
# Both '/resource/' and '/resource' will work, same with
# '/resource/<id>/' and '/resource/<id>'
self.url_map.strict_slashes = False
# home page (API entry point)
self.add_url_rule('%s/' % self.api_prefix, 'home',
view_func=home_endpoint, methods=['GET', 'OPTIONS'])
def register_resource(self, resource, settings):
""" Registers new resource to the domain.
Under the hood this validates given settings, updates default values
and adds necessary URL routes (builds api url map).
If there exists some resource with given name, it is overwritten.
:param resource: resource name.
:param settings: settings for given resource.
.. versionchanged:: 0.4
Support for document versioning.
.. versionadded:: 0.2
"""
# this line only makes sense when we call this function outside of the
# standard Eve setup routine, but it doesn't hurt to still call it
self.config['DOMAIN'][resource] = settings
# set up resource
self._set_resource_defaults(resource, settings)
self._validate_resource_settings(resource, settings)
self._add_resource_url_rules(resource, settings)
# add rules for version control collections if appropriate
if settings['versioning'] is True:
versioned_resource = resource + self.config['VERSIONS']
self.config['DOMAIN'][versioned_resource] = \
copy.deepcopy(self.config['DOMAIN'][resource])
self.config['DOMAIN'][versioned_resource]['datasource']['source'] \
+= self.config['VERSIONS']
self.config['SOURCES'][versioned_resource] = \
copy.deepcopy(self.config['SOURCES'][resource])
self.config['SOURCES'][versioned_resource]['source'] += \
self.config['VERSIONS']
# the new versioned resource also needs URL rules
self._add_resource_url_rules(
versioned_resource,
self.config['DOMAIN'][versioned_resource]
)
def register_error_handlers(self):
""" Register custom error handlers so we make sure that all errors
return a parseable body.
.. versionadded:: 0.4
"""
for code in [400, 401, 403, 404, 405, 406, 409, 410, 422]:
self.error_handler_spec[None][code] = error_endpoint
def _init_oplog(self):
""" If enabled, configures the OPLOG endpoint.
.. versionadded:: 0.5
"""
name, endpoint, audit = (
self.config['OPLOG_NAME'],
self.config['OPLOG_ENDPOINT'],
self.config['OPLOG_AUDIT']
)
if endpoint:
settings = self.config['DOMAIN'].setdefault(name, {})
settings.setdefault('url', endpoint)
settings.setdefault('datasource', {'source': name})
# this endpoint is always read-only
settings['resource_methods'] = ['GET']
settings['item_methods'] = ['GET']
# schema is also fixed. it is needed because otherwise we
# would end up exposing the AUTH_FIELD when User-Restricted-
# Resource-Access is enabled.
settings['schema'] = {
'r': {},
'o': {},
'i': {},
}
if audit:
settings['schema'].update(
{
'ip': {},
'c': {}
}
)
def _init_media_endpoint(self):
endpoint = self.config['MEDIA_ENDPOINT']
if endpoint:
media_url = '%s/%s/<%s:_id>' % (self.api_prefix,
endpoint,
self.config['MEDIA_URL'])
self.add_url_rule(media_url, 'media',
view_func=media_endpoint, methods=['GET'])
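# Minimal launch sketch (illustrative only; assumes a running MongoDB and a
# hypothetical 'people' resource -- the settings below are made up and are
# not part of this module). Settings may be passed as a dict, as noted in
# load_config() above:
#
#     from eve import Eve
#
#     settings = {
#         'DOMAIN': {
#             'people': {
#                 'schema': {'firstname': {'type': 'string'}},
#             },
#         },
#     }
#
#     app = Eve(settings=settings)
#     app.run()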
| 0.000059 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs), cv=5)
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| 0.003025 |
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters ([email protected]).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
from collections import namedtuple
TestResults = namedtuple('TestResults', 'failed attempted')
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
# Create a new flag unless `name` is already known.
return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
SKIP |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
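# Usage note (sketch): the flags above are bitmasks and are combined with
# bitwise OR when passed to a runner or to the convenience functions, e.g.
#     testmod(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE)
# or enabled per example with a "# doctest: +ELLIPSIS" directive on the
# example's source line.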
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative):
if module_relative:
package = _normalize_module(package, 3)
filename = _module_relative_path(package, filename)
if hasattr(package, '__loader__'):
if hasattr(package.__loader__, 'get_data'):
file_contents = package.__loader__.get_data(filename)
# get_data() opens files as 'rb', so one must do the equivalent
# conversion as universal newlines would do.
return file_contents.replace(os.linesep, '\n'), filename
with open(filename, 'U') as f:
return f.read(), filename
# Use sys.stdout encoding for output.
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8'
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in `s`, and return the result.
If the string `s` is Unicode, it is encoded using the stdout
encoding and the `backslashreplace` error handler.
"""
if isinstance(s, unicode):
s = s.encode(_encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
if not self.buf:
# Reset it to an empty string, to make sure it's not unicode.
self.buf = ''
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
"""
Essentially the only subtle case:
>>> _ellipsis_match('aa...aa', 'aaa')
False
"""
if ELLIPSIS_MARKER not in want:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
def _strip_exception_details(msg):
# Support for IGNORE_EXCEPTION_DETAIL.
# Get rid of everything except the exception name; in particular, drop
# the possibly dotted module path (if any) and the exception message (if
# any). We assume that a colon is never part of a dotted name, or of an
# exception name.
# E.g., given
# "foo.bar.MyError: la di da"
# return "MyError"
# Or for "abc.def" or "abc.def:\n" return "def".
start, end = 0, len(msg)
# The exception name must appear on the first line.
i = msg.find("\n")
if i >= 0:
end = i
# retain up to the first colon (if any)
i = msg.find(':', 0, end)
if i >= 0:
end = i
# retain just the exception name
i = msg.rfind('.', 0, end)
if i >= 0:
start = i+1
return msg[start: end]
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
self.__debugger_used = False
pdb.Pdb.__init__(self, stdout=out)
# still use input() to get user input
self.use_rawinput = 1
def set_trace(self, frame=None):
self.__debugger_used = True
if frame is None:
frame = sys._getframe().f_back
pdb.Pdb.set_trace(self, frame)
def set_continue(self):
# Calling set_continue unconditionally would break unit test
# coverage reporting, as Bdb.set_continue calls sys.settrace(None).
if self.__debugger_used:
pdb.Pdb.set_continue(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError, 'Expected a module: %r' % module
if path.startswith('/'):
raise ValueError, 'Module-relative files may not have absolute paths'
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self.source == other.source and \
self.want == other.want and \
self.lineno == other.lineno and \
self.indent == other.indent and \
self.options == other.options and \
self.exc_msg == other.exc_msg
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.source, self.want, self.lineno, self.indent,
self.exc_msg))
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, basestring), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<DocTest %s from %s:%s (%s)>' %
(self.name, self.filename, self.lineno, examples))
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self.examples == other.examples and \
self.docstring == other.docstring and \
self.globs == other.globs and \
self.name == other.name and \
self.filename == other.filename and \
self.lineno == other.lineno
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.docstring, self.name, self.filename, self.lineno))
# This lets us sort tests by name:
def __cmp__(self, other):
if not isinstance(other, DocTest):
return -1
return cmp((self.name, self.filename, self.lineno, id(self)),
(other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.+$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
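    # Rough illustration (not part of the original source): given the text
    #     >>> 1 + 1
    #     2
    # the regexp captures roughly source=">>> 1 + 1", indent="" and
    # want="2\n"; the exact prompt/indentation stripping is done later in
    # _parse_example below.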
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
if module is not None:
# Supply the module globals in case the module was
# originally loaded via a PEP 302 loader and
# file is not a valid filesystem path
source_lines = linecache.getlines(file, module.__dict__)
else:
# No access to a loader, so assume it's a normal
# filesystem path
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__' # provide a default module name
# Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
# Sort the tests by alpha order of names, for consistency in
# verbose-mode output. This was a feature of doctest in Pythons
# <= 2.3 that got lost by accident in 2.4. It was repaired in
# 2.4.4 and 2.5.
tests.sort()
return tests
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif inspect.isfunction(object):
return module.__dict__ is object.func_globals
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
            return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print 'Finding tests in %s' % name
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, basestring):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, basestring)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).im_func
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, basestring):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, basestring):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.im_func
if inspect.isfunction(obj): obj = obj.func_code
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile('(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> tests.sort(key = lambda test: test.name)
>>> for test in tests:
... print test.name, '->', runner.run(test)
_TestClass -> TestResults(failed=0, attempted=2)
_TestClass.__init__ -> TestResults(failed=0, attempted=2)
_TestClass.get -> TestResults(failed=0, attempted=2)
_TestClass.square -> TestResults(failed=0, attempted=1)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `DocTestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
# If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# If 'SKIP' is set, then skip this example.
if self.optionflags & SKIP:
continue
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec compile(example.source, filename, "single",
compileflags, 1) in test.globs
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
if check(_strip_exception_details(example.exc_msg),
_strip_exception_details(exc_msg),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return TestResults(failures, tries)
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>.+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
source = example.source
if isinstance(source, unicode):
source = source.encode('ascii', 'backslashreplace')
return source.splitlines(True)
else:
return self.save_linecache_getlines(filename, module_globals)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
# Make sure sys.displayhook just prints the value to stdout
save_displayhook = sys.displayhook
sys.displayhook = sys.__displayhook__
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
sys.displayhook = save_displayhook
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print len(notests), "items had no tests:"
notests.sort()
for thing in notests:
print " ", thing
if passed:
print len(passed), "items passed all tests:"
passed.sort()
for thing, count in passed:
print " %3d tests in %s" % (count, thing)
if failed:
print self.DIVIDER
print len(failed), "items had failures:"
failed.sort()
for thing, (f, t) in failed:
print " %3d of %3d in %s" % (f, t, thing)
if verbose:
print totalt, "tests in", len(self._name2ft), "items."
print totalt - totalf, "passed and", totalf, "failed."
if totalf:
print "***Test Failed***", totalf, "failures."
elif verbose:
print "Test passed."
return TestResults(totalf, totalt)
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
# Don't print here by default, since doing
# so breaks some of the buildbots
#print "*** DocTestRunner.merge: '" + name + "' in both" \
# " testers; summing outcomes."
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
class OutputChecker:
"""
    A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
        documentation for `DocTestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(True) # True == keep line ends
got_lines = got.splitlines(True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
# Remove trailing whitespace on diff output.
diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
r"""Run doc tests but raise an exception as soon as there is a failure.
If an unexpected exception occurs, an UnexpectedException is raised.
It contains the test, the example, and the original exception:
>>> runner = DebugRunner(verbose=False)
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except UnexpectedException, failure:
... pass
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
We wrap the original exception to give the calling application
access to the test and example information.
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
If a failure or error occurs, the globals are left intact:
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 1}
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... >>> raise KeyError
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
Traceback (most recent call last):
...
UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 2}
But the globals are cleared if there is no error:
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
TestResults(failed=0, attempted=1)
>>> test.globs
{}
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, report=True,
optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See help(doctest) for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
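# Usage sketch (illustrative): the most common way to use testmod() is from a
# module's own __main__ guard, so that running the file directly checks every
# docstring example in it.  The module name "mymodule" below is hypothetical.
#
#     if __name__ == "__main__":
#         import doctest
#         doctest.testmod()                 # test the current module's docstrings
#
#     # or, from another script, test an imported module explicitly:
#     #     import doctest, mymodule
#     #     failures, tried = doctest.testmod(mymodule,
#     #                                       optionflags=doctest.ELLIPSIS)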
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser(),
encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg "encoding" specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
text, filename = _load_testfile(filename, package, module_relative)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
if encoding is not None:
text = text.decode(encoding)
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
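# Usage sketch (illustrative): testfile() runs the examples found in a plain
# text file rather than in docstrings.  The file paths below are hypothetical.
#
#     import doctest
#     doctest.testfile("example.txt")               # module-relative path
#     doctest.testfile("/tmp/example.txt",          # os-specific path
#                      module_relative=False,
#                      optionflags=doctest.NORMALIZE_WHITESPACE)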
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
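# Usage sketch (illustrative): run_docstring_examples() checks a single
# object's docstring without recursing into contained objects.  The function
# "my_function" is hypothetical.
#
#     import doctest
#     doctest.run_docstring_examples(my_function, globals(),
#                                    verbose=True, name="my_function")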
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):
warnings.warn("class Tester is deprecated; "
"use class doctest.DocTestRunner instead",
DeprecationWarning, stacklevel=2)
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and not inspect.ismodule(mod):
raise TypeError("Tester.__init__: mod must be a module; %r" %
(mod,))
if globs is None:
globs = mod.__dict__
self.globs = globs
self.verbose = verbose
self.optionflags = optionflags
self.testfinder = DocTestFinder()
self.testrunner = DocTestRunner(verbose=verbose,
optionflags=optionflags)
def runstring(self, s, name):
test = DocTestParser().get_doctest(s, self.globs, name, None, None)
if self.verbose:
print "Running string", name
(f,t) = self.testrunner.run(test)
if self.verbose:
print f, "of", t, "examples failed in string", name
return TestResults(f,t)
def rundoc(self, object, name=None, module=None):
f = t = 0
tests = self.testfinder.find(object, name, module=module,
globs=self.globs)
for test in tests:
(f2, t2) = self.testrunner.run(test)
(f,t) = (f+f2, t+t2)
return TestResults(f,t)
def rundict(self, d, name, module=None):
import types
m = types.ModuleType(name)
m.__dict__.update(d)
if module is None:
module = False
return self.rundoc(m, name, module)
def run__test__(self, d, name):
import types
m = types.ModuleType(name)
m.__test__ = d
return self.rundoc(m, name)
def summarize(self, verbose=None):
return self.testrunner.summarize(verbose)
def merge(self, other):
self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
"""Sets the unittest option flags.
The old flag is returned so that a runner could restore the old
value if it wished to:
>>> import doctest
>>> old = doctest._unittest_reportflags
>>> doctest.set_unittest_reportflags(REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE) == old
True
>>> doctest._unittest_reportflags == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
Only reporting flags can be set:
>>> doctest.set_unittest_reportflags(ELLIPSIS)
Traceback (most recent call last):
...
ValueError: ('Only reporting flags allowed', 8)
>>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
"""
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException, failure:
... pass
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test, clear_globs=False)
self.tearDown()
def id(self):
return self._dt_test.name
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._dt_test == other._dt_test and \
self._dt_optionflags == other._dt_optionflags and \
self._dt_setUp == other._dt_setUp and \
self._dt_tearDown == other._dt_tearDown and \
self._dt_checker == other._dt_checker
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown,
self._dt_checker))
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
class SkipDocTestCase(DocTestCase):
def __init__(self, module):
self.module = module
DocTestCase.__init__(self, None)
def setUp(self):
self.skipTest("DocTestSuite will not work with -O2 and above")
def test_skip(self):
pass
def shortDescription(self):
return "Skipping tests from %s" % self.module.__name__
__str__ = shortDescription
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if not tests and sys.flags.optimize >= 2:
# Skip doctests when running with -O2
suite = unittest.TestSuite()
suite.addTest(SkipDocTestCase(module))
return suite
elif not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
# It is probably a bug that this exception is not also raised if the
# number of doctest examples in tests is zero (i.e. if no doctest
# examples were found). However, we should probably not be raising
# an exception at all here, though it is too late to make this change
# for a maintenance release. See also issue #14649.
raise ValueError(module, "has no docstrings")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
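# Usage sketch (illustrative): DocTestSuite() is typically wired into unittest
# discovery through a load_tests hook.  The module name "mymodule" is
# hypothetical.
#
#     import doctest
#     import mymodule
#
#     def load_tests(loader, tests, ignore):
#         tests.addTests(doctest.DocTestSuite(mymodule))
#         return tests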
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(),
encoding=None, **options):
if globs is None:
globs = {}
else:
globs = globs.copy()
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
doc, path = _load_testfile(path, package, module_relative)
if "__file__" not in globs:
globs["__file__"] = path
# Find the file and read it.
name = os.path.basename(path)
# If an encoding is specified, use it to convert the file to unicode
if encoding is not None:
doc = doc.decode(encoding)
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
encoding
An encoding that will be used to convert the files to unicode.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
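# Usage sketch (illustrative): DocFileSuite() builds a unittest suite from one
# or more text files of doctests.  The file names below are hypothetical.
#
#     import doctest, unittest
#
#     suite = doctest.DocFileSuite("intro.txt", "advanced.txt",
#                                  optionflags=doctest.ELLIPSIS)
#     unittest.TextTestRunner(verbosity=2).run(suite)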
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
r"""Extract script from text with examples.
Converts text with examples to a Python script. Example input is
converted to regular code. Example output and all other words
are converted to comments:
>>> text = '''
... Here are examples of simple math.
...
... Python has super accurate integer addition
...
... >>> 2 + 2
... 5
...
... And very friendly error messages:
...
... >>> 1/0
... To Infinity
... And
... Beyond
...
... You can use logic if you want:
...
... >>> if 0:
... ... blah
... ... blah
... ...
...
... Ho hum
... '''
>>> print script_from_examples(text)
# Here are examples of simple math.
#
# Python has super accurate integer addition
#
2 + 2
# Expected:
## 5
#
# And very friendly error messages:
#
1/0
# Expected:
## To Infinity
## And
## Beyond
#
# You can use logic if you want:
#
if 0:
blah
blah
#
# Ho hum
<BLANKLINE>
"""
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
# Add a courtesy newline to prevent exec from choking (see bug #1172785)
return '\n'.join(output) + '\n'
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used.  As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print sys.exc_info()[1]
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
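# Usage sketch (illustrative): debug() drops into pdb over the examples of a
# single docstring, which is handy when a doctest fails for unclear reasons.
# The names "mymodule" and "mymodule.MyClass.method" are hypothetical.
#
#     import doctest
#     doctest.debug("mymodule", "mymodule.MyClass.method")           # step through
#     doctest.debug("mymodule", "mymodule.MyClass.method", pm=True)  # post-mortem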
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
testfiles = [arg for arg in sys.argv[1:] if arg and arg[0] != '-']
if not testfiles:
name = os.path.basename(sys.argv[0])
if '__loader__' in globals(): # python -m
name, _ = os.path.splitext(name)
print("usage: {0} [-v] file ...".format(name))
return 2
for filename in testfiles:
if filename.endswith(".py"):
# It is a module -- insert its dir into sys.path and try to
# import it. If it is part of a package, that possibly
# won't work because of package imports.
dirname, filename = os.path.split(filename)
sys.path.insert(0, dirname)
m = __import__(filename[:-3])
del sys.path[0]
failures, _ = testmod(m)
else:
failures, _ = testfile(filename, module_relative=False)
if failures:
return 1
return 0
if __name__ == "__main__":
sys.exit(_test())
| 0.001245 |
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.plugins.malware.malfind as malfind
import volatility.plugins.linux.pslist as pslist
import volatility.plugins.linux.common as linux_common
import volatility.utils as utils
import volatility.debug as debug
try:
import yara
has_yara = True
except ImportError:
has_yara = False
class VmaYaraScanner(malfind.BaseYaraScanner):
"""A scanner over all memory regions of a process."""
def __init__(self, task = None, **kwargs):
"""Scan the process address space through the VMAs.
Args:
task: The task_struct object for this task.
"""
self.task = task
malfind.BaseYaraScanner.__init__(self, address_space = task.get_process_address_space(), **kwargs)
def scan(self, offset = 0, maxlen = None):
for vma in self.task.get_proc_maps():
for match in malfind.BaseYaraScanner.scan(self, vma.vm_start, vma.vm_end - vma.vm_start):
yield match
class linux_yarascan(malfind.YaraScan):
"""A shell in the Linux memory image"""
@staticmethod
def is_valid_profile(profile):
return profile.metadata.get('os', 'Unknown').lower() == 'linux'
def calculate(self):
## we need this module imported
if not has_yara:
debug.error("Please install Yara from code.google.com/p/yara-project")
## leveraged from the windows yarascan plugin
rules = self._compile_rules()
## set the linux plugin address spaces
linux_common.set_plugin_members(self)
if self._config.KERNEL:
## the start of kernel memory taken from VolatilityLinuxIntelValidAS
if self.addr_space.profile.metadata.get('memory_model', '32bit') == "32bit":
kernel_start = 0xc0000000
else:
kernel_start = 0xffffffff80000000
scanner = malfind.DiscontigYaraScanner(rules = rules,
address_space = self.addr_space)
for hit, address in scanner.scan(start_offset = kernel_start):
yield (None, address, hit,
scanner.address_space.zread(address, 64))
else:
for task in pslist.linux_pslist(self._config).calculate():
scanner = VmaYaraScanner(task = task, rules = rules)
for hit, address in scanner.scan():
yield (task, address, hit,
scanner.address_space.zread(address, 64))
def render_text(self, outfd, data):
for task, address, hit, buf in data:
if task:
outfd.write("Task: {0} pid {1} rule {2} addr {3:#x}\n".format(
task.comm, task.pid, hit.rule, address))
else:
outfd.write("[kernel] rule {0} addr {1:#x}\n".format(hit.rule, address))
outfd.write("".join(["{0:#010x} {1:<48} {2}\n".format(
address + o, h, ''.join(c)) for o, h, c in utils.Hexdump(buf)]))
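# Illustrative command-line usage (not from the original source; the rule
# options are assumed to come from the base YaraScan plugin, and the profile
# and file names are hypothetical):
#
#     python vol.py --profile=LinuxUbuntu1404x64 -f memory.lime \
#         linux_yarascan -Y "yara_rule_string"
#
#     python vol.py --profile=LinuxUbuntu1404x64 -f memory.lime \
#         linux_yarascan -y /path/to/rules.yar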
| 0.011926 |
# Copyright (C) 2001-2007 Python Software Foundation
# Author: Anthony Baxter
# Contact: [email protected]
"""Class representing audio/* type MIME documents."""
__all__ = ['MIMEAudio']
import sndhdr
from io import BytesIO
from email import encoders
from email.mime.nonmultipart import MIMENonMultipart
_sndhdr_MIMEmap = {'au' : 'basic',
'wav' :'x-wav',
'aiff':'x-aiff',
'aifc':'x-aiff',
}
# There are others in sndhdr that don't have MIME types. :(
# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma??
def _whatsnd(data):
"""Try to identify a sound file type.
sndhdr.what() has a pretty cruddy interface, unfortunately. This is why
we re-do it here. It would be easier to reverse engineer the Unix 'file'
command and use the standard 'magic' file, as shipped with a modern Unix.
"""
hdr = data[:512]
fakefile = BytesIO(hdr)
for testfn in sndhdr.tests:
res = testfn(hdr, fakefile)
if res is not None:
return _sndhdr_MIMEmap.get(res[0])
return None
class MIMEAudio(MIMENonMultipart):
"""Class for generating audio/* MIME documents."""
def __init__(self, _audiodata, _subtype=None,
_encoder=encoders.encode_base64, **_params):
"""Create an audio/* type MIME document.
_audiodata is a string containing the raw audio data. If this data
can be decoded by the standard Python `sndhdr' module, then the
subtype will be automatically included in the Content-Type header.
Otherwise, you can specify the specific audio subtype via the
_subtype parameter. If _subtype is not given, and no subtype can be
guessed, a TypeError is raised.
_encoder is a function which will perform the actual encoding for
transport of the image data. It takes one argument, which is this
Image instance. It should use get_payload() and set_payload() to
change the payload to the encoded form. It should also add any
Content-Transfer-Encoding or other headers to the message as
necessary. The default encoding is Base64.
Any additional keyword arguments are passed to the base class
constructor, which turns them into parameters on the Content-Type
header.
"""
if _subtype is None:
_subtype = _whatsnd(_audiodata)
if _subtype is None:
raise TypeError('Could not find audio MIME subtype')
MIMENonMultipart.__init__(self, 'audio', _subtype, **_params)
self.set_payload(_audiodata)
_encoder(self)
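# Minimal usage sketch (not part of the module); "clip.wav" is a hypothetical
# local file:
#
#     with open('clip.wav', 'rb') as fp:
#         audio = MIMEAudio(fp.read())   # subtype guessed via sndhdr
#     audio.add_header('Content-Disposition', 'attachment', filename='clip.wav')
#
# If the subtype cannot be guessed, pass it explicitly, e.g.
# MIMEAudio(data, _subtype='mpeg').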
| 0.002992 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Backend.hypervisor'
db.add_column('db_backend', 'hypervisor', self.gf('django.db.models.fields.CharField')(default='kvm', max_length=32), keep_default=False)
def backwards(self, orm):
# Deleting field 'Backend.hypervisor'
db.delete_column('db_backend', 'hypervisor')
models = {
'db.backend': {
'Meta': {'object_name': 'Backend'},
'clustername': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'ctotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'dfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'dtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'hypervisor': ('django.db.models.fields.CharField', [], {'default': "'kvm'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True'}),
'mfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'mtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'password_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'pinst_cnt': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5080'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'db.backendnetwork': {
'Meta': {'unique_together': "(('network', 'backend'),)", 'object_name': 'BackendNetwork'},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'networks'", 'to': "orm['db.Backend']"}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'backend_networks'", 'to': "orm['db.Network']"}),
'operstate': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'db.bridgepooltable': {
'Meta': {'object_name': 'BridgePoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.flavor': {
'Meta': {'unique_together': "(('cpu', 'ram', 'disk', 'disk_template'),)", 'object_name': 'Flavor'},
'cpu': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'disk': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'disk_template': ('django.db.models.fields.CharField', [], {'default': "'plain'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ram': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'db.ippooltable': {
'Meta': {'object_name': 'IPPoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.macprefixpooltable': {
'Meta': {'object_name': 'MacPrefixPoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.network': {
'Meta': {'object_name': 'Network'},
'action': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'dhcp': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'flavor': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'gateway': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'gateway6': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'machines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['db.VirtualMachine']", 'through': "orm['db.NetworkInterface']", 'symmetrical': 'False'}),
'mode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'pool': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'network'", 'unique': 'True', 'null': 'True', 'to': "orm['db.IPPoolTable']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'network'", 'null': 'True', 'to': "orm['db.QuotaHolderSerial']"}),
'state': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '32'}),
'subnet': ('django.db.models.fields.CharField', [], {'default': "'10.0.0.0/24'", 'max_length': '32'}),
'subnet6': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'})
},
'db.networkinterface': {
'Meta': {'object_name': 'NetworkInterface'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'firewall_profile': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {}),
'ipv4': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True'}),
'ipv6': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'to': "orm['db.VirtualMachine']"}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'to': "orm['db.Network']"}),
'state': ('django.db.models.fields.CharField', [], {'default': "'ACTIVE'", 'max_length': '32'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'db.quotaholderserial': {
'Meta': {'object_name': 'QuotaHolderSerial'},
'accept': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'resolved': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'serial': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True', 'db_index': 'True'})
},
'db.virtualmachine': {
'Meta': {'object_name': 'VirtualMachine'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'virtual_machines'", 'null': 'True', 'to': "orm['db.Backend']"}),
'backend_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'buildpercentage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'flavor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['db.Flavor']"}),
'hostid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'operstate': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'virtual_machine'", 'null': 'True', 'to': "orm['db.QuotaHolderSerial']"}),
'suspended': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'})
},
'db.virtualmachinediagnostic': {
'Meta': {'object_name': 'VirtualMachineDiagnostic'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'diagnostics'", 'to': "orm['db.VirtualMachine']"}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'source_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'db.virtualmachinemetadata': {
'Meta': {'unique_together': "(('meta_key', 'vm'),)", 'object_name': 'VirtualMachineMetadata'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta_key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'meta_value': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'vm': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['db.VirtualMachine']"})
}
}
complete_apps = ['db']
| 0.008688 |
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import QModelIndex, Qt
from PyQt5.QtGui import QStandardItemModel
from PyQt5.QtWidgets import QApplication, QItemDelegate, QSpinBox, QTableView
class SpinBoxDelegate(QItemDelegate):
def createEditor(self, parent, option, index):
editor = QSpinBox(parent)
editor.setMinimum(0)
editor.setMaximum(100)
return editor
def setEditorData(self, spinBox, index):
value = index.model().data(index, Qt.EditRole)
spinBox.setValue(value)
def setModelData(self, spinBox, model, index):
spinBox.interpretText()
value = spinBox.value()
model.setData(index, value, Qt.EditRole)
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
model = QStandardItemModel(4, 2)
tableView = QTableView()
tableView.setModel(model)
delegate = SpinBoxDelegate()
tableView.setItemDelegate(delegate)
for row in range(4):
for column in range(2):
index = model.index(row, column, QModelIndex())
model.setData(index, (row + 1) * (column + 1))
tableView.setWindowTitle("Spin Box Delegate")
tableView.show()
sys.exit(app.exec_())
| 0.009245 |
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.StatementAST import StatementAST
from slicc.symbols import Var
class EnqueueStatementAST(StatementAST):
def __init__(self, slicc, queue_name, type_ast, pairs, statements):
super(EnqueueStatementAST, self).__init__(slicc, pairs)
self.queue_name = queue_name
self.type_ast = type_ast
self.statements = statements
def __repr__(self):
return "[EnqueueStatementAst: %s %s %s]" % \
(self.queue_name, self.type_ast.ident, self.statements)
def generate(self, code, return_type):
code("{")
code.indent()
self.symtab.pushFrame()
msg_type = self.type_ast.type
# Add new local var to symbol table
v = Var(self.symtab, "out_msg", self.location, msg_type, "*out_msg",
self.pairs)
self.symtab.newSymbol(v)
# Declare message
code("${{msg_type.ident}} *out_msg = "\
"new ${{msg_type.ident}}(clockEdge());")
# The other statements
t = self.statements.generate(code, None)
self.queue_name.assertType("OutPort")
args = [ "out_msg" ]
if "latency" in self:
latency = self["latency"]
try:
# see if this is an integer
latency = int(latency)
args.append("Cycles(%s)" % latency)
except ValueError:
# if not, it should be a member
args.append("m_%s" % latency)
args = ", ".join(args)
code('(${{self.queue_name.var.code}}).enqueue($args);')
# End scope
self.symtab.popFrame()
code.dedent()
code("}")
def findResources(self, resources):
var = self.queue_name.var
res_count = int(resources.get(var, 0))
resources[var] = str(res_count + 1)
| 0.001453 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from neutron.db import model_base
class TunnelKeyLast(model_base.BASEV2):
"""Last allocated Tunnel key.
    The next key allocation will start from this value + 1.
"""
last_key = sa.Column(sa.Integer, primary_key=True)
def __repr__(self):
return "<TunnelKeyLast(%x)>" % self.last_key
class TunnelKey(model_base.BASEV2):
"""Netowrk ID <-> tunnel key mapping."""
network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
nullable=False)
tunnel_key = sa.Column(sa.Integer, primary_key=True,
nullable=False, autoincrement=False)
def __repr__(self):
return "<TunnelKey(%s,%x)>" % (self.network_id, self.tunnel_key)
| 0 |
# coding: utf-8
#------------------------------------------------------------------------------
# Copyright (c) 2008 Sébastien Boisgérault
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------
__all__ = ["ExpatError", "ParserCreate", "XMLParserType", "error", "errors"]
# Jython check
import sys
if not sys.platform.startswith('java'):
raise ImportError("this version of expat requires the jython interpreter")
# Standard Python Library
import re
import types
# Jython
from org.python.core import Py
from org.python.core.util import StringUtil
from jarray import array
# Java Standard Edition
from java.io import ByteArrayInputStream
from java.lang import String, StringBuilder
from org.xml.sax import InputSource
from org.xml.sax import SAXNotRecognizedException, SAXParseException
from org.xml.sax.helpers import XMLReaderFactory
from org.xml.sax.ext import DefaultHandler2
# Xerces
try:
# Name mangled by jarjar?
import org.python.apache.xerces.parsers.SAXParser
_xerces_parser = "org.python.apache.xerces.parsers.SAXParser"
except ImportError:
_xerces_parser = "org.apache.xerces.parsers.SAXParser"
# @expat args registry
_register = {}
def ParserCreate(encoding=None, namespace_separator=None):
return XMLParser(encoding, namespace_separator)
class XMLParser(object):
def __init__(self, encoding, namespace_separator):
self.encoding = encoding
self.CurrentLineNumber = 1
self.CurrentColumnNumber = 0
self._NextLineNumber = 1
self._NextColumnNumber = 0
self.ErrorLineNumber = -1
self.ErrorColumnNumber = -1
self.ErrorCode = None
if namespace_separator is None:
self.namespace_separator = namespace_separator
elif isinstance(namespace_separator, basestring):
self.namespace_separator = str(namespace_separator)
if len(self.namespace_separator) > 1:
error = ("namespace_separator must be at most one character, "
"omitted, or None")
raise ValueError(error)
else:
error = ("ParserCreate() argument 2 must be string or None, "
"not %s" % type(namespace_separator).__name__)
raise TypeError(error)
self._reader = XMLReaderFactory.createXMLReader(_xerces_parser)
if self.namespace_separator is None:
try:
feature = "http://xml.org/sax/features/namespaces"
self._reader.setFeature(feature, False)
except SAXNotRecognizedException:
error = ("namespace support cannot be disabled; "
"set namespace_separator to a string of length 1.")
raise ValueError(error)
self._base = None
self._buffer_text = True
self._returns_unicode = True
self._data = StringBuilder()
self._handler = XMLEventHandler(self)
self._reader.setContentHandler(self._handler)
self._reader.setErrorHandler(self._handler)
self._reader.setDTDHandler(self._handler)
self._reader.setEntityResolver(self._handler)
sax_properties = ("lexical-handler", "declaration-handler")
for name in sax_properties:
try:
name = "http://xml.org/sax/properties/" + name
self._reader.setProperty(name, self._handler)
except SAXNotRecognizedException:
error = "can't set property %r" % name
raise NotImplementedError(error)
apache_features = (("nonvalidating/load-external-dtd", False),)
for name, value in apache_features:
try:
name = "http://apache.org/xml/features/" + name
self._reader.setFeature(name, value)
except SAXNotRecognizedException:
error = "can't set feature %r" % name
raise NotImplementedError(error)
# experimental
#f = "http://xml.org/sax/features/external-general-entities"
f = "http://xml.org/sax/features/external-parameter-entities"
#self._reader.setFeature(f, False)
# check
f = "http://xml.org/sax/features/use-entity-resolver2"
assert self._reader.getFeature(f)
def GetBase(self):
return self._base
def SetBase(self, base):
self._base = base
def _error(self, value=None):
raise AttributeError("'XMLParser' has no such attribute")
def _get_buffer_text(self):
return self._buffer_text
def _set_buffer_text(self, value):
self._buffer_text = bool(value)
def _get_returns_unicode(self):
return bool(self._returns_unicode)
def _set_returns_unicode(self, value):
self._returns_unicode = value
# 'ordered' and 'specified' attributes are not supported
ordered_attributes = property(_error, _error)
specified_attributes = property(_error, _error)
# any setting is allowed, but it won't make a difference
buffer_text = property(_get_buffer_text, _set_buffer_text)
# non-significant read-only values
buffer_used = property(lambda self: None)
buffer_size = property(lambda self: None)
# 'returns_unicode' attribute is properly supported
returns_unicode = property(_get_returns_unicode, _set_returns_unicode)
def _expat_error(self, sax_error):
sax_message = sax_error.getMessage()
pattern = 'The entity ".*" was referenced, but not declared\.'
if re.match(pattern, sax_message):
expat_message = "undefined entity: line %s, column %s" % \
(self.ErrorLineNumber, self.ErrorColumnNumber)
else:
expat_message = sax_message
error = ExpatError(expat_message)
error.lineno = self.ErrorLineNumber
error.offset = self.ErrorColumnNumber
error.code = self.ErrorCode
return error
def Parse(self, data, isfinal=False):
# The 'data' argument should be an encoded text: a str instance that
# represents an array of bytes. If instead it is a unicode string,
# only the us-ascii range is considered safe enough to be silently
# converted.
if isinstance(data, unicode):
data = data.encode(sys.getdefaultencoding())
self._data.append(data)
if isfinal:
bytes = StringUtil.toBytes(self._data.toString())
byte_stream = ByteArrayInputStream(bytes)
source = InputSource(byte_stream)
if self.encoding is not None:
source.setEncoding(self.encoding)
try:
self._reader.parse(source)
except SAXParseException, sax_error:
# Experiments tend to show that the '_Next*' parser locations
# match more closely expat behavior than the 'Current*' or sax
# error locations.
self.ErrorLineNumber = self._NextLineNumber
self.ErrorColumnNumber = self._NextColumnNumber
self.ErrorCode = None
raise self._expat_error(sax_error)
return 1
def ParseFile(self, file):
# TODO: pseudo-buffering if a read without argument is not supported.
# document parse / parsefile usage.
return self.Parse(file.read(), isfinal=True)
XMLParserType = XMLParser
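# Usage sketch following the standard pyexpat-style interface implemented
# above (not part of the original module):
#
#     def start(name, attrs):
#         print "start:", name, attrs
#
#     p = ParserCreate()
#     p.StartElementHandler = start
#     p.Parse("<root><item>hi</item></root>", True)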
def _encode(arg, encoding):
if isinstance(arg, unicode):
return arg.encode(encoding)
else:
if isinstance(arg, dict):
iterator = arg.iteritems()
else:
iterator = iter(arg)
return type(arg)(_encode(_arg, encoding) for _arg in iterator)
def expat(callback=None, guard=True, force=False, returns=None):
def _expat(method):
name = method.__name__
context = id(sys._getframe(1))
key = name, context
append = _register.setdefault(key, []).append
append((method, callback, guard, force, returns))
def new_method(*args):
self = args[0]
parser = self.parser
self._update_location(event=name) # bug if multiple method def
for (method, callback, guard, force, returns) in _register[key]:
if guard not in (True, False):
guard = getattr(self, guard)
_callback = callback and guard and \
getattr(parser, callback, None)
if _callback or force:
results = method(*args)
if _callback:
if not isinstance(results, tuple):
results = (results,)
if not parser.returns_unicode:
results = _encode(results, "utf-8")
_callback(*results)
return returns
new_method.__name__ = name
#new_method.__doc__ = method.__doc__ # what to do with multiple docs ?
return new_method
return _expat
class XMLEventHandler(DefaultHandler2):
def __init__(self, parser):
self.parser = parser
self._tags = {}
self.not_in_dtd = True
self._entity = {}
self._previous_event = None
# --- Helpers -------------------------------------------------------------
def _intern(self, tag):
return self._tags.setdefault(tag, tag)
def _qualify(self, local_name, qname, namespace=None):
namespace_separator = self.parser.namespace_separator
if namespace_separator is None:
return qname
if not namespace:
return local_name
else:
return namespace + namespace_separator + local_name
def _char_slice_to_unicode(self, characters, start, length):
"""Convert a char[] slice to a PyUnicode instance"""
text = Py.newUnicode(String(characters[start:start + length]))
return text
def _expat_content_model(self, name, model_):
# TODO : implement a model parser
return (name, model_) # does not fit expat conventions
def _update_location(self, event=None):
parser = self.parser
locator = self._locator
# ugly hack that takes care of a xerces-specific (?) locator issue:
# locate start and end elements at the '<' instead of the first tag
# type character.
if event == "startElement" and self._previous_event == "characters":
parser._NextColumnNumber = max(parser._NextColumnNumber - 1, 0)
if event == "endElement" and self._previous_event == "characters":
parser._NextColumnNumber = max(parser._NextColumnNumber - 2, 0)
# TODO: use the same trick to report accurate error locations ?
parser.CurrentLineNumber = parser._NextLineNumber
parser.CurrentColumnNumber = parser._NextColumnNumber
parser._NextLineNumber = locator.getLineNumber()
parser._NextColumnNumber = locator.getColumnNumber() - 1
self._previous_event = event
# --- ContentHandler Interface --------------------------------------------
@expat("ProcessingInstructionHandler")
def processingInstruction(self, target, data):
return target, data
@expat("StartElementHandler")
def startElement(self, namespace, local_name, qname, attributes):
tag = self._qualify(local_name, qname, namespace)
attribs = {}
length = attributes.getLength()
for index in range(length):
local_name = attributes.getLocalName(index)
qname = attributes.getQName(index)
namespace = attributes.getURI(index)
name = self._qualify(local_name, qname, namespace)
value = attributes.getValue(index)
attribs[name] = value
return self._intern(tag), attribs
@expat("EndElementHandler")
def endElement(self, namespace, local_name, qname):
return self._intern(self._qualify(local_name, qname, namespace))
@expat("CharacterDataHandler")
def characters(self, characters, start, length):
return self._char_slice_to_unicode(characters, start, length)
@expat("DefaultHandlerExpand")
def characters(self, characters, start, length):
return self._char_slice_to_unicode(characters, start, length)
@expat("DefaultHandler")
def characters(self, characters, start, length):
# TODO: make a helper function here
if self._entity["location"] == (self.parser.CurrentLineNumber,
self.parser.CurrentColumnNumber):
return "&%s;" % self._entity["name"]
else:
return self._char_slice_to_unicode(characters, start, length)
@expat("StartNamespaceDeclHandler")
def startPrefixMapping(self, prefix, uri):
return prefix, uri
@expat("EndNamespaceDeclHandler")
def endPrefixMapping(self, prefix):
return prefix
empty_source = InputSource(ByteArrayInputStream(array([], "b")))
@expat("ExternalEntityRefHandler", guard="not_in_dtd",
returns=empty_source)
def resolveEntity(self, name, publicId, baseURI, systemId):
context = name # wrong. see expat headers documentation.
base = self.parser.GetBase()
return context, base, systemId, publicId
@expat("DefaultHandlerExpand", guard="not_in_dtd",
returns=empty_source)
def resolveEntity(self, name, publicId, baseURI, systemId):
return "&%s;" % name
@expat("DefaultHandler", guard="not_in_dtd",
returns=empty_source)
def resolveEntity(self, name, publicId, baseURI, systemId):
return "&%s;" % name
@expat(force=True, returns=empty_source)
def resolveEntity(self, name, publicId, baseURI, systemId):
pass
def setDocumentLocator(self, locator):
self._locator = locator
def skippedEntity(self, name):
error = ExpatError()
error.lineno = self.ErrorLineNumber = self.parser._NextLineNumber
error.offset = self.ErrorColumnNumber = self.parser._NextColumnNumber
error.code = self.ErrorCode = None
message = "undefined entity &%s;: line %s, column %s"
message = message % (name, error.lineno, error.offset)
error.__init__(message)
raise error
# --- LexicalHandler Interface --------------------------------------------
@expat("CommentHandler")
def comment(self, characters, start, length):
return self._char_slice_to_unicode(characters, start, length)
@expat("StartCdataSectionHandler")
def startCDATA(self):
return ()
@expat("EndCdataSectionHandler")
def endCDATA(self):
return ()
@expat("StartDoctypeDeclHandler", force=True)
def startDTD(self, name, publicId, systemId):
self.not_in_dtd = False
has_internal_subset = 0 # don't know this ...
return name, systemId, publicId, has_internal_subset
@expat("EndDoctypeDeclHandler", force=True)
def endDTD(self):
self.not_in_dtd = True
def startEntity(self, name):
self._entity = {}
self._entity["location"] = (self.parser._NextLineNumber,
self.parser._NextColumnNumber)
self._entity["name"] = name
def endEntity(self, name):
pass
# --- DTDHandler Interface ------------------------------------------------
@expat("NotationDeclHandler")
def notationDecl(self, name, publicId, systemId):
base = self.parser.GetBase()
return name, base, systemId, publicId
@expat("UnparsedEntityDeclHandler") # deprecated
def unparsedEntityDecl(self, name, publicId, systemId, notationName):
base = self.parser.GetBase()
return name, base, systemId, publicId, notationName
# --- DeclHandler Interface -----------------------------------------------
@expat("AttlistDeclHandler")
def attributeDecl(self, eName, aName, type, mode, value):
# TODO: adapt mode, required, etc.
required = False
return eName, aName, type, value, required
@expat("ElementDeclHandler")
def elementDecl(self, name, model):
return self._expat_content_model(name, model)
@expat("EntityDeclHandler")
def externalEntityDecl(self, name, publicId, systemId):
base = self.parser.GetBase()
value = None
is_parameter_entity = None
notation_name = None
return (name, is_parameter_entity, value, base, systemId, publicId,
notation_name)
@expat("EntityDeclHandler")
def internalEntityDecl(self, name, value):
base = self.parser.GetBase()
is_parameter_entity = None
notation_name = None
systemId, publicId = None, None
return (name, is_parameter_entity, value, base, systemId, publicId,
notation_name)
def _init_model():
global model
model = types.ModuleType("pyexpat.model")
model.__doc__ = "Constants used to interpret content model information."
quantifiers = "NONE, OPT, REP, PLUS"
for i, quantifier in enumerate(quantifiers.split(", ")):
setattr(model, "XML_CQUANT_" + quantifier, i)
types_ = "EMPTY, ANY, MIXED, NAME, CHOICE, SEQ"
for i, type_ in enumerate(types_.split(", ")):
setattr(model, "XML_CTYPE_" + type_, i+1)
_init_model()
del _init_model
class ExpatError(Exception):
pass
error = ExpatError
def _init_error_strings():
global ErrorString
error_strings = (
None,
"out of memory",
"syntax error",
"no element found",
"not well-formed (invalid token)",
"unclosed token",
"partial character",
"mismatched tag",
"duplicate attribute",
"junk after document element",
"illegal parameter entity reference",
"undefined entity",
"recursive entity reference",
"asynchronous entity",
"reference to invalid character number",
"reference to binary entity",
"reference to external entity in attribute",
"XML or text declaration not at start of entity",
"unknown encoding",
"encoding specified in XML declaration is incorrect",
"unclosed CDATA section",
"error in processing external entity reference",
"document is not standalone",
"unexpected parser state - please send a bug report",
"entity declared in parameter entity",
"requested feature requires XML_DTD support in Expat",
"cannot change setting once parsing has begun",
"unbound prefix",
"must not undeclare prefix",
"incomplete markup in parameter entity",
"XML declaration not well-formed",
"text declaration not well-formed",
"illegal character(s) in public id",
"parser suspended",
"parser not suspended",
"parsing aborted",
"parsing finished",
"cannot suspend in external parameter entity")
def ErrorString(code):
try:
return error_strings[code]
except IndexError:
return None
_init_error_strings()
del _init_error_strings
def _init_errors():
global errors
errors = types.ModuleType("pyexpat.errors")
errors.__doc__ = "Constants used to describe error conditions."
error_names = """
XML_ERROR_NONE
XML_ERROR_NONE,
XML_ERROR_NO_MEMORY,
XML_ERROR_SYNTAX,
XML_ERROR_NO_ELEMENTS,
XML_ERROR_INVALID_TOKEN,
XML_ERROR_UNCLOSED_TOKEN,
XML_ERROR_PARTIAL_CHAR,
XML_ERROR_TAG_MISMATCH,
XML_ERROR_DUPLICATE_ATTRIBUTE,
XML_ERROR_JUNK_AFTER_DOC_ELEMENT,
XML_ERROR_PARAM_ENTITY_REF,
XML_ERROR_UNDEFINED_ENTITY,
XML_ERROR_RECURSIVE_ENTITY_REF,
XML_ERROR_ASYNC_ENTITY,
XML_ERROR_BAD_CHAR_REF,
XML_ERROR_BINARY_ENTITY_REF,
XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF,
XML_ERROR_MISPLACED_XML_PI,
XML_ERROR_UNKNOWN_ENCODING,
XML_ERROR_INCORRECT_ENCODING,
XML_ERROR_UNCLOSED_CDATA_SECTION,
XML_ERROR_EXTERNAL_ENTITY_HANDLING,
XML_ERROR_NOT_STANDALONE,
XML_ERROR_UNEXPECTED_STATE,
XML_ERROR_ENTITY_DECLARED_IN_PE,
XML_ERROR_FEATURE_REQUIRES_XML_DTD,
XML_ERROR_CANT_CHANGE_FEATURE_ONCE_PARSING,
XML_ERROR_UNBOUND_PREFIX,
XML_ERROR_UNDECLARING_PREFIX,
XML_ERROR_INCOMPLETE_PE,
XML_ERROR_XML_DECL,
XML_ERROR_TEXT_DECL,
XML_ERROR_PUBLICID,
XML_ERROR_SUSPENDED,
XML_ERROR_NOT_SUSPENDED,
XML_ERROR_ABORTED,
XML_ERROR_FINISHED,
XML_ERROR_SUSPEND_PE
"""
error_names = [name.strip() for name in error_names.split(',')]
for i, name in enumerate(error_names[1:]):
setattr(errors, name, ErrorString(i+1))
_init_errors()
del _init_errors
| 0.000821 |
from sympy import Symbol, Integer
from sympy.physics.quantum.qexpr import QExpr, _qsympify_sequence
from sympy.physics.quantum.hilbert import HilbertSpace
from sympy.core.containers import Tuple
x = Symbol('x')
y = Symbol('y')
def test_qexpr_new():
q = QExpr(0)
assert q.label == (0,)
assert q.hilbert_space == HilbertSpace()
assert q.is_commutative is False
q = QExpr(0, 1)
assert q.label == (Integer(0), Integer(1))
q = QExpr._new_rawargs(HilbertSpace(), Integer(0), Integer(1))
assert q.label == (Integer(0), Integer(1))
assert q.hilbert_space == HilbertSpace()
def test_qexpr_commutative():
q1 = QExpr(x)
q2 = QExpr(y)
assert q1.is_commutative is False
assert q2.is_commutative is False
assert q1*q2 != q2*q1
q = QExpr._new_rawargs(0, 1, HilbertSpace())
assert q.is_commutative is False
def test_qexpr_commutative_free_symbols():
q1 = QExpr(x)
assert q1.free_symbols.pop().is_commutative is False
q2 = QExpr('q2')
assert q2.free_symbols.pop().is_commutative is False
def test_qexpr_subs():
q1 = QExpr(x, y)
assert q1.subs(x, y) == QExpr(y, y)
assert q1.subs({x: 1, y: 2}) == QExpr(1, 2)
def test_qsympify():
assert _qsympify_sequence([[1, 2], [1, 3]]) == (Tuple(1, 2), Tuple(1, 3))
assert _qsympify_sequence(([1, 2, [3, 4, [2, ]], 1], 3)) == \
(Tuple(1, 2, Tuple(3, 4, Tuple(2,)), 1), 3)
assert _qsympify_sequence((1,)) == (1,)
| 0.001373 |
'''
Classes for using multipart form data from Python, which does not (at the
time of writing) support this directly.
To use this, make an instance of Multipart and add parts to it via the factory
methods field and file. When you are done, get the content via the get method.
@author: Stacy Prowell (http://stacyprowell.com)
'''
import mimetypes
class Part(object):
'''
Class holding a single part of the form. You should never need to use
this class directly; instead, use the factory methods in Multipart:
field and file.
'''
# The boundary to use. This is shamelessly taken from the standard.
BOUNDARY = '----------AaB03x'
CRLF = '\r\n'
# Common headers.
CONTENT_TYPE = 'Content-Type'
CONTENT_DISPOSITION = 'Content-Disposition'
# The default content type for parts.
DEFAULT_CONTENT_TYPE = 'application/octet-stream'
def __init__(self, name, filename, body, headers):
'''
Make a new part. The part will have the given headers added initially.
@param name: The part name.
@type name: str
@param filename: If this is a file, the name of the file. Otherwise
None.
@type filename: str
@param body: The body of the part.
@type body: str
@param headers: Additional headers, or overrides, for this part.
You can override Content-Type here.
@type headers: dict
'''
self._headers = headers.copy()
self._name = name
self._filename = filename
self._body = body
# We respect any content type passed in, but otherwise set it here.
# We set the content disposition now, overwriting any prior value.
        if self._filename is None:
self._headers[Part.CONTENT_DISPOSITION] = \
('form-data; name="%s"' % self._name)
self._headers.setdefault(Part.CONTENT_TYPE,
Part.DEFAULT_CONTENT_TYPE)
else:
self._headers[Part.CONTENT_DISPOSITION] = \
('form-data; name="%s"; filename="%s"' %
(self._name, self._filename))
self._headers.setdefault(Part.CONTENT_TYPE,
mimetypes.guess_type(filename)[0]
or Part.DEFAULT_CONTENT_TYPE)
return
def get(self):
'''
Convert the part into a list of lines for output. This includes
the boundary lines, part header lines, and the part itself. A
blank line is included between the header and the body.
@return: Lines of this part.
@rtype: list
'''
lines = []
lines.append('--' + Part.BOUNDARY)
for (key, val) in self._headers.items():
lines.append(str('%s: %s' % (key, val)))
lines.append('')
lines.append(self._body)
return lines
class Multipart(object):
'''
Encapsulate multipart form data. To use this, make an instance and then
add parts to it via the two methods (field and file). When done, you can
get the result via the get method.
See http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4.2 for
details on multipart/form-data.
Watch http://bugs.python.org/issue3244 to see if this is fixed in the
Python libraries.
'''
def __init__(self):
self.parts = []
return
def field(self, name, value, headers={}):
'''
Create and append a field part. This kind of part has a field name
and value.
@param name: The field name.
@type name: str
@param value: The field value.
@type value: str
@param headers: Headers to set in addition to disposition.
@type headers: dict
'''
self.parts.append(Part(name, None, value, headers))
return
def file(self, name, filename, value, headers={}):
'''
        Create and append a file part. This kind of part has a field name,
a filename, and a value.
@param name: The field name.
@type name: str
@param value: The field value.
@type value: str
@param headers: Headers to set in addition to disposition.
@type headers: dict
'''
self.parts.append(Part(name, filename, value, headers))
return
def get(self):
'''
Get the multipart form data. This returns the content type, which
specifies the boundary marker, and also returns the body containing
        all parts and boundary markers.
@return: content type, body
@rtype: tuple
'''
all = []
for part in self.parts:
all += part.get()
all.append('--' + Part.BOUNDARY + '--')
all.append('')
# We have to return the content type, since it specifies the boundary.
content_type = 'multipart/form-data; boundary=%s' % Part.BOUNDARY
return content_type, Part.CRLF.join(all)
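# Minimal usage sketch (not part of the module); the field and file names are
# hypothetical:
#
#     form = Multipart()
#     form.field('author', 'Ada')
#     form.file('upload', 'notes.txt', 'example file contents\n')
#     content_type, body = form.get()
#     # content_type carries the boundary; body is the full encoded form.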
| 0.000196 |
#coding=utf-8
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from dashboard.models import *
from returner.models import *
import logging
from shaker.tasks import dashboard_task, grains_task
from shaker.check_service import CheckPort, CheckProgress
logger = logging.getLogger('django')
@login_required(login_url="/account/login/")
def index(request):
try:
dashboard_task.delay()
grains_task.delay()
except:
logger.error("Connection refused, don't connect rabbitmq service")
try:
dashboard_status = Dashboard_status.objects.get(id=1)
except:
status_list = [0, 0, 0, 0, 0]
else:
status_list = [int(dashboard_status.up),
int(dashboard_status.down),
int(dashboard_status.accepted),
int(dashboard_status.unaccepted),
int(dashboard_status.rejected),
]
logger.info(status_list)
salt_grains = Salt_grains.objects.all()
release_list = []
os_all = []
os_release = []
for release in salt_grains:
release_dic = eval(release.grains)
release_info = release_dic.get('osfullname').decode('string-escape') + release_dic.get('osrelease').decode('string-escape')
release_list.append(release_info)
os_release = list(set(release_list))
    # Distinct OS releases found in the grains
logger.info(os_release)
for release_name in os_release:
os_dic = {'name': release_name, 'value': release_list.count(release_name)}
os_all.append(os_dic)
logger.info(os_all)
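    # os_all is a list of {'name': <release>, 'value': <count>} dicts,
    # e.g. [{'name': 'CentOS 7.2', 'value': 3}] (illustrative values), which
    # the template is assumed to consume for charting.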
    salt_master_status = CheckPort('Salt Master', '127.0.0.1', 4505)
    salt_api_status = CheckPort('Salt Api', '127.0.0.1', 8000)
    rabbitmq_status = CheckPort('RabbitMQ', '127.0.0.1', 5672)
    rabbitmq_mgmt_status = CheckPort('RabbitMQ Management', '127.0.0.1', 15672)
    celery_status = CheckProgress('Celery', 'celery worker')
    check_service = [salt_master_status, salt_api_status, rabbitmq_status, rabbitmq_mgmt_status, celery_status]
return render(request, 'dashboard/index.html', {'status': status_list,
'os_release': os_release,
'os_all': os_all,
'check_service': check_service,
})
| 0.006211 |
from __future__ import unicode_literals
from .common import InfoExtractor
from .internetvideoarchive import InternetVideoArchiveIE
class RottenTomatoesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?rottentomatoes\.com/m/[^/]+/trailers/(?P<id>\d+)'
_TEST = {
'url': 'http://www.rottentomatoes.com/m/toy_story_3/trailers/11028566/',
'info_dict': {
'id': '11028566',
'ext': 'mp4',
'title': 'Toy Story 3',
'description': 'From the creators of the beloved TOY STORY films, comes a story that will reunite the gang in a whole new way.',
'thumbnail': r're:^https?://.*\.jpg$',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
iva_id = self._search_regex(r'publishedid=(\d+)', webpage, 'internet video archive id')
return {
'_type': 'url_transparent',
'url': 'http://video.internetvideoarchive.net/player/6/configuration.ashx?domain=www.videodetective.com&customerid=69249&playerid=641&publishedid=' + iva_id,
'ie_key': InternetVideoArchiveIE.ie_key(),
'id': video_id,
'title': self._og_search_title(webpage),
}
| 0.003903 |
#!/usr/bin/python
# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keystone_domain_facts
short_description: Retrieve facts about one or more OpenStack domains
extends_documentation_fragment: openstack
version_added: "2.1"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
description:
    - Retrieve facts about one or more OpenStack domains
requirements:
- "python >= 2.6"
- "shade"
options:
name:
description:
- Name or ID of the domain
required: true
filters:
description:
- A dictionary of meta data to use for further filtering. Elements of
this dictionary may be additional dictionaries.
required: false
default: None
availability_zone:
description:
      - Ignored. Present for backwards compatibility
required: false
'''
EXAMPLES = '''
# Gather facts about previously created domain
- os_keystone_domain_facts:
cloud: awesomecloud
- debug:
var: openstack_domains
# Gather facts about a previously created domain by name
- os_keystone_domain_facts:
cloud: awesomecloud
name: demodomain
- debug:
var: openstack_domains
# Gather facts about a previously created domain with filter
- os_keystone_domain_facts:
cloud: awesomecloud
name: demodomain
filters:
enabled: False
- debug:
var: openstack_domains
'''
RETURN = '''
openstack_domains:
description: has all the OpenStack facts about domains
returned: always, but can be null
type: complex
contains:
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the domain.
returned: success
type: string
description:
description: Description of the domain.
returned: success
type: string
enabled:
description: Flag to indicate if the domain is enabled.
returned: success
type: bool
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
filters=dict(required=False, type='dict', default=None),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['name', 'filters'],
]
)
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
name = module.params['name']
filters = module.params['filters']
opcloud = shade.operator_cloud(**module.params)
if name:
# Let's suppose user is passing domain ID
try:
                domains = opcloud.get_domain(name)
except:
domains = opcloud.search_domains(filters={'name': name})
else:
domains = opcloud.search_domains(filters)
module.exit_json(changed=False, ansible_facts=dict(
openstack_domains=domains))
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| 0.000971 |
"""
Django admin page for credit eligibility
"""
from ratelimitbackend import admin
from openedx.core.djangoapps.credit.models import (
CreditConfig, CreditCourse, CreditProvider, CreditEligibility, CreditRequest, CreditRequirement,
CreditRequirementStatus
)
class CreditCourseAdmin(admin.ModelAdmin):
"""Admin for credit courses. """
list_display = ('course_key', 'enabled',)
list_filter = ('enabled',)
search_fields = ('course_key',)
class Meta(object):
model = CreditCourse
class CreditProviderAdmin(admin.ModelAdmin):
"""Admin for credit providers. """
list_display = ('provider_id', 'display_name', 'active',)
list_filter = ('active',)
search_fields = ('provider_id', 'display_name')
class Meta(object):
model = CreditProvider
class CreditEligibilityAdmin(admin.ModelAdmin):
"""Admin for credit eligibility. """
list_display = ('course', 'username', 'deadline')
search_fields = ('username', 'course__course_key')
class Meta(object):
model = CreditEligibility
class CreditRequestAdmin(admin.ModelAdmin):
"""Admin for credit requests. """
list_display = ('provider', 'course', 'status', 'username')
list_filter = ('provider', 'status',)
readonly_fields = ('uuid',)
search_fields = ('uuid', 'username', 'course__course_key', 'provider__provider_id')
class Meta(object):
model = CreditRequest
class CreditRequirementAdmin(admin.ModelAdmin):
""" Admin for CreditRequirement. """
list_display = ('course', 'namespace', 'name', 'display_name', 'active',)
list_filter = ('active', 'namespace',)
search_fields = ('course__course_key', 'namespace', 'name',)
class Meta(object):
model = CreditRequirement
class CreditRequirementStatusAdmin(admin.ModelAdmin):
""" Admin for CreditRequirementStatus. """
list_display = ('username', 'requirement', 'status',)
search_fields = ('username', 'requirement__course__course_key',)
class Meta(object):
model = CreditRequirementStatus
admin.site.register(CreditCourse, CreditCourseAdmin)
admin.site.register(CreditProvider, CreditProviderAdmin)
admin.site.register(CreditEligibility, CreditEligibilityAdmin)
admin.site.register(CreditRequest, CreditRequestAdmin)
admin.site.register(CreditConfig)
admin.site.register(CreditRequirement, CreditRequirementAdmin)
admin.site.register(CreditRequirementStatus, CreditRequirementStatusAdmin)
| 0.000813 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models.pluginmodel import CMSPlugin
class TwitterRecentEntries(CMSPlugin):
title = models.CharField(_('title'), max_length=75, blank=True)
twitter_user = models.CharField(_('twitter user'), max_length=75)
count = models.PositiveSmallIntegerField(_('count'), help_text=_('Number of entries to display'), default=3)
link_hint = models.CharField(_('link hint'), max_length=75, blank=True, help_text=_('If given, the hint is displayed as link to your Twitter profile.'))
def __unicode__(self):
return self.title
class TwitterSearch(CMSPlugin):
title = models.CharField(_('title'), max_length=75, blank=True)
query = models.CharField(_('query'), max_length=200, blank=True, default='', help_text=_('Example: "brains AND zombies AND from:umbrella AND to:nemesis": tweets from the user "umbrella" to the user "nemesis" that contain the words "brains" and "zombies"'))
count = models.PositiveSmallIntegerField(_('count'), help_text=_('Number of entries to display'), default=3)
def __unicode__(self):
        return self.title
 | 0.007705 |
from PyObjCTools.TestSupport import *
from Foundation import *
class TestNSPointerFunctions (TestCase):
def testConstants(self):
self.assertEqual(NSPointerFunctionsStrongMemory, (0 << 0))
self.assertEqual(NSPointerFunctionsZeroingWeakMemory, (1 << 0))
self.assertEqual(NSPointerFunctionsOpaqueMemory, (2 << 0))
self.assertEqual(NSPointerFunctionsMallocMemory, (3 << 0))
self.assertEqual(NSPointerFunctionsMachVirtualMemory, (4 << 0))
self.assertEqual(NSPointerFunctionsObjectPersonality, (0 << 8))
self.assertEqual(NSPointerFunctionsOpaquePersonality, (1 << 8))
self.assertEqual(NSPointerFunctionsObjectPointerPersonality, (2 << 8))
self.assertEqual(NSPointerFunctionsCStringPersonality, (3 << 8))
self.assertEqual(NSPointerFunctionsStructPersonality, (4 << 8))
self.assertEqual(NSPointerFunctionsIntegerPersonality, (5 << 8))
self.assertEqual(NSPointerFunctionsCopyIn, (1 << 16))
@min_os_level('10.8')
def testConstants10_8(self):
self.assertEqual(NSPointerFunctionsWeakMemory, 5<<0)
def testPropType(self):
o = NSPointerFunctions.alloc().initWithOptions_(0)
v = o.usesStrongWriteBarrier()
self.assertTrue((v is True) or (v is False) )
self.assertArgIsBOOL(o.setUsesStrongWriteBarrier_, 0)
self.assertArgIsBOOL(o.setUsesWeakReadAndWriteBarriers_, 0)
v = o.usesWeakReadAndWriteBarriers()
self.assertTrue((v is True) or (v is False) )
if __name__ == "__main__":
main()
| 0.002566 |
#!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
from datetime import datetime
import urllib.parse
import urllib.request, urllib.parse, urllib.error
import base64
import hashlib
import hmac
import logging
import operator
import re
from .http_client import HTTP_GET, HTTP_POST
from ..utility import misc
log = logging.getLogger('aws')
class AWSSignature:
SigV2 = '2'
SigV4 = '4'
def __init__(self, accesskey, secretkey, endpoint,
region, service_name, api_version,
signature_version = SigV2,
terminator = 'aws4_request'):
'''
Constructor
'''
self._accesskey = accesskey
self._secretkey = secretkey
self._endpoint = endpoint
self._region = region
self._service_name = service_name
self._api_version = api_version
self._signature_version = signature_version
self._terminator = terminator
def v2_sign(self, verb, request_string):
# This assumes path is always '/'.
stringToSign = verb + '\n' + urllib.parse.urlsplit(self._endpoint)[1] + '\n/\n' + request_string
return base64.b64encode(hmac.new(misc.to_bytes(self._secretkey),
misc.to_bytes(stringToSign),
hashlib.sha256).digest())
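    # For reference (a sketch, not part of the original code): with an endpoint of
    # "https://host/" and a request string "Action=DescribeX&Version=...", the SigV2
    # string to sign built above is
    #     "GET\nhost\n/\nAction=DescribeX&Version=..."
    # which is then HMAC-SHA256'd with the secret key and base64-encoded.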
def v4_sign(self, verb, query_string, headers, region, service_name, timestamp):
        # TODO: For now this assumes the path is always '/'.
formatted_timestamp = timestamp.strftime('%Y%m%dT%H%M%SZ')
date = timestamp.strftime('%Y%m%d')
scope = date + '/' + self._region + '/' + self._service_name + '/' + self._terminator
# Process headers
headers['Host'] = urllib.parse.urlsplit(self._endpoint).netloc
if 'Date' in headers:
del headers['Date']
headers['X-Amz-Date'] = formatted_timestamp
(canonical_headers, signed_headers) = self._canonicalize_headers(headers)
# Generate canonical query string for signature
canonical_request = verb + '\n/\n'
canonical_request += (query_string if verb == HTTP_GET else '') + '\n'
canonical_request += canonical_headers + '\n' + signed_headers + '\n'
canonical_request += hashlib.sha256(query_string.encode('utf-8') \
if verb == HTTP_POST else '').hexdigest()
# Generate string to sign
string_to_sign = 'AWS4-HMAC-SHA256\n' + formatted_timestamp + '\n' + scope + '\n' \
+ hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()
# Generate signing key
derived_key = hmac.new(('AWS4' + self._secretkey).encode('utf-8'),
date.encode('utf-8'), hashlib.sha256).digest()
derived_key = hmac.new(derived_key,
self._region.encode('utf-8'), hashlib.sha256).digest()
derived_key = hmac.new(derived_key,
self._service_name.encode('utf-8'), hashlib.sha256).digest()
derived_key = hmac.new(derived_key,
'aws4_request'.encode('utf-8'), hashlib.sha256).digest()
# Sign
signature = hmac.new(derived_key,
string_to_sign.encode('utf-8'), hashlib.sha256).hexdigest()
# Fill signature into header (recommended way)
credential = self._accesskey + '/' + scope
headers['Authorization'] = 'AWS4-HMAC-SHA256 Credential=%s,SignedHeaders=%s,Signature=%s' \
% (credential, signed_headers, signature)
return headers
def construct_get_url(self, params, headers):
host = self._endpoint if self._endpoint.endswith('/') else self._endpoint + '/'
if self._signature_version == self.SigV2:
query_string = self._generate_v2_query_string(params)
return (misc.to_bytes(host + '?' + query_string + '&Signature=' \
+ urllib.parse.quote(self.v2_sign(HTTP_GET, query_string))),
headers)
elif self._signature_version == self.SigV4:
            timestamp = datetime.utcnow()
            query_string = self._generate_v4_query_string(params, timestamp.isoformat())
return (misc.to_bytes(host + '?' + query_string),
self.v4_sign(HTTP_POST, query_string, headers, self._region,
self._service_name, timestamp))
else:
raise AttributeError('Not supported signature version: "{0}"'.\
format(self._signature_version))
def construct_post_data(self, params, headers):
if self._signature_version == self.SigV2:
query_string = self._generate_v2_query_string(params)
return (misc.to_bytes(query_string + '&Signature=' \
+ urllib.parse.quote(self.v2_sign(HTTP_POST, query_string))),
headers)
elif self._signature_version == self.SigV4:
timestamp = datetime.utcnow()
query_string = self._generate_v4_query_string(params, timestamp.isoformat())
return (query_string,
self.v4_sign(HTTP_POST, query_string, headers, self._region,
self._service_name, timestamp))
else:
raise AttributeError('Not supported signature version: "{0}"'.\
format(self._signature_version))
def _generate_v2_query_string(self, params):
data = dict(params)
data['SignatureVersion'] = self._signature_version
data['Version'] = self._api_version
data['AWSAccessKeyId'] = self._accesskey
data['Timestamp'] = datetime.utcnow().isoformat()
data['SignatureMethod'] = 'HmacSHA256'
data['ContentType'] = 'JSON'
return self._construct_query(data)
def _generate_v4_query_string(self, params, timestamp):
data = dict(params)
data['Version'] = self._api_version
data['Timestamp'] = timestamp
data['ContentType'] = 'JSON'
return self._construct_query(data)
def _canonicalize_uri(self, uri):
split = urllib.parse.urlsplit(uri)
if not split.path:
return '/'
path = urllib.parse.urlsplit(urllib.parse.urljoin('http://foo.com', \
split.path.lstrip('/'))).path.rstrip('/')
return urllib.parse.quote(misc.to_bytes(path), '/~') if path else '/'
def _canonicalize_headers(self, headers):
canon_headers = {}
for key, value in ((key.lower(), re.sub(r'(?su)[\s]+', ' ', value).strip()) \
for key, value in headers.items()):
if key in canon_headers:
canon_headers[key] = canon_headers[key] + ',' + value
else:
canon_headers[key] = value
sorted_entries = sorted(iter(canon_headers.items()), key=operator.itemgetter(0))
return ('\n'.join((':'.join(entry) for entry in sorted_entries)) \
+ '\n', ';'.join((entry[0] for entry in sorted_entries)))
def _construct_query(self, params):
if not params:
return ''
ret_str = ''
for k, vs in sorted(iter(params.items()), key=operator.itemgetter(0)):
            if isinstance(vs, list):
                for v in sorted(vs):
                    # Join repeated keys with '&' separators instead of mangling
                    # the encoded pair character by character.
                    if ret_str:
                        ret_str += '&'
                    ret_str += urllib.parse.quote(misc.to_bytes(k), safe='~') \
                        + '=' + urllib.parse.quote(misc.to_bytes(v), safe='~')
else:
if ret_str:
ret_str += '&'
ret_str += urllib.parse.quote(misc.to_bytes(k), safe='~') \
+ '=' + urllib.parse.quote(misc.to_bytes(vs), safe='~')
return ret_str
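# Illustrative usage sketch (not part of the original module); the credentials,
# endpoint, service name and parameters below are made-up placeholders.
#
#   signer = AWSSignature('AKIA...', 'secret', 'https://example.us-east-1.amazonaws.com',
#                         'us-east-1', 'elasticbeanstalk', '2010-12-01',
#                         signature_version=AWSSignature.SigV2)
#   url, headers = signer.construct_get_url({'Operation': 'DescribeEnvironments'}, {})
#   body, headers = signer.construct_post_data({'Operation': 'DescribeEnvironments'}, {})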
| 0.011215 |
import json
from allauth.socialaccount.providers.oauth.client import OAuth
from allauth.socialaccount.providers.oauth.views import (
OAuthAdapter,
OAuthCallbackView,
OAuthLoginView,
)
from .provider import TumblrProvider
class TumblrAPI(OAuth):
url = 'http://api.tumblr.com/v2/user/info'
def get_user_info(self):
data = json.loads(self.query(self.url))
return data['response']['user']
class TumblrOAuthAdapter(OAuthAdapter):
provider_id = TumblrProvider.id
request_token_url = 'https://www.tumblr.com/oauth/request_token'
access_token_url = 'https://www.tumblr.com/oauth/access_token'
authorize_url = 'https://www.tumblr.com/oauth/authorize'
def complete_login(self, request, app, token, response):
client = TumblrAPI(request, app.client_id, app.secret,
self.request_token_url)
extra_data = client.get_user_info()
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth_login = OAuthLoginView.adapter_view(TumblrOAuthAdapter)
oauth_callback = OAuthCallbackView.adapter_view(TumblrOAuthAdapter)
| 0 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
sales_orders = frappe.db.sql("""select name from `tabSales Order`
where docstatus = 1 and ifnull(is_recurring, 0) = 1
and (per_delivered > 0 or per_billed > 0)""", as_dict=1)
for so in sales_orders:
if not frappe.db.exists("Delivery Note Item", {"against_sales_order": so.name, "docstatus": 1}):
frappe.db.sql("""update `tabSales Order` set per_delivered = 0,
delivery_status = 'Not Delivered' where name = %s""", so.name)
frappe.db.sql("""update `tabSales Order Item` set delivered_qty = 0
where parent = %s""", so.name)
if not frappe.db.exists("Sales Invoice Item", {"sales_order": so.name, "docstatus": 1}):
frappe.db.sql("""update `tabSales Order` set per_billed = 0,
billing_status = 'Not Billed' where name = %s""", so.name)
frappe.db.sql("""update `tabSales Order Item` set billed_amt = 0
where parent = %s""", so.name)
purchase_orders = frappe.db.sql("""select name from `tabPurchase Order`
where docstatus = 1 and ifnull(is_recurring, 0) = 1
and (per_received > 0 or per_billed > 0)""", as_dict=1)
for po in purchase_orders:
if not frappe.db.exists("Purchase Receipt Item", {"prevdoc_doctype": "Purchase Order",
"prevdoc_docname": po.name, "docstatus": 1}):
frappe.db.sql("""update `tabPurchase Order` set per_received = 0
where name = %s""", po.name)
frappe.db.sql("""update `tabPurchase Order Item` set received_qty = 0
where parent = %s""", po.name)
if not frappe.db.exists("Purchase Invoice Item", {"purchase_order": po.name, "docstatus": 1}):
frappe.db.sql("""update `tabPurchase Order` set per_billed = 0
where name = %s""", po.name)
frappe.db.sql("""update `tabPurchase Order Item` set billed_amt = 0
			where parent = %s""", po.name)
 | 0.022645 |
import os
import sys
from django.conf import settings
from django.core.management.base import BaseCommand
from django.test.utils import get_runner
class Command(BaseCommand):
help = 'Discover and run tests in the specified modules or the current directory.'
requires_system_checks = False
def __init__(self):
self.test_runner = None
super(Command, self).__init__()
def run_from_argv(self, argv):
"""
Pre-parse the command line to extract the value of the --testrunner
option. This allows a test runner to define additional command line
arguments.
"""
option = '--testrunner='
for arg in argv[2:]:
if arg.startswith(option):
self.test_runner = arg[len(option):]
break
super(Command, self).run_from_argv(argv)
def add_arguments(self, parser):
parser.add_argument('args', metavar='test_label', nargs='*',
help='Module paths to test; can be modulename, modulename.TestCase or modulename.TestCase.test_method')
parser.add_argument('--noinput', '--no-input',
action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument('--failfast',
action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first '
'failed test.')
parser.add_argument('--testrunner',
action='store', dest='testrunner',
help='Tells Django to use specified test runner class instead of '
'the one specified by the TEST_RUNNER setting.')
parser.add_argument('--liveserver',
action='store', dest='liveserver', default=None,
help='Overrides the default address where the live server (used '
'with LiveServerTestCase) is expected to run from. The '
'default value is localhost:8081-8179.')
test_runner_class = get_runner(settings, self.test_runner)
if hasattr(test_runner_class, 'add_arguments'):
test_runner_class.add_arguments(parser)
def handle(self, *test_labels, **options):
from django.conf import settings
from django.test.utils import get_runner
TestRunner = get_runner(settings, options['testrunner'])
if options['liveserver'] is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options['liveserver']
del options['liveserver']
test_runner = TestRunner(**options)
failures = test_runner.run_tests(test_labels)
if failures:
sys.exit(bool(failures))
| 0.005822 |
import logging
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, List, Optional, Type
from django.conf import settings
from django.db.models.query import QuerySet
from django.http import HttpRequest, HttpResponse, HttpResponseNotFound
from django.shortcuts import render
from django.utils import translation
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from analytics.lib.counts import COUNT_STATS, CountStat
from analytics.lib.time_utils import time_range
from analytics.models import (
BaseCount,
InstallationCount,
RealmCount,
StreamCount,
UserCount,
installation_epoch,
)
from zerver.decorator import (
require_non_guest_user,
require_server_admin,
require_server_admin_api,
to_utc_datetime,
zulip_login_required,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.i18n import get_and_set_request_language, get_language_translation_data
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.timestamp import convert_to_UTC
from zerver.lib.validator import to_non_negative_int
from zerver.models import Client, Realm, UserProfile, get_realm
if settings.ZILENCER_ENABLED:
from zilencer.models import RemoteInstallationCount, RemoteRealmCount, RemoteZulipServer
MAX_TIME_FOR_FULL_ANALYTICS_GENERATION = timedelta(days=1, minutes=30)
def is_analytics_ready(realm: Realm) -> bool:
return (timezone_now() - realm.date_created) > MAX_TIME_FOR_FULL_ANALYTICS_GENERATION
def render_stats(
request: HttpRequest,
data_url_suffix: str,
target_name: str,
for_installation: bool = False,
remote: bool = False,
analytics_ready: bool = True,
) -> HttpResponse:
page_params = dict(
data_url_suffix=data_url_suffix,
for_installation=for_installation,
remote=remote,
)
request_language = get_and_set_request_language(
request,
request.user.default_language,
translation.get_language_from_path(request.path_info),
)
page_params["translation_data"] = get_language_translation_data(request_language)
return render(
request,
"analytics/stats.html",
context=dict(
target_name=target_name, page_params=page_params, analytics_ready=analytics_ready
),
)
@zulip_login_required
def stats(request: HttpRequest) -> HttpResponse:
realm = request.user.realm
if request.user.is_guest:
# TODO: Make @zulip_login_required pass the UserProfile so we
# can use @require_member_or_admin
raise JsonableError(_("Not allowed for guest users"))
return render_stats(
request, "", realm.name or realm.string_id, analytics_ready=is_analytics_ready(realm)
)
@require_server_admin
@has_request_variables
def stats_for_realm(request: HttpRequest, realm_str: str) -> HttpResponse:
try:
realm = get_realm(realm_str)
except Realm.DoesNotExist:
return HttpResponseNotFound()
return render_stats(
request,
f"/realm/{realm_str}",
realm.name or realm.string_id,
analytics_ready=is_analytics_ready(realm),
)
@require_server_admin
@has_request_variables
def stats_for_remote_realm(
request: HttpRequest, remote_server_id: int, remote_realm_id: int
) -> HttpResponse:
assert settings.ZILENCER_ENABLED
server = RemoteZulipServer.objects.get(id=remote_server_id)
return render_stats(
request,
f"/remote/{server.id}/realm/{remote_realm_id}",
f"Realm {remote_realm_id} on server {server.hostname}",
)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_realm(
request: HttpRequest, user_profile: UserProfile, realm_str: str, **kwargs: Any
) -> HttpResponse:
try:
realm = get_realm(realm_str)
except Realm.DoesNotExist:
raise JsonableError(_("Invalid organization"))
return get_chart_data(request=request, user_profile=user_profile, realm=realm, **kwargs)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_remote_realm(
request: HttpRequest,
user_profile: UserProfile,
remote_server_id: int,
remote_realm_id: int,
**kwargs: Any,
) -> HttpResponse:
assert settings.ZILENCER_ENABLED
server = RemoteZulipServer.objects.get(id=remote_server_id)
return get_chart_data(
request=request,
user_profile=user_profile,
server=server,
remote=True,
remote_realm_id=int(remote_realm_id),
**kwargs,
)
@require_server_admin
def stats_for_installation(request: HttpRequest) -> HttpResponse:
return render_stats(request, "/installation", "installation", True)
@require_server_admin
def stats_for_remote_installation(request: HttpRequest, remote_server_id: int) -> HttpResponse:
assert settings.ZILENCER_ENABLED
server = RemoteZulipServer.objects.get(id=remote_server_id)
return render_stats(
request,
f"/remote/{server.id}/installation",
f"remote installation {server.hostname}",
True,
True,
)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_installation(
request: HttpRequest, user_profile: UserProfile, chart_name: str = REQ(), **kwargs: Any
) -> HttpResponse:
return get_chart_data(
request=request, user_profile=user_profile, for_installation=True, **kwargs
)
@require_server_admin_api
@has_request_variables
def get_chart_data_for_remote_installation(
request: HttpRequest,
user_profile: UserProfile,
remote_server_id: int,
chart_name: str = REQ(),
**kwargs: Any,
) -> HttpResponse:
assert settings.ZILENCER_ENABLED
server = RemoteZulipServer.objects.get(id=remote_server_id)
return get_chart_data(
request=request,
user_profile=user_profile,
for_installation=True,
remote=True,
server=server,
**kwargs,
)
@require_non_guest_user
@has_request_variables
def get_chart_data(
request: HttpRequest,
user_profile: UserProfile,
chart_name: str = REQ(),
min_length: Optional[int] = REQ(converter=to_non_negative_int, default=None),
start: Optional[datetime] = REQ(converter=to_utc_datetime, default=None),
end: Optional[datetime] = REQ(converter=to_utc_datetime, default=None),
realm: Optional[Realm] = None,
for_installation: bool = False,
remote: bool = False,
remote_realm_id: Optional[int] = None,
server: Optional["RemoteZulipServer"] = None,
) -> HttpResponse:
if for_installation:
if remote:
assert settings.ZILENCER_ENABLED
aggregate_table = RemoteInstallationCount
assert server is not None
else:
aggregate_table = InstallationCount
else:
if remote:
assert settings.ZILENCER_ENABLED
aggregate_table = RemoteRealmCount
assert server is not None
assert remote_realm_id is not None
else:
aggregate_table = RealmCount
if chart_name == "number_of_humans":
stats = [
COUNT_STATS["1day_actives::day"],
COUNT_STATS["realm_active_humans::day"],
COUNT_STATS["active_users_audit:is_bot:day"],
]
tables = [aggregate_table]
subgroup_to_label: Dict[CountStat, Dict[Optional[str], str]] = {
stats[0]: {None: "_1day"},
stats[1]: {None: "_15day"},
stats[2]: {"false": "all_time"},
}
labels_sort_function = None
include_empty_subgroups = True
elif chart_name == "messages_sent_over_time":
stats = [COUNT_STATS["messages_sent:is_bot:hour"]]
tables = [aggregate_table, UserCount]
subgroup_to_label = {stats[0]: {"false": "human", "true": "bot"}}
labels_sort_function = None
include_empty_subgroups = True
elif chart_name == "messages_sent_by_message_type":
stats = [COUNT_STATS["messages_sent:message_type:day"]]
tables = [aggregate_table, UserCount]
subgroup_to_label = {
stats[0]: {
"public_stream": _("Public streams"),
"private_stream": _("Private streams"),
"private_message": _("Private messages"),
"huddle_message": _("Group private messages"),
}
}
labels_sort_function = lambda data: sort_by_totals(data["everyone"])
include_empty_subgroups = True
elif chart_name == "messages_sent_by_client":
stats = [COUNT_STATS["messages_sent:client:day"]]
tables = [aggregate_table, UserCount]
# Note that the labels are further re-written by client_label_map
subgroup_to_label = {
stats[0]: {str(id): name for id, name in Client.objects.values_list("id", "name")}
}
labels_sort_function = sort_client_labels
include_empty_subgroups = False
elif chart_name == "messages_read_over_time":
stats = [COUNT_STATS["messages_read::hour"]]
tables = [aggregate_table, UserCount]
subgroup_to_label = {stats[0]: {None: "read"}}
labels_sort_function = None
include_empty_subgroups = True
else:
raise JsonableError(_("Unknown chart name: {}").format(chart_name))
# Most likely someone using our API endpoint. The /stats page does not
# pass a start or end in its requests.
if start is not None:
start = convert_to_UTC(start)
if end is not None:
end = convert_to_UTC(end)
if start is not None and end is not None and start > end:
raise JsonableError(
_("Start time is later than end time. Start: {start}, End: {end}").format(
start=start,
end=end,
)
)
if realm is None:
# Note that this value is invalid for Remote tables; be
# careful not to access it in those code paths.
realm = user_profile.realm
if remote:
# For remote servers, we don't have fillstate data, and thus
# should simply use the first and last data points for the
# table.
assert server is not None
if not aggregate_table.objects.filter(server=server).exists():
raise JsonableError(
_("No analytics data available. Please contact your server administrator.")
)
if start is None:
start = aggregate_table.objects.filter(server=server).first().end_time
if end is None:
end = aggregate_table.objects.filter(server=server).last().end_time
else:
# Otherwise, we can use tables on the current server to
# determine a nice range, and some additional validation.
if start is None:
if for_installation:
start = installation_epoch()
else:
start = realm.date_created
if end is None:
end = max(
stat.last_successful_fill() or datetime.min.replace(tzinfo=timezone.utc)
for stat in stats
)
if start > end and (timezone_now() - start > MAX_TIME_FOR_FULL_ANALYTICS_GENERATION):
logging.warning(
"User from realm %s attempted to access /stats, but the computed "
"start time: %s (creation of realm or installation) is later than the computed "
"end time: %s (last successful analytics update). Is the "
"analytics cron job running?",
realm.string_id,
start,
end,
)
raise JsonableError(
_("No analytics data available. Please contact your server administrator.")
)
assert len({stat.frequency for stat in stats}) == 1
end_times = time_range(start, end, stats[0].frequency, min_length)
data: Dict[str, Any] = {
"end_times": [int(end_time.timestamp()) for end_time in end_times],
"frequency": stats[0].frequency,
}
aggregation_level = {
InstallationCount: "everyone",
RealmCount: "everyone",
UserCount: "user",
}
if settings.ZILENCER_ENABLED:
aggregation_level[RemoteInstallationCount] = "everyone"
aggregation_level[RemoteRealmCount] = "everyone"
# -1 is a placeholder value, since there is no relevant filtering on InstallationCount
id_value = {
InstallationCount: -1,
RealmCount: realm.id,
UserCount: user_profile.id,
}
if settings.ZILENCER_ENABLED:
if server is not None:
id_value[RemoteInstallationCount] = server.id
# TODO: RemoteRealmCount logic doesn't correctly handle
# filtering by server_id as well.
if remote_realm_id is not None:
id_value[RemoteRealmCount] = remote_realm_id
for table in tables:
data[aggregation_level[table]] = {}
for stat in stats:
data[aggregation_level[table]].update(
get_time_series_by_subgroup(
stat,
table,
id_value[table],
end_times,
subgroup_to_label[stat],
include_empty_subgroups,
)
)
if labels_sort_function is not None:
data["display_order"] = labels_sort_function(data)
else:
data["display_order"] = None
return json_success(data=data)
def sort_by_totals(value_arrays: Dict[str, List[int]]) -> List[str]:
totals = [(sum(values), label) for label, values in value_arrays.items()]
totals.sort(reverse=True)
return [label for total, label in totals]
# For any given user, we want to show a fixed set of clients in the chart,
# regardless of the time aggregation or whether we're looking at realm or
# user data. This fixed set ideally includes the clients most important in
# understanding the realm's traffic and the user's traffic. This function
# tries to rank the clients so that taking the first N elements of the
# sorted list has a reasonable chance of doing so.
def sort_client_labels(data: Dict[str, Dict[str, List[int]]]) -> List[str]:
realm_order = sort_by_totals(data["everyone"])
user_order = sort_by_totals(data["user"])
label_sort_values: Dict[str, float] = {}
for i, label in enumerate(realm_order):
label_sort_values[label] = i
for i, label in enumerate(user_order):
label_sort_values[label] = min(i - 0.1, label_sort_values.get(label, i))
return [label for label, sort_value in sorted(label_sort_values.items(), key=lambda x: x[1])]
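# Worked example with assumed data (not from the codebase): if the realm-wide
# order is ["Website", "Mobile app"] and the requesting user's own order is
# ["Mobile app"], then "Mobile app" gets min(0 - 0.1, 1) = -0.1 while "Website"
# keeps 0, so the user's top client is listed first in the display order.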
def table_filtered_to_id(table: Type[BaseCount], key_id: int) -> QuerySet:
if table == RealmCount:
return RealmCount.objects.filter(realm_id=key_id)
elif table == UserCount:
return UserCount.objects.filter(user_id=key_id)
elif table == StreamCount:
return StreamCount.objects.filter(stream_id=key_id)
elif table == InstallationCount:
return InstallationCount.objects.all()
elif settings.ZILENCER_ENABLED and table == RemoteInstallationCount:
return RemoteInstallationCount.objects.filter(server_id=key_id)
elif settings.ZILENCER_ENABLED and table == RemoteRealmCount:
return RemoteRealmCount.objects.filter(realm_id=key_id)
else:
raise AssertionError(f"Unknown table: {table}")
def client_label_map(name: str) -> str:
if name == "website":
return "Website"
if name.startswith("desktop app"):
return "Old desktop app"
if name == "ZulipElectron":
return "Desktop app"
if name == "ZulipAndroid":
return "Old Android app"
if name == "ZulipiOS":
return "Old iOS app"
if name == "ZulipMobile":
return "Mobile app"
if name in ["ZulipPython", "API: Python"]:
return "Python API"
if name.startswith("Zulip") and name.endswith("Webhook"):
return name[len("Zulip") : -len("Webhook")] + " webhook"
return name
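# Example mappings implied by the rules above (illustrative): "ZulipGitHubWebhook"
# becomes "GitHub webhook", "ZulipElectron" becomes "Desktop app", and an
# unrecognized name such as "SomeCustomClient" is returned unchanged.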
def rewrite_client_arrays(value_arrays: Dict[str, List[int]]) -> Dict[str, List[int]]:
mapped_arrays: Dict[str, List[int]] = {}
for label, array in value_arrays.items():
mapped_label = client_label_map(label)
if mapped_label in mapped_arrays:
for i in range(0, len(array)):
mapped_arrays[mapped_label][i] += value_arrays[label][i]
else:
mapped_arrays[mapped_label] = [value_arrays[label][i] for i in range(0, len(array))]
return mapped_arrays
def get_time_series_by_subgroup(
stat: CountStat,
table: Type[BaseCount],
key_id: int,
end_times: List[datetime],
subgroup_to_label: Dict[Optional[str], str],
include_empty_subgroups: bool,
) -> Dict[str, List[int]]:
queryset = (
table_filtered_to_id(table, key_id)
.filter(property=stat.property)
.values_list("subgroup", "end_time", "value")
)
value_dicts: Dict[Optional[str], Dict[datetime, int]] = defaultdict(lambda: defaultdict(int))
for subgroup, end_time, value in queryset:
value_dicts[subgroup][end_time] = value
value_arrays = {}
for subgroup, label in subgroup_to_label.items():
if (subgroup in value_dicts) or include_empty_subgroups:
value_arrays[label] = [value_dicts[subgroup][end_time] for end_time in end_times]
if stat == COUNT_STATS["messages_sent:client:day"]:
# HACK: We rewrite these arrays to collapse the Client objects
# with similar names into a single sum, and generally give
# them better names
return rewrite_client_arrays(value_arrays)
return value_arrays
| 0.001685 |
# Ben Jones [email protected]
# Georgia Tech Fall 2014
#
# tcpdump.py: interface to tcpdump to stop and start captures and do
# second passes over existing pcaps
from base64 import b64encode
import logging
import os
import tempfile
# local imports
import centinel
from centinel import command
class Tcpdump():
"""Class to interface between tcpdump and Python"""
def __init__(self, filename=None, pcap_args=None):
if filename is None:
temp_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
temp_file.close()
filename = temp_file.name
self.filename = filename
# don't change this to default value because it is a mutable
# type and whatever you change it to will become the new
# default value until the interpreter is restarted
if pcap_args is None:
# use the centinel configured tcpdump options if available
# (if not specified by the user, this will be -i any, so
            # the same as the fallback below)
if 'tcpdump_params' in centinel.conf['experiments']:
pcap_args = centinel.conf['experiments']['tcpdump_params']
            # for backwards compatibility, ensure that we give some
# pcap args for what to capture
else:
pcap_args = ["-i", "any"]
logging.warning("Global config not available, so falling "
"back on -i any pcap args")
self.pcap_args = pcap_args
def start(self):
cmd = ['sudo', 'tcpdump', '-w', self.filename]
cmd.extend(self.pcap_args)
self.caller = command.Command(cmd, _tcpdump_callback)
self.caller.start()
def stop(self):
if self.caller is not None:
self.caller.stop()
def post_processing(self, out_filter, out_file=None):
if out_file is None:
temp_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
temp_file.close()
out_file = temp_file.name
cmd = ['tcpdump', '-r', self.filename, '-w', out_file]
caller = command.Command(cmd, _tcpdump_callback)
caller.start()
def b64_output(self):
with open(self.filename, 'r') as file_p:
return b64encode(file_p.read())
def pcap(self):
with open(self.filename, 'r') as file_p:
return file_p.read()
def delete(self):
os.remove(self.filename)
def _tcpdump_callback(self, line, kill_switch):
"""Callback function to handle tcpdump"""
line = line.lower()
if ("listening" in line) or ("reading" in line):
self.started = True
if ("no suitable device" in line):
self.error = True
self.kill_switch()
if "by kernel" in line:
self.stopped = True
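# Usage sketch (illustrative, not part of the module): assumes passwordless sudo
# for tcpdump and that centinel's global config has been loaded.
#
#   dump = Tcpdump(pcap_args=["-i", "any", "port", "53"])
#   dump.start()
#   ...                      # run the measurement being captured
#   dump.stop()
#   dump.post_processing("port 53", out_file="dns_only.pcap")
#   dump.delete()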
| 0 |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Sample command-line program for listing Google Dataproc Clusters"""
import argparse
import os
from google.cloud import storage
import googleapiclient.discovery
# Currently only the "global" region is supported
REGION = 'global'
DEFAULT_FILENAME = 'pyspark_sort.py'
def get_default_pyspark_file():
"""Gets the PySpark file from this directory"""
current_dir = os.path.dirname(os.path.abspath(__file__))
f = open(os.path.join(current_dir, DEFAULT_FILENAME), 'r')
return f, DEFAULT_FILENAME
def get_pyspark_file(filename):
f = open(filename, 'r')
return f, os.path.basename(filename)
def upload_pyspark_file(project_id, bucket_name, filename, file):
"""Uploads the PySpark file in this directory to the configured
input bucket."""
print('Uploading pyspark file to GCS')
client = storage.Client(project=project_id)
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(filename)
blob.upload_from_file(file)
def download_output(project_id, cluster_id, output_bucket, job_id):
"""Downloads the output file from Cloud Storage and returns it as a
string."""
print('Downloading output file')
client = storage.Client(project=project_id)
bucket = client.get_bucket(output_bucket)
output_blob = (
'google-cloud-dataproc-metainfo/{}/jobs/{}/driveroutput.000000000'
.format(cluster_id, job_id))
return bucket.blob(output_blob).download_as_string()
# [START create_cluster]
def create_cluster(dataproc, project, cluster_name, zone):
print('Creating cluster.')
zone_uri = \
'https://www.googleapis.com/compute/v1/projects/{}/zones/{}'.format(
project, zone)
cluster_data = {
'projectId': project,
'clusterName': cluster_name,
'config': {
'gceClusterConfig': {
'zoneUri': zone_uri
}
}
}
result = dataproc.projects().regions().clusters().create(
projectId=project,
region=REGION,
body=cluster_data).execute()
return result
# [END create_cluster]
def wait_for_cluster_creation(dataproc, project_id, cluster_name, zone):
print('Waiting for cluster creation')
while True:
result = dataproc.projects().regions().clusters().list(
projectId=project_id,
region=REGION).execute()
cluster_list = result['clusters']
cluster = [c
for c in cluster_list
if c['clusterName'] == cluster_name][0]
if cluster['status']['state'] == 'ERROR':
raise Exception(result['status']['details'])
if cluster['status']['state'] == 'RUNNING':
print("Cluster created.")
break
# [START list_clusters_with_detail]
def list_clusters_with_details(dataproc, project):
result = dataproc.projects().regions().clusters().list(
projectId=project,
region=REGION).execute()
cluster_list = result['clusters']
for cluster in cluster_list:
print("{} - {}"
.format(cluster['clusterName'], cluster['status']['state']))
return result
# [END list_clusters_with_detail]
def get_cluster_id_by_name(cluster_list, cluster_name):
"""Helper function to retrieve the ID and output bucket of a cluster by
name."""
cluster = [c for c in cluster_list if c['clusterName'] == cluster_name][0]
return cluster['clusterUuid'], cluster['config']['configBucket']
# [START submit_pyspark_job]
def submit_pyspark_job(dataproc, project, cluster_name, bucket_name, filename):
"""Submits the Pyspark job to the cluster, assuming `filename` has
already been uploaded to `bucket_name`"""
job_details = {
'projectId': project,
'job': {
'placement': {
'clusterName': cluster_name
},
'pysparkJob': {
'mainPythonFileUri': 'gs://{}/{}'.format(bucket_name, filename)
}
}
}
result = dataproc.projects().regions().jobs().submit(
projectId=project,
region=REGION,
body=job_details).execute()
job_id = result['reference']['jobId']
print('Submitted job ID {}'.format(job_id))
return job_id
# [END submit_pyspark_job]
# [START delete]
def delete_cluster(dataproc, project, cluster):
print('Tearing down cluster')
result = dataproc.projects().regions().clusters().delete(
projectId=project,
region=REGION,
clusterName=cluster).execute()
return result
# [END delete]
# [START wait]
def wait_for_job(dataproc, project, job_id):
print('Waiting for job to finish...')
while True:
result = dataproc.projects().regions().jobs().get(
projectId=project,
region=REGION,
jobId=job_id).execute()
# Handle exceptions
if result['status']['state'] == 'ERROR':
raise Exception(result['status']['details'])
elif result['status']['state'] == 'DONE':
print('Job finished')
return result
# [END wait]
# [START get_client]
def get_client():
"""Builds an http client authenticated with the service account
credentials."""
dataproc = googleapiclient.discovery.build('dataproc', 'v1')
return dataproc
# [END get_client]
def main(project_id, zone, cluster_name, bucket_name, pyspark_file=None):
dataproc = get_client()
try:
if pyspark_file:
spark_file, spark_filename = get_pyspark_file(pyspark_file)
else:
spark_file, spark_filename = get_default_pyspark_file()
create_cluster(dataproc, project_id, cluster_name, zone)
wait_for_cluster_creation(dataproc, project_id, cluster_name, zone)
upload_pyspark_file(project_id, bucket_name,
spark_filename, spark_file)
cluster_list = list_clusters_with_details(
dataproc, project_id)['clusters']
(cluster_id, output_bucket) = (
get_cluster_id_by_name(cluster_list, cluster_name))
# [START call_submit_pyspark_job]
job_id = submit_pyspark_job(
dataproc, project_id, cluster_name, bucket_name, spark_filename)
# [END call_submit_pyspark_job]
wait_for_job(dataproc, project_id, job_id)
output = download_output(project_id, cluster_id, output_bucket, job_id)
print('Received job output {}'.format(output))
return output
finally:
delete_cluster(dataproc, project_id, cluster_name)
spark_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'--project_id', help='Project ID you want to access.', required=True),
parser.add_argument(
'--zone', help='Region to create clusters in', required=True)
parser.add_argument(
'--cluster_name', help='Name of the cluster to create', required=True)
parser.add_argument(
'--gcs_bucket', help='Bucket to upload Pyspark file to', required=True)
parser.add_argument(
'--pyspark_file', help='Pyspark filename. Defaults to pyspark_sort.py')
args = parser.parse_args()
main(
args.project_id, args.zone,
args.cluster_name, args.gcs_bucket, args.pyspark_file)
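# Example invocation (hypothetical script, project, and bucket names):
#   python dataproc_sample.py --project_id=my-project --zone=us-central1-f \
#       --cluster_name=testcluster --gcs_bucket=my-staging-bucket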
| 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-26 14:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('flows', '0097_interrupt_runs_for_archived_flows'),
]
operations = [
migrations.CreateModel(
name='FlowPathRecentMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('from_uuid', models.UUIDField(help_text='Which flow node they came from')),
('to_uuid', models.UUIDField(help_text='Which flow node they went to')),
('text', models.CharField(max_length=640)),
('created_on', models.DateTimeField(help_text='When the message arrived')),
('run', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recent_messages', to='flows.FlowRun')),
],
),
]
| 0.004739 |
"""
:mod:`jedi.evaluate.imports` is here to resolve import statements and return
the modules/classes/functions/whatever, which they stand for. However there's
not any actual importing done. This module is about finding modules in the
filesystem. This can be quite tricky sometimes, because Python imports are not
always that simple.
This module uses imp for python up to 3.2 and importlib for python 3.3 on; the
correct implementation is delegated to _compatibility.
This module also supports import autocompletion, which means to complete
statements like ``from datetim`` (cursor at the end would return ``datetime``).
"""
import imp
import os
import pkgutil
import sys
from itertools import chain
from jedi._compatibility import find_module, unicode
from jedi import common
from jedi import debug
from jedi import cache
from jedi.parser import fast
from jedi.parser import tree
from jedi.evaluate import sys_path
from jedi.evaluate import helpers
from jedi import settings
from jedi.common import source_to_unicode
from jedi.evaluate import compiled
from jedi.evaluate import analysis
from jedi.evaluate.cache import memoize_default, NO_DEFAULT
def completion_names(evaluator, imp, pos):
name = imp.name_for_position(pos)
module = evaluator.wrap(imp.get_parent_until())
if name is None:
level = 0
for node in imp.children:
if node.end_pos <= pos:
if node in ('.', '...'):
level += len(node.value)
import_path = []
else:
# Completion on an existing name.
# The import path needs to be reduced by one, because we're completing.
import_path = imp.path_for_name(name)[:-1]
level = imp.level
importer = Importer(evaluator, tuple(import_path), module, level)
if isinstance(imp, tree.ImportFrom):
c = imp.children
only_modules = c[c.index('import')].start_pos >= pos
else:
only_modules = True
return importer.completion_names(evaluator, only_modules)
class ImportWrapper(tree.Base):
def __init__(self, evaluator, name):
self._evaluator = evaluator
self._name = name
self._import = name.get_parent_until(tree.Import)
self.import_path = self._import.path_for_name(name)
@memoize_default()
def follow(self, is_goto=False):
if self._evaluator.recursion_detector.push_stmt(self._import):
# check recursion
return []
try:
module = self._evaluator.wrap(self._import.get_parent_until())
import_path = self._import.path_for_name(self._name)
from_import_name = None
try:
from_names = self._import.get_from_names()
except AttributeError:
# Is an import_name
pass
else:
if len(from_names) + 1 == len(import_path):
# We have to fetch the from_names part first and then check
# if from_names exists in the modules.
from_import_name = import_path[-1]
import_path = from_names
importer = Importer(self._evaluator, tuple(import_path),
module, self._import.level)
types = importer.follow()
#if self._import.is_nested() and not self.nested_resolve:
# scopes = [NestedImportModule(module, self._import)]
if from_import_name is not None:
types = list(chain.from_iterable(
self._evaluator.find_types(t, unicode(from_import_name),
is_goto=is_goto)
for t in types))
if not types:
path = import_path + [from_import_name]
importer = Importer(self._evaluator, tuple(path),
module, self._import.level)
types = importer.follow()
# goto only accepts `Name`
if is_goto:
types = [s.name for s in types]
else:
# goto only accepts `Name`
if is_goto:
types = [s.name for s in types]
debug.dbg('after import: %s', types)
finally:
self._evaluator.recursion_detector.pop_stmt()
return types
class NestedImportModule(tree.Module):
"""
TODO while there's no use case for nested import module right now, we might
be able to use them for static analysis checks later on.
"""
def __init__(self, module, nested_import):
self._module = module
self._nested_import = nested_import
def _get_nested_import_name(self):
"""
Generates an Import statement, that can be used to fake nested imports.
"""
i = self._nested_import
# This is not an existing Import statement. Therefore, set position to
# 0 (0 is not a valid line number).
zero = (0, 0)
names = [unicode(name) for name in i.namespace_names[1:]]
name = helpers.FakeName(names, self._nested_import)
new = tree.Import(i._sub_module, zero, zero, name)
new.parent = self._module
debug.dbg('Generated a nested import: %s', new)
return helpers.FakeName(str(i.namespace_names[1]), new)
def __getattr__(self, name):
return getattr(self._module, name)
def __repr__(self):
return "<%s: %s of %s>" % (self.__class__.__name__, self._module,
self._nested_import)
def _add_error(evaluator, name, message=None):
if hasattr(name, 'parent'):
# Should be a name, not a string!
analysis.add(evaluator, 'import-error', name, message)
def get_init_path(directory_path):
"""
The __init__ file can be searched in a directory. If found return it, else
None.
"""
for suffix, _, _ in imp.get_suffixes():
path = os.path.join(directory_path, '__init__' + suffix)
if os.path.exists(path):
return path
return None
class Importer(object):
def __init__(self, evaluator, import_path, module, level=0):
"""
An implementation similar to ``__import__``. Use `follow`
to actually follow the imports.
*level* specifies whether to use absolute or relative imports. 0 (the
default) means only perform absolute imports. Positive values for level
indicate the number of parent directories to search relative to the
directory of the module calling ``__import__()`` (see PEP 328 for the
details).
:param import_path: List of namespaces (strings or Names).
"""
debug.speed('import %s' % (import_path,))
self._evaluator = evaluator
self.level = level
self.module = module
try:
self.file_path = module.py__file__()
except AttributeError:
# Can be None for certain compiled modules like 'builtins'.
self.file_path = None
if level:
base = module.py__package__().split('.')
if base == ['']:
base = []
if level > len(base):
path = module.py__file__()
import_path = list(import_path)
for i in range(level):
path = os.path.dirname(path)
dir_name = os.path.basename(path)
# This is not the proper way to do relative imports. However, since
# Jedi cannot be sure about the entry point, we just calculate an
# absolute path here.
if dir_name:
import_path.insert(0, dir_name)
else:
_add_error(self._evaluator, import_path[-1])
import_path = []
# TODO add import error.
debug.warning('Attempted relative import beyond top-level package.')
else:
# Here we basically rewrite the level to 0.
import_path = tuple(base) + import_path
self.import_path = import_path
@property
def str_import_path(self):
"""Returns the import path as pure strings instead of `Name`."""
return tuple(str(name) for name in self.import_path)
@memoize_default()
def sys_path_with_modifications(self):
in_path = []
sys_path_mod = list(sys_path.sys_path_with_modifications(self._evaluator, self.module))
if self.file_path is not None:
# If you edit e.g. gunicorn, there will be imports like this:
# `from gunicorn import something`. But gunicorn is not in the
# sys.path. Therefore look if gunicorn is a parent directory, #56.
if self.import_path: # TODO is this check really needed?
for path in sys_path.traverse_parents(self.file_path):
if os.path.basename(path) == self.str_import_path[0]:
in_path.append(os.path.dirname(path))
# Since we know nothing about the call location of the sys.path,
# it's a possibility that the current directory is the origin of
# the Python execution.
sys_path_mod.insert(0, os.path.dirname(self.file_path))
return in_path + sys_path_mod
@memoize_default(NO_DEFAULT)
def follow(self):
if not self.import_path:
return []
return self._do_import(self.import_path, self.sys_path_with_modifications())
def _do_import(self, import_path, sys_path):
"""
This method is very similar to importlib's `_gcd_import`.
"""
import_parts = [str(i) for i in import_path]
# Handle "magic" Flask extension imports:
# ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
if len(import_path) > 2 and import_parts[:2] == ['flask', 'ext']:
# New style.
ipath = ('flask_' + str(import_parts[2]),) + import_path[3:]
modules = self._do_import(ipath, sys_path)
if modules:
return modules
else:
# Old style
return self._do_import(('flaskext',) + import_path[2:], sys_path)
module_name = '.'.join(import_parts)
try:
return [self._evaluator.modules[module_name]]
except KeyError:
pass
if len(import_path) > 1:
# This is a recursive way of importing that works great with
# the module cache.
bases = self._do_import(import_path[:-1], sys_path)
if not bases:
return []
# We can take the first element, because only the os special
# case yields multiple modules, which is not important for
# further imports.
base = bases[0]
# This is a huge exception, we follow a nested import
# ``os.path``, because it's a very important one in Python
# that is being achieved by messing with ``sys.modules`` in
# ``os``.
if [str(i) for i in import_path] == ['os', 'path']:
return self._evaluator.find_types(base, 'path')
try:
# It's possible that by giving it always the sys path (and not
# the __path__ attribute of the parent, we get wrong results
# and nested namespace packages don't work. But I'm not sure.
paths = base.py__path__(sys_path)
except AttributeError:
# The module is not a package.
_add_error(self._evaluator, import_path[-1])
return []
else:
debug.dbg('search_module %s in paths %s', module_name, paths)
for path in paths:
# At the moment we are only using one path. So this is
# not important to be correct.
try:
module_file, module_path, is_pkg = \
find_module(import_parts[-1], [path])
break
except ImportError:
module_path = None
if module_path is None:
_add_error(self._evaluator, import_path[-1])
return []
else:
try:
debug.dbg('search_module %s in %s', import_parts[-1], self.file_path)
                # Override sys.path. It only works reliably that way.
# Injecting the path directly into `find_module` did not work.
sys.path, temp = sys_path, sys.path
try:
module_file, module_path, is_pkg = \
find_module(import_parts[-1])
finally:
sys.path = temp
except ImportError:
# The module is not a package.
_add_error(self._evaluator, import_path[-1])
return []
source = None
if is_pkg:
# In this case, we don't have a file yet. Search for the
# __init__ file.
module_path = get_init_path(module_path)
elif module_file:
source = module_file.read()
module_file.close()
if module_file is None and not module_path.endswith('.py'):
module = compiled.load_module(module_path)
else:
module = _load_module(self._evaluator, module_path, source, sys_path)
self._evaluator.modules[module_name] = module
return [module]
def _generate_name(self, name):
return helpers.FakeName(name, parent=self.module)
def _get_module_names(self, search_path=None):
"""
Get the names of all modules in the search_path. This means file names
and not names defined in the files.
"""
names = []
# add builtin module names
if search_path is None:
names += [self._generate_name(name) for name in sys.builtin_module_names]
if search_path is None:
search_path = self.sys_path_with_modifications()
for module_loader, name, is_pkg in pkgutil.iter_modules(search_path):
names.append(self._generate_name(name))
return names
def completion_names(self, evaluator, only_modules=False):
"""
        :param only_modules: Indicates whether it's possible to import a
definition that is not defined in a module.
"""
from jedi.evaluate import finder
names = []
if self.import_path:
# flask
if self.str_import_path == ('flask', 'ext'):
# List Flask extensions like ``flask_foo``
for mod in self._get_module_names():
modname = str(mod)
if modname.startswith('flask_'):
extname = modname[len('flask_'):]
names.append(self._generate_name(extname))
# Now the old style: ``flaskext.foo``
for dir in self.sys_path_with_modifications():
flaskext = os.path.join(dir, 'flaskext')
if os.path.isdir(flaskext):
names += self._get_module_names([flaskext])
for scope in self.follow():
# Non-modules are not completable.
if not scope.type == 'file_input': # not a module
continue
# namespace packages
if isinstance(scope, tree.Module) and scope.path.endswith('__init__.py'):
paths = scope.py__path__(self.sys_path_with_modifications())
names += self._get_module_names(paths)
if only_modules:
# In the case of an import like `from x.` we don't need to
# add all the variables.
if ('os',) == self.str_import_path and not self.level:
# os.path is a hardcoded exception, because it's a
# ``sys.modules`` modification.
names.append(self._generate_name('path'))
continue
for names_dict in scope.names_dicts(search_global=False):
_names = list(chain.from_iterable(names_dict.values()))
if not _names:
continue
_names = finder.filter_definition_names(_names, scope)
names += _names
else:
# Empty import path=completion after import
if not self.level:
names += self._get_module_names()
if self.file_path is not None:
path = os.path.abspath(self.file_path)
for i in range(self.level - 1):
path = os.path.dirname(path)
names += self._get_module_names([path])
return names
def _load_module(evaluator, path=None, source=None, sys_path=None):
def load(source):
dotted_path = path and compiled.dotted_from_fs_path(path, sys_path)
if path is not None and path.endswith('.py') \
and not dotted_path in settings.auto_import_modules:
if source is None:
with open(path, 'rb') as f:
source = f.read()
else:
return compiled.load_module(path)
p = path
p = fast.FastParser(evaluator.grammar, common.source_to_unicode(source), p)
cache.save_parser(path, p)
return p.module
cached = cache.load_parser(path)
module = load(source) if cached is None else cached.module
module = evaluator.wrap(module)
return module
def add_module(evaluator, module_name, module):
if '.' not in module_name:
# We cannot add paths with dots, because that would collide with
        # the separator dots for nested packages. Therefore we return
# `__main__` in ModuleWrapper.py__name__(), which is similar to
# Python behavior.
evaluator.modules[module_name] = module
def get_modules_containing_name(evaluator, mods, name):
"""
Search a name in the directories of modules.
"""
def check_python_file(path):
try:
return cache.parser_cache[path].parser.module
except KeyError:
try:
return check_fs(path)
except IOError:
return None
def check_fs(path):
with open(path, 'rb') as f:
source = source_to_unicode(f.read())
if name in source:
module_name = os.path.basename(path)[:-3] # Remove `.py`.
module = _load_module(evaluator, path, source)
add_module(evaluator, module_name, module)
return module
# skip non python modules
mods = set(m for m in mods if not isinstance(m, compiled.CompiledObject))
mod_paths = set()
for m in mods:
mod_paths.add(m.path)
yield m
if settings.dynamic_params_for_other_modules:
paths = set(settings.additional_dynamic_modules)
for p in mod_paths:
if p is not None:
d = os.path.dirname(p)
for entry in os.listdir(d):
if entry not in mod_paths:
if entry.endswith('.py'):
paths.add(d + os.path.sep + entry)
for p in sorted(paths):
# make testing easier, sort it - same results on every interpreter
c = check_python_file(p)
if c is not None and c not in mods and not isinstance(c, compiled.CompiledObject):
yield c
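# Illustrative sketch (not part of jedi): the directory-scan idea used by
# get_modules_containing_name() above, reduced to plain file handling. All names
# below are hypothetical and this is a simplification -- list the sibling *.py
# files of every known module path and yield those whose source mentions the
# searched name.
def _sibling_files_containing(name, module_paths):
    import os
    known = set(module_paths)
    for path in module_paths:
        if path is None:
            continue
        directory = os.path.dirname(path)
        for entry in sorted(os.listdir(directory)):
            candidate = os.path.join(directory, entry)
            if not entry.endswith('.py') or candidate in known:
                continue
            with open(candidate, 'rb') as f:
                if name.encode('utf-8') in f.read():
                    yield candidate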
| 0.000756 |
#!/usr/bin/env python
'''
Multithreaded video processing sample.
Usage:
video_threaded.py {<video device number>|<video file name>}
Shows how Python threading capabilities can be used
to organize a parallel captured-frame processing pipeline
for smoother playback.
Keyboard shortcuts:
ESC - exit
space - switch between multi and single threaded processing
'''
import numpy as np
import cv2
from multiprocessing.pool import ThreadPool
from collections import deque
from common import clock, draw_str, StatValue
import video
class DummyTask:
def __init__(self, data):
self.data = data
def ready(self):
return True
def get(self):
return self.data
if __name__ == '__main__':
import sys
print __doc__
try:
fn = sys.argv[1]
except:
fn = 0
cap = video.create_capture(fn)
def process_frame(frame, t0):
# some intensive computation...
frame = cv2.medianBlur(frame, 19)
frame = cv2.medianBlur(frame, 19)
return frame, t0
threadn = cv2.getNumberOfCPUs()
pool = ThreadPool(processes = threadn)
pending = deque()
threaded_mode = True
latency = StatValue()
frame_interval = StatValue()
last_frame_time = clock()
while True:
while len(pending) > 0 and pending[0].ready():
res, t0 = pending.popleft().get()
latency.update(clock() - t0)
draw_str(res, (20, 20), "threaded : " + str(threaded_mode))
draw_str(res, (20, 40), "latency : %.1f ms" % (latency.value*1000))
draw_str(res, (20, 60), "frame interval : %.1f ms" % (frame_interval.value*1000))
cv2.imshow('threaded video', res)
if len(pending) < threadn:
ret, frame = cap.read()
t = clock()
frame_interval.update(t - last_frame_time)
last_frame_time = t
if threaded_mode:
task = pool.apply_async(process_frame, (frame.copy(), t))
else:
task = DummyTask(process_frame(frame, t))
pending.append(task)
ch = 0xFF & cv2.waitKey(1)
if ch == ord(' '):
threaded_mode = not threaded_mode
if ch == 27:
break
cv2.destroyAllWindows()
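# Minimal standalone sketch of the same pipeline idea, without OpenCV (names are
# hypothetical): submit work to a ThreadPool, keep the pending AsyncResults in a
# deque, and pop them in submission order so the output stays ordered even though
# the items are processed in parallel.
def _pipeline_sketch(items, nworkers=4):
    from multiprocessing.pool import ThreadPool
    from collections import deque
    def work(x):
        return x * x                      # stand-in for the heavy per-frame work
    pool = ThreadPool(processes=nworkers)
    pending = deque()
    results = []
    for item in items:
        if len(pending) >= nworkers:      # bound the number of in-flight tasks
            results.append(pending.popleft().get())   # block on the oldest one
        pending.append(pool.apply_async(work, (item,)))
    while pending:                        # drain whatever is still in flight
        results.append(pending.popleft().get())
    pool.close()
    return results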
| 0.003915 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008 Zsolt Foldvari
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"Handling formatted ('rich text') strings"
from __future__ import print_function
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .styledtexttag import StyledTextTag
from ..constfunc import cuni, STRTYPE
#-------------------------------------------------------------------------
#
# StyledText class
#
#-------------------------------------------------------------------------
class StyledText(object):
"""Helper class to enable character based text formatting.
    StyledText is a wrapper class binding the clear text string and its
formatting tags together.
StyledText provides several string methods in order to manipulate
formatted strings, such as :meth:`~gen.lib.styledtext.StyledText.join`,
:meth:`~gen.lib.styledtext.StyledText.replace`,
:meth:`~gen.lib.styledtext.StyledText.split`, and also
supports the '+' operation (:meth:`~gen.lib.styledtext.StyledText.__add__`).
To get the clear text of the StyledText use the built-in str() function.
To get the list of formatting tags use the L{get_tags} method.
StyledText supports the I{creation} of formatted texts too. This feature
is intended to replace (or extend) the current report interface.
To be continued... FIXME
:ivar string: (str) The clear text part.
:ivar tags: (list of :class:`~gen.lib.styledtexttag.StyledTextTag`) Text
tags holding formatting information for the string.
:cvar POS_TEXT: Position of *string* attribute in the serialized format of
an instance.
:cvar POS_TAGS: (int) Position of *tags* attribute in the serialized format of
an instance.
:attention: The POS_<x> class variables reflect the serialized object,
they have to be updated in case the data structure or the L{serialize}
method changes!
:note:
1. There is no sanity check of tags in
:meth:`~gen.lib.styledtext.StyledText.__init__`, because when a
StyledText is displayed it is passed to a StyledTextBuffer, which
in turn will 'eat' all invalid tags (including out-of-range tags too).
2. After string methods the tags can become fragmented. That means the
same tag may appear more than once in the tag list with different ranges.
There could be a 'merge_tags' functionality in
:meth:`~gen.lib.styledtext.StyledText.__init__`, however
StyledTextBuffer will merge them automatically if the text is displayed.
"""
(POS_TEXT, POS_TAGS) = list(range(2))
def __init__(self, text="", tags=None):
"""Setup initial instance variable values."""
self._string = text
if tags:
self._tags = tags
else:
self._tags = []
# special methods
def __str__(self): return self._string.__str__()
def __repr__(self): return self._string.__repr__()
def __add__(self, other):
"""Implement '+' operation on the class.
:param other: string to concatenate to self
:type other: basestring or StyledText
:returns: concatenated strings
        :rtype: StyledText
"""
offset = len(self._string)
if isinstance(other, StyledText):
# need to join strings and merge tags
for tag in other._tags:
tag.ranges = [(start + offset, end + offset)
for (start, end) in tag.ranges]
return self.__class__("".join([self._string, other._string]),
self._tags + other._tags)
elif isinstance(other, STRTYPE):
# tags remain the same, only text becomes longer
return self.__class__("".join([self._string, other]), self._tags)
else:
return self.__class__("".join([self._string, str(other)]),
self._tags)
def __eq__(self, other):
return self._string == other._string and self._tags == other._tags
def __ne__(self, other):
return self._string != other._string or self._tags != other._tags
def __lt__(self, other):
return self._string < other._string
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__(self, other):
return not self.__le__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
def __mod__(self, other):
"""Implement '%' operation on the class."""
# test whether the formatting operation is valid at all
self._string % other
result = self.__class__(self._string, self._tags)
i0 = 0
while True:
i1 = result._string.find('%', i0)
if i1 < 0:
break
if result._string[i1+1] == '(':
i2 = result._string.find(')', i1+3)
param_name = result._string[i1+2:i2]
else:
i2 = i1
param_name = None
for i3 in range(i2+1, len(result._string)):
if result._string[i3] in 'diouxXeEfFgGcrs%':
break
if param_name is not None:
param = other[param_name]
elif isinstance(other, tuple):
param = other[0]
other = other[1:]
else:
param = other
if not isinstance(param, StyledText):
param = StyledText('%' + result._string[i2+1:i3+1] % param)
(before, after) = result.split(result._string[i1:i3+1], 1)
result = before + param + after
i0 = i3 + 1
return result
# private methods
# string methods in alphabetical order:
def join(self, seq):
"""Emulate __builtin__.str.join method.
:param seq: list of strings to join
:type seq: basestring or StyledText
:returns: joined strings
        :rtype: StyledText
"""
new_string = self._string.join([str(string) for string in seq])
offset = 0
new_tags = []
self_len = len(self._string)
for text in seq:
if isinstance(text, StyledText):
for tag in text._tags:
tag.ranges = [(start + offset, end + offset)
for (start, end) in tag.ranges]
new_tags += [tag]
offset = offset + len(str(text)) + self_len
return self.__class__(new_string, new_tags)
def replace(self, old, new, count=-1):
"""Emulate __builtin__.str.replace method.
:param old: substring to be replaced
:type old: basestring or StyledText
:param new: substring to replace by
:type new: StyledText
:param count: if given, only the first count occurrences are replaced
:type count: int
:returns: copy of the string with replaced substring(s)
        :rtype: StyledText
        @attention: in a correct implementation, parameter I{new}
            should accept StyledText or basestring; however, only StyledText
            is currently supported.
"""
# quick and dirty solution: works only if new.__class__ == StyledText
return new.join(self.split(old, count))
def split(self, sep=None, maxsplit=-1):
"""Emulate __builtin__.str.split method.
:param sep: the delimiter string
        :type sep: basestring or StyledText
:param maxsplit: if given, at most maxsplit splits are done
:type maxsplit: int
:returns: a list of the words in the string
        :rtype: list of StyledText
"""
# split the clear text first
if sep is not None:
sep = str(sep)
string_list = self._string.split(sep, maxsplit)
# then split the tags too
end_string = 0
styledtext_list = []
for string in string_list:
start_string = self._string.find(string, end_string)
end_string = start_string + len(string)
new_tags = []
for tag in self._tags:
new_tag = StyledTextTag(int(tag.name), tag.value)
for (start_tag, end_tag) in tag.ranges:
start = max(start_string, start_tag)
end = min(end_string, end_tag)
if start < end:
new_tag.ranges.append((start - start_string,
end - start_string))
if new_tag.ranges:
new_tags.append(new_tag)
styledtext_list.append(self.__class__(string, new_tags))
return styledtext_list
# other public methods
def serialize(self):
"""Convert the object to a serialized tuple of data.
:returns: Serialized format of the instance.
        :rtype: tuple
"""
if self._tags:
the_tags = [tag.serialize() for tag in self._tags]
else:
the_tags = []
return (self._string, the_tags)
def to_struct(self):
"""
Convert the data held in this object to a structure (eg,
struct) that represents all the data elements.
This method is used to recursively convert the object into a
self-documenting form that can easily be used for various
purposes, including diffs and queries.
These structures may be primitive Python types (string,
integer, boolean, etc.) or complex Python types (lists,
tuples, or dicts). If the return type is a dict, then the keys
of the dict match the fieldname of the object. If the return
struct (or value of a dict key) is a list, then it is a list
of structs. Otherwise, the struct is just the value of the
attribute.
:returns: Returns a struct containing the data of the object.
:rtype: dict
"""
if self._tags:
the_tags = [tag.to_struct() for tag in self._tags]
else:
the_tags = []
return {"string": self._string,
"tags": the_tags}
def unserialize(self, data):
"""Convert a serialized tuple of data to an object.
:param data: Serialized format of instance variables.
:type data: tuple
"""
(self._string, the_tags) = data
# I really wonder why this doesn't work... it does for all other types
#self._tags = [StyledTextTag().unserialize(tag) for tag in the_tags]
for tag in the_tags:
stt = StyledTextTag()
stt.unserialize(tag)
self._tags.append(stt)
return self
def get_tags(self):
"""Return the list of formatting tags.
:returns: The formatting tags applied on the text.
        :rtype: list of 0 or more :class:`~gen.lib.styledtexttag.StyledTextTag` instances.
"""
return self._tags
def get_string(self):
"""
Accessor for the associated string.
"""
return self._string
tags = property(get_tags)
string = property(get_string)
if __name__ == '__main__':
from .styledtexttagtype import StyledTextTagType
T1 = StyledTextTag(StyledTextTagType(1), 'v1', [(0, 2), (2, 4), (4, 6)])
T2 = StyledTextTag(StyledTextTagType(2), 'v2', [(1, 3), (3, 5), (0, 7)])
A = StyledText('123X456', [T1])
B = StyledText("abcXdef", [T2])
C = StyledText('\n')
S = 'cleartext'
C = C.join([A, S, B])
L = C.split()
C = C.replace('X', StyledText('_'))
A = A + B
print(A)
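# Illustrative sketch (independent of the class above): concatenation shifts the
# second operand's tag ranges by the length of the first string, which is the same
# bookkeeping done in __add__() and join(). Tags are reduced to bare (start, end)
# tuples here, which is a simplification.
def _concat_ranges(text_a, ranges_a, text_b, ranges_b):
    offset = len(text_a)
    shifted = [(start + offset, end + offset) for (start, end) in ranges_b]
    return text_a + text_b, ranges_a + shifted
# e.g. _concat_ranges('123', [(0, 2)], 'abc', [(1, 3)]) -> ('123abc', [(0, 2), (4, 6)])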
| 0.005129 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'HostVirtualDNSDriver'
]
import sys
try:
import simplejson as json
except:
import json
from libcloud.utils.py3 import httplib
from libcloud.utils.misc import merge_valid_keys, get_new_obj
from libcloud.common.hostvirtual import HostVirtualResponse
from libcloud.common.hostvirtual import HostVirtualConnection
from libcloud.compute.drivers.hostvirtual import API_ROOT
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
VALID_RECORD_EXTRA_PARAMS = ['prio', 'ttl']
class HostVirtualDNSResponse(HostVirtualResponse):
def parse_error(self):
context = self.connection.context
status = int(self.status)
if status == httplib.NOT_FOUND:
if context['resource'] == 'zone':
raise ZoneDoesNotExistError(
value=self.parse_body()['error']['message'],
driver=self, zone_id=context['id'])
elif context['resource'] == 'record':
raise RecordDoesNotExistError(
value=self.parse_body()['error']['message'],
driver=self, record_id=context['id'])
super(HostVirtualDNSResponse, self).parse_error()
return self.body
class HostVirtualDNSConnection(HostVirtualConnection):
responseCls = HostVirtualDNSResponse
class HostVirtualDNSDriver(DNSDriver):
type = Provider.HOSTVIRTUAL
name = 'Host Virtual DNS'
website = 'https://www.hostvirtual.com/'
connectionCls = HostVirtualDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
        RecordType.NS: 'NS',
RecordType.SRV: 'SRV',
RecordType.TXT: 'TXT',
}
def __init__(self, key, secure=True, host=None, port=None):
super(HostVirtualDNSDriver, self).__init__(key=key, secure=secure,
host=host, port=port)
def _to_zones(self, items):
zones = []
for item in items:
zones.append(self._to_zone(item))
return zones
def _to_zone(self, item):
extra = {}
if 'records' in item:
extra['records'] = item['records']
if item['type'] == 'NATIVE':
item['type'] = 'master'
zone = Zone(id=item['id'], domain=item['name'],
type=item['type'], ttl=item['ttl'],
driver=self, extra=extra)
return zone
def _to_records(self, items, zone=None):
records = []
for item in items:
records.append(self._to_record(item=item, zone=zone))
return records
def _to_record(self, item, zone=None):
extra = {'ttl': item['ttl']}
type = self._string_to_record_type(item['type'])
name = item['name'][:-len(zone.domain) - 1]
record = Record(id=item['id'], name=name,
type=type, data=item['content'],
zone=zone, driver=self, extra=extra)
return record
def list_zones(self):
result = self.connection.request(
API_ROOT + '/dns/zones/').object
zones = self._to_zones(result)
return zones
def list_records(self, zone):
params = {'id': zone.id}
self.connection.set_context({'resource': 'zone', 'id': zone.id})
try:
result = self.connection.request(
API_ROOT + '/dns/records/', params=params).object
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
if 'Not Found: No Records Found' in e.value:
return []
raise e
records = self._to_records(items=result, zone=zone)
return records
def get_zone(self, zone_id):
params = {'id': zone_id}
self.connection.set_context({'resource': 'zone', 'id': zone_id})
result = self.connection.request(
API_ROOT + '/dns/zone/', params=params).object
if 'id' not in result:
raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone_id)
zone = self._to_zone(result)
return zone
def get_record(self, zone_id, record_id):
zone = self.get_zone(zone_id=zone_id)
params = {'id': record_id}
self.connection.set_context({'resource': 'record', 'id': record_id})
result = self.connection.request(
API_ROOT + '/dns/record/', params=params).object
if 'id' not in result:
raise RecordDoesNotExistError(value='',
driver=self, record_id=record_id)
record = self._to_record(item=result, zone=zone)
return record
def delete_zone(self, zone):
params = {'id': zone.id}
self.connection.set_context({'resource': 'zone', 'id': zone.id})
result = self.connection.request(
API_ROOT + '/dns/zone/', params=params, method='DELETE').object
return bool(result)
def delete_record(self, record):
params = {'id': record.id}
self.connection.set_context({'resource': 'record', 'id': record.id})
result = self.connection.request(
API_ROOT + '/dns/record/', params=params, method='DELETE').object
return bool(result)
def create_zone(self, domain, type='NATIVE', ttl=None, extra=None):
if type == 'master':
type = 'NATIVE'
elif type == 'slave':
type = 'SLAVE'
params = {'name': domain, 'type': type, 'ttl': ttl}
result = self.connection.request(
API_ROOT + '/dns/zone/',
data=json.dumps(params), method='POST').object
extra = {
'soa': result['soa'],
'ns': result['ns']
}
zone = Zone(id=result['id'], domain=domain,
type=type, ttl=ttl, extra=extra, driver=self)
return zone
def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
params = {'id': zone.id}
if domain:
params['name'] = domain
if type:
params['type'] = type
self.connection.set_context({'resource': 'zone', 'id': zone.id})
self.connection.request(API_ROOT + '/dns/zone/',
data=json.dumps(params), method='PUT').object
updated_zone = get_new_obj(
obj=zone, klass=Zone,
attributes={
'domain': domain,
'type': type,
'ttl': ttl,
'extra': extra
})
return updated_zone
def create_record(self, name, zone, type, data, extra=None):
params = {
'name': name,
'type': self.RECORD_TYPE_MAP[type],
'domain_id': zone.id,
'content': data
}
merged = merge_valid_keys(
params=params,
valid_keys=VALID_RECORD_EXTRA_PARAMS,
extra=extra
)
self.connection.set_context({'resource': 'zone', 'id': zone.id})
result = self.connection.request(
API_ROOT + '/dns/record/',
data=json.dumps(params), method='POST').object
record = Record(id=result['id'], name=name,
type=type, data=data,
extra=merged, zone=zone, driver=self)
return record
def update_record(self, record, name=None, type=None,
data=None, extra=None):
params = {
'domain_id': record.zone.id,
'record_id': record.id
}
if name:
params['name'] = name
if data:
params['content'] = data
if type is not None:
params['type'] = self.RECORD_TYPE_MAP[type]
merged = merge_valid_keys(
params=params,
valid_keys=VALID_RECORD_EXTRA_PARAMS,
extra=extra
)
self.connection.set_context({'resource': 'record', 'id': record.id})
self.connection.request(API_ROOT + '/dns/record/',
data=json.dumps(params), method='PUT').object
updated_record = get_new_obj(
obj=record, klass=Record, attributes={
'name': name, 'data': data,
'type': type,
'extra': merged
})
return updated_record
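# Hedged usage sketch (not part of the driver; requires valid credentials): how a
# libcloud DNS driver of this kind is typically exercised. 'MY_API_KEY' and the
# record data below are placeholders.
def _example_usage():
    driver = HostVirtualDNSDriver(key='MY_API_KEY')
    zone = driver.create_zone(domain='example.com', type='master', ttl=3600)
    record = driver.create_record(name='www', zone=zone,
                                  type=RecordType.A, data='192.0.2.10')
    zones = driver.list_zones()           # round trip: the new zone should be listed
    records = driver.list_records(zone)   # and the new record with it
    driver.delete_record(record)
    driver.delete_zone(zone)
    return zones, records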
| 0.000108 |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Protocol buffer support for message types.
For more details about protocol buffer encoding and decoding please see:
http://code.google.com/apis/protocolbuffers/docs/encoding.html
Public Exceptions:
DecodeError: Raised when a decode error occurs from incorrect protobuf format.
Public Functions:
encode_message: Encodes a message in to a protocol buffer string.
decode_message: Decode from a protocol buffer string to a message.
"""
import array
from . import message_types
from . import messages
from . import util
from .google_imports import ProtocolBuffer
__all__ = ['ALTERNATIVE_CONTENT_TYPES',
'CONTENT_TYPE',
'encode_message',
'decode_message',
]
CONTENT_TYPE = 'application/octet-stream'
ALTERNATIVE_CONTENT_TYPES = ['application/x-google-protobuf']
class _Encoder(ProtocolBuffer.Encoder):
"""Extension of protocol buffer encoder.
Original protocol buffer encoder does not have complete set of methods
for handling required encoding. This class adds them.
"""
# TODO(user): Implement the missing encoding types.
def no_encoding(self, value):
"""No encoding available for type.
Args:
value: Value to encode.
Raises:
NotImplementedError at all times.
"""
raise NotImplementedError()
def encode_enum(self, value):
"""Encode an enum value.
Args:
value: Enum to encode.
"""
self.putVarInt32(value.number)
def encode_message(self, value):
"""Encode a Message in to an embedded message.
Args:
value: Message instance to encode.
"""
self.putPrefixedString(encode_message(value))
def encode_unicode_string(self, value):
"""Helper to properly pb encode unicode strings to UTF-8.
Args:
value: String value to encode.
"""
if isinstance(value, unicode):
value = value.encode('utf-8')
self.putPrefixedString(value)
class _Decoder(ProtocolBuffer.Decoder):
"""Extension of protocol buffer decoder.
Original protocol buffer decoder does not have complete set of methods
for handling required decoding. This class adds them.
"""
# TODO(user): Implement the missing encoding types.
def no_decoding(self):
"""No decoding available for type.
Raises:
NotImplementedError at all times.
"""
raise NotImplementedError()
def decode_string(self):
"""Decode a unicode string.
Returns:
Next value in stream as a unicode string.
"""
return self.getPrefixedString().decode('UTF-8')
def decode_boolean(self):
"""Decode a boolean value.
Returns:
Next value in stream as a boolean.
"""
return bool(self.getBoolean())
# Number of low-order bits in a field tag that encode the wire type (variant).
_WIRE_TYPE_BITS = 3
_WIRE_TYPE_MASK = 7
# Maps variant to underlying wire type. Many variants map to same type.
_VARIANT_TO_WIRE_TYPE = {
messages.Variant.DOUBLE: _Encoder.DOUBLE,
messages.Variant.FLOAT: _Encoder.FLOAT,
messages.Variant.INT64: _Encoder.NUMERIC,
messages.Variant.UINT64: _Encoder.NUMERIC,
messages.Variant.INT32: _Encoder.NUMERIC,
messages.Variant.BOOL: _Encoder.NUMERIC,
messages.Variant.STRING: _Encoder.STRING,
messages.Variant.MESSAGE: _Encoder.STRING,
messages.Variant.BYTES: _Encoder.STRING,
messages.Variant.UINT32: _Encoder.NUMERIC,
messages.Variant.ENUM: _Encoder.NUMERIC,
messages.Variant.SINT32: _Encoder.NUMERIC,
messages.Variant.SINT64: _Encoder.NUMERIC,
}
# Maps variant to encoder method.
_VARIANT_TO_ENCODER_MAP = {
messages.Variant.DOUBLE: _Encoder.putDouble,
messages.Variant.FLOAT: _Encoder.putFloat,
messages.Variant.INT64: _Encoder.putVarInt64,
messages.Variant.UINT64: _Encoder.putVarUint64,
messages.Variant.INT32: _Encoder.putVarInt32,
messages.Variant.BOOL: _Encoder.putBoolean,
messages.Variant.STRING: _Encoder.encode_unicode_string,
messages.Variant.MESSAGE: _Encoder.encode_message,
messages.Variant.BYTES: _Encoder.encode_unicode_string,
messages.Variant.UINT32: _Encoder.no_encoding,
messages.Variant.ENUM: _Encoder.encode_enum,
messages.Variant.SINT32: _Encoder.no_encoding,
messages.Variant.SINT64: _Encoder.no_encoding,
}
# Basic wire format decoders. Used for reading unknown values.
_WIRE_TYPE_TO_DECODER_MAP = {
_Encoder.NUMERIC: _Decoder.getVarInt64,
_Encoder.DOUBLE: _Decoder.getDouble,
_Encoder.STRING: _Decoder.getPrefixedString,
_Encoder.FLOAT: _Decoder.getFloat,
}
# Map wire type to variant. Used to find a variant for unknown values.
_WIRE_TYPE_TO_VARIANT_MAP = {
_Encoder.NUMERIC: messages.Variant.INT64,
_Encoder.DOUBLE: messages.Variant.DOUBLE,
_Encoder.STRING: messages.Variant.STRING,
_Encoder.FLOAT: messages.Variant.FLOAT,
}
# Wire type to name mapping for error messages.
_WIRE_TYPE_NAME = {
_Encoder.NUMERIC: 'NUMERIC',
_Encoder.DOUBLE: 'DOUBLE',
_Encoder.STRING: 'STRING',
_Encoder.FLOAT: 'FLOAT',
}
# Maps variant to decoder method.
_VARIANT_TO_DECODER_MAP = {
messages.Variant.DOUBLE: _Decoder.getDouble,
messages.Variant.FLOAT: _Decoder.getFloat,
messages.Variant.INT64: _Decoder.getVarInt64,
messages.Variant.UINT64: _Decoder.getVarUint64,
messages.Variant.INT32: _Decoder.getVarInt32,
messages.Variant.BOOL: _Decoder.decode_boolean,
messages.Variant.STRING: _Decoder.decode_string,
messages.Variant.MESSAGE: _Decoder.getPrefixedString,
messages.Variant.BYTES: _Decoder.getPrefixedString,
messages.Variant.UINT32: _Decoder.no_decoding,
messages.Variant.ENUM: _Decoder.getVarInt32,
messages.Variant.SINT32: _Decoder.no_decoding,
messages.Variant.SINT64: _Decoder.no_decoding,
}
def encode_message(message):
"""Encode Message instance to protocol buffer.
Args:
    message: Message instance to encode into a protocol buffer.
Returns:
String encoding of Message instance in protocol buffer format.
Raises:
messages.ValidationError if message is not initialized.
"""
message.check_initialized()
encoder = _Encoder()
# Get all fields, from the known fields we parsed and the unknown fields
# we saved. Note which ones were known, so we can process them differently.
all_fields = [(field.number, field) for field in message.all_fields()]
all_fields.extend((key, None)
for key in message.all_unrecognized_fields()
if isinstance(key, (int, long)))
all_fields.sort()
for field_num, field in all_fields:
if field:
# Known field.
value = message.get_assigned_value(field.name)
if value is None:
continue
variant = field.variant
repeated = field.repeated
else:
# Unrecognized field.
value, variant = message.get_unrecognized_field_info(field_num)
if not isinstance(variant, messages.Variant):
continue
repeated = isinstance(value, (list, tuple))
tag = ((field_num << _WIRE_TYPE_BITS) | _VARIANT_TO_WIRE_TYPE[variant])
# Write value to wire.
if repeated:
values = value
else:
values = [value]
for next in values:
encoder.putVarInt32(tag)
if isinstance(field, messages.MessageField):
next = field.value_to_message(next)
field_encoder = _VARIANT_TO_ENCODER_MAP[variant]
field_encoder(encoder, next)
return encoder.buffer().tostring()
def decode_message(message_type, encoded_message):
"""Decode protocol buffer to Message instance.
Args:
message_type: Message type to decode data to.
encoded_message: Encoded version of message as string.
Returns:
Decoded instance of message_type.
Raises:
DecodeError if an error occurs during decoding, such as incompatible
wire format for a field.
messages.ValidationError if merged message is not initialized.
"""
message = message_type()
message_array = array.array('B')
message_array.fromstring(encoded_message)
try:
decoder = _Decoder(message_array, 0, len(message_array))
while decoder.avail() > 0:
# Decode tag and variant information.
encoded_tag = decoder.getVarInt32()
tag = encoded_tag >> _WIRE_TYPE_BITS
wire_type = encoded_tag & _WIRE_TYPE_MASK
try:
found_wire_type_decoder = _WIRE_TYPE_TO_DECODER_MAP[wire_type]
      except KeyError:
raise messages.DecodeError('No such wire type %d' % wire_type)
if tag < 1:
raise messages.DecodeError('Invalid tag value %d' % tag)
try:
field = message.field_by_number(tag)
except KeyError:
# Unexpected tags are ok.
field = None
wire_type_decoder = found_wire_type_decoder
else:
expected_wire_type = _VARIANT_TO_WIRE_TYPE[field.variant]
if expected_wire_type != wire_type:
raise messages.DecodeError('Expected wire type %s but found %s' % (
_WIRE_TYPE_NAME[expected_wire_type],
_WIRE_TYPE_NAME[wire_type]))
wire_type_decoder = _VARIANT_TO_DECODER_MAP[field.variant]
value = wire_type_decoder(decoder)
# Save unknown fields and skip additional processing.
if not field:
# When saving this, save it under the tag number (which should
# be unique), and set the variant and value so we know how to
# interpret the value later.
variant = _WIRE_TYPE_TO_VARIANT_MAP.get(wire_type)
if variant:
message.set_unrecognized_field(tag, value, variant)
continue
# Special case Enum and Message types.
if isinstance(field, messages.EnumField):
try:
value = field.type(value)
except TypeError:
raise messages.DecodeError('Invalid enum value %s' % value)
elif isinstance(field, messages.MessageField):
value = decode_message(field.message_type, value)
value = field.value_from_message(value)
# Merge value in to message.
if field.repeated:
values = getattr(message, field.name)
if values is None:
setattr(message, field.name, [value])
else:
values.append(value)
else:
setattr(message, field.name, value)
except ProtocolBuffer.ProtocolBufferDecodeError, err:
raise messages.DecodeError('Decoding error: %s' % str(err))
message.check_initialized()
return message
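# Worked example of the tag arithmetic used by encode_message()/decode_message()
# above (a sketch, not part of the public API): the field number and the wire type
# share a single varint tag.
def _pack_tag(field_number, wire_type):
  return (field_number << _WIRE_TYPE_BITS) | wire_type
def _unpack_tag(encoded_tag):
  return encoded_tag >> _WIRE_TYPE_BITS, encoded_tag & _WIRE_TYPE_MASK
# For instance _pack_tag(1, 2) == 10: field number 1 with wire type 2 (the
# length-delimited type used for strings and nested messages), and
# _unpack_tag(10) == (1, 2).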
| 0.006861 |
#!/usr/bin/env python
# -*- Mode: Python; py-indent-offset: 4 -*-
import sys, os, getopt
module_init_template = \
'/* -*- Mode: C; c-basic-offset: 4 -*- */\n' + \
'#ifdef HAVE_CONFIG_H\n' + \
'# include "config.h"\n' + \
'#endif\n' + \
'#include <Python.h>\n' + \
'#include <pygtk.h>\n' + \
'\n' + \
'/* include any extra headers needed here */\n' + \
'\n' + \
'void %(prefix)s_register_classes(PyObject *d);\n' + \
'extern PyMethodDef %(prefix)s_functions[];\n' + \
'\n' + \
'DL_EXPORT(void)\n' + \
'init%(module)s(void)\n' + \
'{\n' + \
' PyObject *m, *d;\n' + \
'\n' + \
' /* perform any initialisation required by the library here */\n' + \
'\n' + \
' m = Py_InitModule("%(module)s", %(prefix)s_functions);\n' + \
' d = PyModule_GetDict(m);\n' + \
'\n' + \
' init_pygtk();\n' + \
'\n' + \
' %(prefix)s_register_classes(d);\n' + \
'\n' + \
' /* add anything else to the module dictionary (such as constants) */\n' +\
'\n' + \
' if (PyErr_Occurred())\n' + \
' Py_FatalError("could not initialise module %(module)s");\n' + \
'}\n'
override_template = \
'/* -*- Mode: C; c-basic-offset: 4 -*- */\n' + \
'%%%%\n' + \
'headers\n' + \
'/* include any required headers here */\n' + \
'%%%%\n' + \
'init\n' + \
' /* include any code here that needs to be executed before the\n' + \
' * extension classes get initialised */\n' + \
'%%%%\n' + \
'\n' + \
'/* you should add appropriate ignore, ignore-glob and\n' + \
' * override sections here */\n'
def open_with_backup(file):
if os.path.exists(file):
try:
os.rename(file, file+'~')
except OSError:
# fail silently if we can't make a backup
pass
return open(file, 'w')
def write_skels(fileprefix, prefix, module):
fp = open_with_backup(fileprefix+'module.c')
fp.write(module_init_template % { 'prefix': prefix, 'module': module })
fp.close()
fp = open_with_backup(fileprefix+'.override')
fp.write(override_template % { 'prefix': prefix, 'module': module })
fp.close()
if __name__ == '__main__':
opts, args = getopt.getopt(sys.argv[1:], 'f:p:m:h',
['file-prefix=', 'prefix=', 'module=', 'help'])
fileprefix = None
prefix = None
module = None
for opt, arg in opts:
if opt in ('-f', '--file-prefix'):
fileprefix = arg
elif opt in ('-p', '--prefix'):
prefix = arg
elif opt in ('-m', '--module'):
module = arg
elif opt in ('-h', '--help'):
print 'usage: mkskel.py -f fileprefix -p prefix -m module'
sys.exit(0)
if not fileprefix or not prefix or not module:
print 'usage: mkskel.py -f fileprefix -p prefix -m module'
sys.exit(1)
write_skels(fileprefix, prefix, module)
| 0.018195 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 15 17:36:51 2016
Draw integrated energy plot
@author: bottero
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np # NumPy (multidimensional arrays, linear algebra, ...)
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import os,sys,glob,shutil
import argparse # To deal with arguments :
# https://docs.python.org/2/library/argparse.html
import scipy.ndimage
from scipy import interpolate
def representsInt(s):
try:
int(s)
return True
except ValueError:
return False
def representsFloat(s):
try:
float(s)
return True
except ValueError:
return False
class ParFile:
""" This class is used to store the data contained on a specfem2d
par_file"""
def __init__(self,pathToParFile):
""" Constructor (what happen when we do aParFile=ParFile(path)) """
self.path=pathToParFile
self.nt=''
self.dt=''
if os.path.exists(self.path):
self.readParFile()
else:
raise IOError('Impossible to read '+pathToParFile)
def readParFile(self):
""" Open a the par_file and read some values """
with open(self.path) as parFile:
for line in parFile:
if 'NSTEP=' in line.replace(" ", ""):
self.nt=line.split(" = ")[1].split("#")[0].strip()
                    if representsInt(self.nt): # Verify that the extracted string is an int
self.nt = int(self.nt)
else:
raise ValueError('Incorrect value of NSTEP read')
if 'DT=' in line.replace(" ", ""):
self.dt=line.split(" = ")[1].split("#")[0].strip()
                    self.dt=self.dt.replace("d","e").replace("D","E") # Convert to real scientific format
                    if representsFloat(self.dt): # Verify that the extracted string is a float
self.dt = float(self.dt)
else:
raise ValueError('Incorrect value of DT read')
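def _parfile_selftest():
    """Hedged self-test sketch (not part of the original script): write a minimal
    Par_file with made-up values and check that NSTEP and DT are parsed as above."""
    import tempfile
    content = "NSTEP = 1600 # number of time steps\nDT = 2.5d-4 # time step\n"
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='Par_file', delete=False)
    tmp.write(content)
    tmp.close()
    par = ParFile(tmp.name)
    os.remove(tmp.name)
    return par.nt, par.dt   # -> (1600, 0.00025)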
def find_index(x,z,xil,zil):
"""Return the indices of the closest point in 2D array"""
idxX=np.searchsorted(xil,x)
idxZ=np.searchsorted(zil,z)
return idxX,idxZ
def interpolateValue(array,xil,zil,x,z):
"""Return the value of the 2D field described by array,xil and zil at (x,z)"""
idxX,idxZ = find_index(x,z,xil,zil)
xLine, zLine = np.array([idxX]), np.array([idxZ])
# Extract the values along the line, using cubic interpolation
    if type(array) == np.ndarray: # plain ndarray; masked arrays need .filled() below
        zi = scipy.ndimage.map_coordinates(array, np.vstack((zLine,xLine)),order=1)
    else:
        zi = scipy.ndimage.map_coordinates(array.filled(), np.vstack((zLine,xLine)),order=1)
return zi[0]
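def _find_index_example():
    """Hedged worked example (not used by the script): on a regular grid the indices
    returned by find_index() are the np.searchsorted insertion positions of the
    coordinates in the 1-D grid vectors."""
    xil_demo = np.linspace(0.0, 10.0, 11)    # grid nodes 0, 1, ..., 10
    zil_demo = np.linspace(-5.0, 0.0, 6)     # grid nodes -5, -4, ..., 0
    return find_index(3.2, -1.7, xil_demo, zil_demo)   # -> (4, 4)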
####################### PARSE ARGUMENTS #######################
# Here we read the argument given and we check them
parser = argparse.ArgumentParser(
description="This script plot the files total_integrated_energy_fieldXXXXX representing the energy that have crossed each point")
parser.add_argument("--input_directory","-d",type=str,default="./",
help="input_directory: directory where we can find the files total_integrated_energy_fieldXXXXX")
parser.add_argument("--par_file_directory",type=str,default="../",
help="par_file_directory: directory where we can find the Par_file of the run. Default: input_directory/../")
parser.add_argument("--name_of_files","-n",type=str,default="total_integrated_energy_field",
help="name_of_files: to plot something different than total_integrated_energy_fieldXXXXX")
parser.add_argument('-p','--profiles', action='store_true',
help='profiles: calculate energy profiles')
parser.add_argument('-nc','--no_concatenate_files', action='store_true',
                    help="no_concatenate_files: don't concatenate files at the beginning of the script")
parser.add_argument('-nl','--nolog', action='store_true',
                    help='nolog: do not apply log')
parser.add_argument("--title","-t",type=str,default="",
help="title : title of the figures")
parser.add_argument("-nx",type=int,default=300,
help="nx: number of x points for interpolated field")
parser.add_argument("-nz",type=int,default=300,
help="nz: number of z points for interpolated field")
parser.add_argument('-w','--writeInterpolatedField', action='store_true',
                    help='writeInterpolatedField: write interpolated field to file and integrated energy as a function of range if needed')
parser.add_argument('-sp','--saveOneProfile',nargs=4,
help='saveOneProfile: Save one X or Z profile to file (set it in source code). -sp x0 x1 z0 z1. Ex: -sp 0.0 10000.0 -300 -300')
parser.add_argument('-q','--quickDisplay', action='store_true',
help='quickDisplay: display after a simple linear interpolation')
parser.add_argument('--displayPoints', action='store_true',
help='displayPoints: plot the data points')
parser.add_argument('--clim',nargs=2,default=[],
help='clim: set limits of the color bar')
parser.add_argument('--xlim',nargs=2,default=[],
help='xlim: set the x limits of the plots')
parser.add_argument('--zlim',nargs=2,default=[],
help='zlim: set the z limits of the plots')
parser.add_argument('--ref',nargs=2,default=[300,-2500],
help='ref: Point that we set to 1')
parser.add_argument('--rgs', action='store_true',
                    help='rgs: Compensate geometrical spreading by multiplying by r')
parser.add_argument('--nxnzProfiles',nargs=2,default=[10,10],
help='nxnz: set the number of x and z profiles to plot')
parser.add_argument('--noplot', action='store_true',
help='noplot: do not plot anything')
parser.add_argument("--substract","-s",type=str,default="",
help="substract : substract the field with the field given here")
parser.add_argument('-r','--reset', action='store_true',
help='reset: delete all field previously built')
parser.add_argument('-v','--verbose', action='store_true',
help='verbose: display more information')
args = parser.parse_args()
directory=args.input_directory
fontsize = 14
zminProfiles = -10000
zmaxProfiles = -100 #-200 # TODO (-650m for 0.5Hz, -300m for 2Hz...)
num = 2000 # Number of points to describe the profiles
# Check
if not os.path.isdir(directory):
print("Wrong directory! "+directory)
sys.exit(0)
if directory[-1] != '/': # If the given path doesn't end with a slash...
    directory=directory+'/' # ... we add one
if args.par_file_directory:
    if args.par_file_directory[-1] != '/': # If the given path doesn't end with a slash...
        par_file_directory=args.par_file_directory+'/' # ... we add one
else:
par_file_directory = args.par_file_directory
else:
par_file_directory = directory+"../../"
if args.name_of_files[0] == '/': # If the full path has been given
directory = ""
if not glob.glob(directory+args.name_of_files+"*"): # If we don't find any matching energy file...
print("No files "+directory+args.name_of_files+"* were found!")
sys.exit(0)
# Concatenate all the files (if specfem has been run on parallel each proc has created its own files)
if not os.path.isfile(directory+args.name_of_files+"All") or args.reset: # If the concatenation has not already been done
if args.verbose:
print("Concatenate files...")
if not args.no_concatenate_files:
with open(directory+args.name_of_files+"All", 'w') as outfile:
for infile in glob.glob(directory+args.name_of_files+"0*"): # !!Warning!! The 0 is important!! Otherwise infinite loop
shutil.copyfileobj(open(infile), outfile)
else:
print(directory+args.name_of_files+"All has been found!")
#### OPTION SUBSTRACT ####
if args.substract:
if not glob.glob(args.substract+"*"): # If we don't find any matching energy file...
print("No files "+args.substract+"* were found!")
sys.exit(0)
# Concatenate all the files (if specfem has been run on parallel each proc has created its own files)
if not os.path.isfile(args.substract+"All") or args.reset: # If the concatenation has not already been done
if args.verbose:
print("Concatenate files of substracted field...")
if not args.no_concatenate_files:
with open(args.substract+"All", 'w') as outfile:
for infile in glob.glob(args.substract+"0*"): # !!Warning!! The 0 is important!! Otherwise infinite loop
shutil.copyfileobj(open(infile), outfile)
else:
print(args.substract+"All has been found!")
##########################
if args.verbose:
print("Done")
plt.close('all')
# Load data
if args.verbose:
print("Load data in "+directory+args.name_of_files+"All")
x,z,intEnergy = np.loadtxt(directory+args.name_of_files+"All").T
#### OPTION SUBSTRACT ####
if args.substract:
if args.verbose:
print("Load data in "+args.substract+"All")
xSubstract,zSubstract,intEnergySubstract = np.loadtxt(args.substract+"All").T
##########################
#if args.verbose:
# print("Load seismograms "+directory+"AA.S0001.BXX.semv AA.S0001.BXZ.semv at 300m from the source")
#t,vx0=np.loadtxt(directory+"AA.S0001.BXX.semv").T
#t,vz0=np.loadtxt(directory+"AA.S0001.BXZ.semv").T
if args.verbose:
print("Done")
factorGs = 1.0 # Factor to compensate geometrical spreading if asked
factorGsSubstract = 1.0 # Factor to compensate geometrical spreading if asked
if args.rgs: # Remove geometrical spreading
factorGs = x
if args.substract:
factorGsSubstract = xSubstract
if "integrated" in args.name_of_files: # We have to multiply by dt
#scalingFactor = (vx0**2+vz0**2).sum()
if args.verbose:
print("Opening Par_file in ",par_file_directory,"...")
par_file=ParFile(par_file_directory+'Par_file') # Open and read the Par_file
intEnergy = intEnergy * par_file.dt * factorGs
#intEnergy = intEnergy/scalingFactor
if args.substract:
intEnergySubstract = intEnergySubstract * par_file.dt * factorGsSubstract
if "max" in args.name_of_files:
#scalingFactor = (vx0**2+vz0**2).max()
intEnergy = intEnergy * factorGs
#intEnergy = intEnergy/scalingFactor
if args.substract:
intEnergySubstract = intEnergySubstract * factorGsSubstract
mask0=~np.isinf(intEnergy)
intEnergy[~mask0]=min(intEnergy[mask0])
if args.substract:
mask0substract=~np.isinf(intEnergySubstract)
intEnergySubstract[~mask0substract]=min(intEnergySubstract[mask0substract])
nxProfiles = int(args.nxnzProfiles[0])
nzProfiles = int(args.nxnzProfiles[1])
# Color map to use:
cmap = cm.BuPu #cm.Greys #cm.BuPu
if args.clim:
climMin = float(args.clim[0])
climMax = float(args.clim[1])
# Display limits:
if args.xlim:
xmin=float(args.xlim[0])
xmax=float(args.xlim[1])
else:
xmin=x.min()
xmax=0.98*x.max()
if args.zlim:
zmin=float(args.zlim[0])
zmax=float(args.zlim[1])
else:
zmin=z.min()+0.001*(z.max()-z.min())
zmax=z.max()-0.001*(z.max()-z.min())
#print("zmin:",zmin,"zmax:",zmax)
if args.displayPoints:
if not args.noplot:
plt.plot(x,z,'o')
plt.show()
if args.quickDisplay: # Quick ways to plot the energy using the non homogeneous grid:
if not args.nolog:
intEnergy = 10.0*np.log10(intEnergy)
if not args.noplot:
#plt.tricontourf(x,z,intEnergy,20,shading='gouraud',extend="both",cmap=cmap)
plt.figure(figsize=(6,2))
plt.tripcolor(x,z,intEnergy,shading='gouraud',cmap=cmap)
plt.axis([xmin, xmax, zmin, zmax])
plt.colorbar()
if args.clim:
plt.clim(climMin,climMax)
plt.show()
sys.exit()
#cmap.set_bad('w',1.)
#%%
# Interpolation on a regular grid
# Size of regular grid
nx, nz = args.nx,args.nz
# Margins around the model
xmargin = (xmax - xmin) / 1000.0
zmargin = (zmax - zmin) / 1000.0
# Generate a regular grid to interpolate the data.
xil = np.linspace(xmin-xmargin, xmax+xmargin, nx)
zil = np.linspace(zmin-zmargin, zmax+zmargin, nz)
xi, zi = np.meshgrid(xil, zil)
#print("TODO max zil:",zil.max()," min zil:",zil.min())
if os.path.isfile(directory+args.name_of_files+"AllInterpolatedx"+str(nx)+"z"+str(nz)) and not args.reset: # If the interpolation has already been done and written
if args.verbose:
print("Interpolated field file has been found. Loading...")
intEnergyi = np.load(directory+args.name_of_files+"AllInterpolatedx"+str(nx)+"z"+str(nz))
if args.verbose:
print("Done")
else:
# Interpolate using delaunay triangularization:
if args.verbose:
print("Interpolation...")
intEnergyi = mlab.griddata(x,z,intEnergy,xi,zi,interp="linear")
if args.verbose:
print("Done")
if args.writeInterpolatedField:
if args.verbose:
print("Writing the interpolated field to file..."+directory+args.name_of_files+"AllInterpolatedx"+str(nx)+"z"+str(nz))
intEnergyi.dump(directory+args.name_of_files+"AllInterpolatedx"+str(nx)+"z"+str(nz))
if args.verbose:
print("Done")
#### OPTION SUBSTRACT ####
if args.substract:
if os.path.isfile(args.substract+"AllInterpolatedx"+str(nx)+"z"+str(nz)) and not args.reset: # If the interpolation has already been done and written
if args.verbose:
print("Interpolated substracted field file has been found. Loading...")
intEnergyiSubstract = np.load(args.substract+"AllInterpolatedx"+str(nx)+"z"+str(nz))
if args.verbose:
print("Done")
else:
# Interpolate using delaunay triangularization:
if args.verbose:
print("Interpolation of substracted file...")
intEnergyiSubstract = mlab.griddata(xSubstract,zSubstract,intEnergySubstract,xi,zi,interp="linear")
if args.verbose:
print("Done")
if args.writeInterpolatedField:
if args.verbose:
print("Writing the interpolated substracted field to file..."+args.substract+"AllInterpolatedx"+str(nx)+"z"+str(nz))
intEnergyiSubstract.dump(args.substract+"AllInterpolatedx"+str(nx)+"z"+str(nz))
if args.verbose:
print("Done")
intEnergyi = abs(intEnergyi - intEnergyiSubstract) # Substract field with given other field
##########################
intEnergyi = np.log10(intEnergyi)
# Normalize
#if "max" in args.name_of_files or "integrated" in args.name_of_files:
# if not args.substract: # TODO maybe not needed but could create problem
# if args.verbose:
# print("Normalizing...")
# valueAtRef = interpolateValue(intEnergyi,xil,zil,float(args.ref[0]),float(args.ref[1]))
# if args.verbose:
# print("Value at reference point (",args.ref[0],",",args.ref[1],") is ",valueAtRef)
# intEnergyi = intEnergyi - valueAtRef
# valueAtRef = interpolateValue(intEnergyi,xil,zil,float(args.ref[0]),float(args.ref[1]))
# if args.verbose:
# print("Value at reference point (",args.ref[0],",",args.ref[1],") is ",valueAtRef)
# if args.verbose:
# print("Done")
#Plot:
if not args.noplot:
if args.verbose:
print("Plots...")
plt.figure(1,figsize=(15,6))
plt.pcolormesh(xi,zi,intEnergyi,shading='gouraud',cmap=cmap)
plt.colorbar()
plt.axis([xmin, xmax, zmin, zmax])
if args.clim:
plt.clim(climMin,climMax)
plt.title(args.title)
font = {'family' : 'serif','size':fontsize}
plt.rc('font', **font)
plt.xlabel("Range (m)",fontsize=fontsize+3)
plt.ylabel("Depth (m)",fontsize=fontsize+3)
#plt.rc('text', usetex=True)
if args.verbose:
print("Done")
if args.profiles:
# Plot energy profiles:
if args.verbose:
print("Print profiles...")
cmap2 = plt.get_cmap('prism')
if not args.noplot and nzProfiles > 1:
plt.figure(2)
zVector=np.linspace(zminProfiles-zmaxProfiles,zmaxProfiles,nzProfiles) # z coordinates of horizontal profiles
#zVector=np.arange(zmin/(nzProfiles + 1),zmin,zmin/(nzProfiles + 1)) # z coordinates of horizontal profiles
#print("zVector:",zVector,"zmin:",zmin,"nzProfiles:",nzProfiles)
colors = [cmap2(i) for i in np.linspace(0, 1, len(zVector))] # Color vector
xvect=np.linspace(xmin,xmax,num) # x vector
for i,zz in enumerate(zVector): # loop on the depths, plot all horizontal profiles in a figure (figure 2)
x0,z0=xmin,zz
x1,z1=xmax,zz
idxX0,idxZ0 = find_index(x0,z0,xil,zil) # indices of the closest point in the 2D grid
idxX1,idxZ1 = find_index(x1,z1,xil,zil) # indices of the closest point in the 2D grid
if not args.noplot and nzProfiles > 1:
plt.figure(1)
plt.hold(True)
xLine, zLine = np.linspace(idxX0,idxX1, num), np.linspace(idxZ0, idxZ1, num) # vector containing the indices
if args.verbose:
print("Profile 1 to be saved: (x0,z0) = (",x0,",",z0,") (x1,z1) = (",x1,",",z1,")")
#print("xmin:",xmin,"xmax:",xmax,"zz:",zz)
zi = intEnergyi[zLine.astype(np.int),xLine.astype(np.int)] # If you have got an error here try to choose a lower z1 or a bigger z0! 1
# Extract the values along the line, using cubic interpolation
if type(intEnergyi) == np.ndarray:
zi = scipy.ndimage.map_coordinates(intEnergyi, np.vstack((zLine,xLine)),order=1)
else:
zi = scipy.ndimage.map_coordinates(intEnergyi.filled(), np.vstack((zLine,xLine)),order=1)
if not args.noplot and nzProfiles > 1:
plt.plot([x0, x1], [z0, z1], 'o-',color=colors[i])
plt.figure(2)
plt.plot(xvect,zi,color=colors[i])
plt.xlabel("Range (m)",fontsize=fontsize+3)
if not args.nolog:
plt.ylabel("Log of integrated energy",fontsize=fontsize+3)
else:
plt.ylabel("Integrated energy",fontsize=fontsize+3)
plt.title(args.title)
if not args.noplot and nzProfiles > 1:
plt.xlim([xmin,xmax])
if not args.noplot and nxProfiles > 1:
plt.figure(3)
xVector=np.arange(xmax/(nxProfiles + 1),xmax,xmax/(nxProfiles + 1))
colors = [cmap2(i) for i in np.linspace(0, 1, len(xVector))]
z0=zminProfiles
z1=zmaxProfiles # Be careful! This point can't be too close to zmax!
zvect=np.linspace(z0,z1,num)
depthIntegratedEnergy=np.zeros(len(xVector))
#depthIntegratedEnergy2=np.zeros(len(xVector))
for i,xx in enumerate(xVector): # Loop on the ranges, plot all vertical profiles in a figure.
x0=xx
x1=xx
idxX0,idxZ0 = find_index(x0,z0,xil,zil) # indices of the closest point in the 2D grid
idxX1,idxZ1 = find_index(x1,z1,xil,zil) # indices of the closest point in the 2D grid
if not args.noplot and nxProfiles > 1:
plt.figure(1)
plt.hold(True)
if args.verbose:
print("Profile 2 to be saved: (x0,z0) = (",x0,",",z0,") (x1,z1) = (",x1,",",z1,")")
xLine, zLine = np.linspace(idxX0,idxX1, num), np.linspace(idxZ0, idxZ1, num)
#print("xx:",xmin,"xil:",xil,"zil:",zil)
zi = intEnergyi[zLine.astype(np.int),xLine.astype(np.int)] # If you have got an error here try to choose a lower z1 or a bigger z0! 2
# Extract the values along the line, using cubic interpolation
if type(intEnergyi) == np.ndarray:
zi = scipy.ndimage.interpolation.map_coordinates(intEnergyi, np.vstack((zLine,xLine)),order=1)
else:
zi = scipy.ndimage.interpolation.map_coordinates(intEnergyi.filled(), np.vstack((zLine,xLine)),order=1)
#depthIntegratedEnergy[i]=zi2.sum()
#depthIntegratedEnergy2[i]=zi.sum()
if not args.nolog:
depthIntegratedEnergy[i]=10*np.log10(np.power(10,zi/10.0).sum())
else:
depthIntegratedEnergy[i]=np.power(10,zi/10.0).sum()
if not args.noplot and nxProfiles > 1:
plt.plot([x0, x1], [z0, z1], 'o-',color=colors[i])
plt.figure(3)
plt.plot(zi,zvect,color=colors[i]) # Without filtering
plt.xlabel("Depth (m)",fontsize=fontsize+3)
if not args.nolog:
plt.ylabel("Log of integrated energy",fontsize=fontsize+3)
else:
plt.ylabel("Integrated energy",fontsize=fontsize+3)
#plt.plot(zi2,zvect,color=colors[i])
plt.ylim([z0,z1])
plt.title(args.title)
if not args.noplot and nxProfiles > 1:
plt.figure(4)
plt.plot(xVector,depthIntegratedEnergy,'o-')
#plt.plot(xVector,depthIntegratedEnergy2,'o-')
plt.xlabel("Range (m)",fontsize=fontsize+3)
if not args.nolog:
plt.ylabel("Log 10 of total energy in water",fontsize=fontsize+3)
else:
plt.ylabel("Total energy in water",fontsize=fontsize+3)
plt.title(args.title)
if args.verbose:
print("Done")
if args.writeInterpolatedField:
if args.verbose:
print("Saving energy vs range...")
np.savetxt(directory+args.name_of_files+"_energy_vs_range",np.dstack((xVector,depthIntegratedEnergy))[0])
print("File ",directory+args.name_of_files+"_energy_vs_range has been written")
### SAVE ONE PROFILE ###
if args.saveOneProfile:
if args.verbose:
print("Saving one profile...")
# properties of profile to be saved (if option --sp given):
x0profile = float(args.saveOneProfile[0])
x1profile = float(args.saveOneProfile[1])
z0profile = float(args.saveOneProfile[2])
z1profile = float(args.saveOneProfile[3])
x0,z0 = x0profile,z0profile
x1,z1 = x1profile,z1profile
if z0 == z1:
vect = np.linspace(x0,x1,num)
xlabel = "Range (m)"
elif x0 == x1:
vect = np.linspace(z0,z1,num)
xlabel = "Depth (m)"
else:
sys.exit("Tilted profiles are not handled for now!")
if args.verbose:
print("Profile to be saved: (x0,z0) = (",x0,",",z0,") (x1,z1) = (",x1,",",z1,")")
idxX0,idxZ0 = find_index(x0,z0,xil,zil)
idxX1,idxZ1 = find_index(x1,z1,xil,zil)
if not args.noplot:
plt.figure(1)
plt.hold(True)
xLine, zLine = np.linspace(idxX0,idxX1, num), np.linspace(idxZ0, idxZ1, num)
# Extract the values along the line, using cubic interpolation
zi1 = intEnergyi[zLine.astype(np.int),xLine.astype(np.int)] # If you have got an error here try to choose a lower z1 or a bigger z0! 3
if type(intEnergyi) == np.ndarray:
#zi2 = scipy.ndimage.map_coordinates(np.transpose(intEnergyi), np.vstack((xLine,zLine)),order=1)
sp = interpolate.RectBivariateSpline(zil,xil,intEnergyi, kx=3, ky=3, s=7)
else:
#zi2 = scipy.ndimage.map_coordinates(np.transpose(intEnergyi).filled(), np.vstack((xLine,zLine)),order=1)
sp = interpolate.RectBivariateSpline(zil,xil,intEnergyi, kx=3, ky=3, s=7)
if x0 == x1:
zi = [float(sp([vect[i]],[x0])) for i in range(num)]
if z0 == z1:
zi = [float(sp([z0],[vect[i]])) for i in range(num)]
#print(zi2,sp([140000.0],[-2000]),sp([140000.0],[-2500]))
#depthIntegratedEnergy2[i]=zi.sum()
#if not args.nolog:
# depthIntegratedEnergy[i]=np.power(10,zi/10.0).sum()
#else:
# depthIntegratedEnergy[i]=zi.sum()
if not args.noplot:
plt.hold(True)
plt.plot([x0, x1], [z0, z1], 'o-',color="black",linewidth=3)
plt.figure(2)
plt.plot(vect,zi,color="black",linewidth=3)
plt.plot(vect,zi1,color="green",linewidth=3)
#plt.plot(vect,zi2,color="red",linewidth=3)
plt.xlabel(xlabel,fontsize=fontsize+3)
if not args.nolog:
plt.ylabel("Log of integrated energy",fontsize=fontsize+3)
else:
plt.ylabel("Integrated energy",fontsize=fontsize+3)
if nzProfiles == 1:
plt.xlim([x0,x1])
plt.title(args.title)
i = 0
#while os.path.isfile(args.input_directory+args.name_of_files+"_profile_"+str(i)): # If the a profile of this name has already been written
# i = i+1
np.savetxt(directory+args.name_of_files+"_profile_"+str(i),np.dstack((vect,zi))[0])
print("File ",directory+args.name_of_files+"_profile_"+str(i)," has been written")
if args.verbose:
print("Done")
if not args.noplot:
plt.show()
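def _db_sum_example():
    """Hedged worked example of the dB bookkeeping used for the depth-integrated
    profiles above: values stored in dB must be converted back to linear energy
    before being summed, then converted again."""
    levels_db = np.array([10.0, 10.0])                               # two equal contributions
    return 10.0 * np.log10(np.power(10.0, levels_db / 10.0).sum())   # -> ~13.01 dB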
| 0.019035 |
from Queue import Empty
import couchdb
import uuid
from ghettoq.backends.base import BaseBackend
__author__ = "David Clymer <[email protected]>"
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 5984
DEFAULT_DATABASE = 'ghettoq'
def create_message_view(db):
from couchdb import design
view = design.ViewDefinition('ghettoq', 'messages', """
function (doc) {
if (doc.queue && doc.payload)
emit(doc.queue, doc);
}
""")
if not view.get_doc(db):
view.sync(db)
class CouchdbBackend(BaseBackend):
def __init__(self, host=None, port=None, user=None, password=None,
database=None, timeout=None, ssl=False):
self.ssl = ssl
if not database or database == '/':
database = DEFAULT_DATABASE
self.view_created = False
super(CouchdbBackend, self).__init__(host or DEFAULT_HOST,
port or DEFAULT_PORT,
user, password,
database or DEFAULT_DATABASE,
timeout)
def establish_connection(self):
if self.ssl:
proto = 'https'
else:
proto = 'http'
server = couchdb.Server('%s://%s:%s/' % (proto, self.host, self.port))
try:
return server.create(self.database)
except couchdb.PreconditionFailed:
return server[self.database]
def put(self, queue, message, **kwargs):
self.client.save({'_id': uuid.uuid4().hex, 'queue': queue, 'payload': message})
def _get(self, queue, **kwargs):
# If the message view is not yet set up, we'll need it now.
if not self.view_created:
create_message_view(self.client)
self.view_created = True
if not queue:
raise Empty
return self.client.view('ghettoq/messages', key=queue, **kwargs)
def get(self, queue):
result = self._get(queue, limit=1)
if not result:
raise Empty
item = result.rows[0].value
self.client.delete(item)
return item['payload']
def purge(self, queue):
result = self._get(queue)
for item in result:
self.client.delete(item.value)
return len(result)
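# Hedged usage sketch (not part of the backend; assumes a CouchDB server reachable
# at the default host/port): the backend behaves like a simple named queue.
def _example_roundtrip():
    backend = CouchdbBackend(host=DEFAULT_HOST, port=DEFAULT_PORT, database='ghettoq')
    backend.put('tasks', '{"job": 42}')     # enqueue a JSON payload
    payload = backend.get('tasks')          # dequeue it again -> '{"job": 42}'
    backend.purge('tasks')                  # drop anything left on the queue
    return payload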
| 0.001271 |
# Multiprocessing helper functions thanks to stackoverflow user 'klaus se'
# http://stackoverflow.com/questions/3288595/16071616#16071616
import progressbar as pb
import multiprocessing as mp
import time
import sys
import traceback
def spawn(f):
def worker(q_in, q_out, q_progress):
while True:
i, x = q_in.get()
if i is None:
break
try:
if q_progress:
res = f(x, q_progress=q_progress)
q_out.put((i, res))
else:
res = f(x)
q_out.put((i, res))
except:
print "Subprocess raised exception:"
exType, exValue, exTraceback = sys.exc_info()
traceback.print_exception(
exType, exValue, exTraceback, file=sys.stdout)
q_out.put(None)
return worker
def parmap(f, X, nprocs=mp.cpu_count(), show_progress=True, func_progress=False):
q_in = mp.Queue()
q_out = mp.Queue()
if func_progress:
q_progress = mp.Queue(100)
else:
q_progress = None
if nprocs > mp.cpu_count():
nprocs = mp.cpu_count()
proc = [mp.Process(target=spawn(f), args=(q_in, q_out, q_progress))
for _ in range(nprocs)]
for p in proc:
p.daemon = True
p.start()
if show_progress:
pbar = pb.ProgressBar(
widgets=[pb.Percentage(), ' ', pb.ETA()], maxval=len(X)).start()
[q_in.put((i, x)) for i, x in enumerate(X)]
[q_in.put((None, None)) for _ in range(nprocs)]
n_done = 0
progress = 0
res = []
t0 = time.time()
while n_done < len(X):
if func_progress:
time.sleep(0.02)
else:
res.append(q_out.get())
n_done += 1
while not q_out.empty():
res.append(q_out.get())
n_done += 1
if q_progress:
while not q_progress.empty():
progress_increment = q_progress.get_nowait()
progress += progress_increment
else:
progress = n_done
if show_progress and progress <= len(X):
pbar.update(progress)
if show_progress:
pbar.finish()
[p.join() for p in proc]
return [x for i, x in sorted(res)]
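# Illustrative usage sketch (not part of the original helpers): parmap maps a
# function over an iterable with worker processes and returns the results in
# input order. This assumes a fork-based platform where the closure built by
# spawn() can be handed to mp.Process directly.
if __name__ == '__main__':
    def _square(x):
        return x * x
    print parmap(_square, range(8), nprocs=2, show_progress=False)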
| 0.018564 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# pylint: disable=no-member
#
# @Author: oesteban
# @Date: 2016-01-05 11:33:39
# @Email: [email protected]
# @Last modified by: oesteban
""" Encapsulates report generation functions """
from sys import version_info
import pandas as pd
from .. import logging
from ..utils.misc import BIDS_COMP
from builtins import object # pylint: disable=W0622
from io import open
MRIQC_REPORT_LOG = logging.getLogger('mriqc.report')
def gen_html(csv_file, mod, csv_failed=None, out_file=None):
import os.path as op
import datetime
from pkg_resources import resource_filename as pkgrf
from .. import __version__ as ver
from ..data import GroupTemplate
if version_info[0] > 2:
from io import StringIO as TextIO
else:
from io import BytesIO as TextIO
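    # QCGROUPS maps each imaging modality to (IQM column names, unit) pairs;
    # each group is serialized to a small CSV blob that feeds the report's
    # box plots below.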
QCGROUPS = {
'T1w': [
(['cjv'], None),
(['cnr'], None),
(['efc'], None),
(['fber'], None),
(['wm2max'], None),
(['snr_csf', 'snr_gm', 'snr_wm'], None),
(['snrd_csf', 'snrd_gm', 'snrd_wm'], None),
(['fwhm_avg', 'fwhm_x', 'fwhm_y', 'fwhm_z'], 'vox'),
(['qi_1', 'qi_2'], None),
(['inu_range', 'inu_med'], None),
(['icvs_csf', 'icvs_gm', 'icvs_wm'], None),
(['rpve_csf', 'rpve_gm', 'rpve_wm'], None),
(['tpm_overlap_csf', 'tpm_overlap_gm', 'tpm_overlap_wm'], None),
(['summary_bg_mean', 'summary_bg_median', 'summary_bg_stdv', 'summary_bg_mad',
'summary_bg_k', 'summary_bg_p05', 'summary_bg_p95'], None),
(['summary_csf_mean', 'summary_csf_median', 'summary_csf_stdv', 'summary_csf_mad',
'summary_csf_k', 'summary_csf_p05', 'summary_csf_p95'], None),
(['summary_gm_mean', 'summary_gm_median', 'summary_gm_stdv', 'summary_gm_mad',
'summary_gm_k', 'summary_gm_p05', 'summary_gm_p95'], None),
(['summary_wm_mean', 'summary_wm_median', 'summary_wm_stdv', 'summary_wm_mad',
'summary_wm_k', 'summary_wm_p05', 'summary_wm_p95'], None)
],
'T2w': [
(['cjv'], None),
(['cnr'], None),
(['efc'], None),
(['fber'], None),
(['wm2max'], None),
(['snr_csf', 'snr_gm', 'snr_wm'], None),
(['snrd_csf', 'snrd_gm', 'snrd_wm'], None),
(['fwhm_avg', 'fwhm_x', 'fwhm_y', 'fwhm_z'], 'mm'),
(['qi_1', 'qi_2'], None),
(['inu_range', 'inu_med'], None),
(['icvs_csf', 'icvs_gm', 'icvs_wm'], None),
(['rpve_csf', 'rpve_gm', 'rpve_wm'], None),
(['tpm_overlap_csf', 'tpm_overlap_gm', 'tpm_overlap_wm'], None),
(['summary_bg_mean', 'summary_bg_stdv', 'summary_bg_k',
'summary_bg_p05', 'summary_bg_p95'], None),
(['summary_csf_mean', 'summary_csf_stdv', 'summary_csf_k',
'summary_csf_p05', 'summary_csf_p95'], None),
(['summary_gm_mean', 'summary_gm_stdv', 'summary_gm_k',
'summary_gm_p05', 'summary_gm_p95'], None),
(['summary_wm_mean', 'summary_wm_stdv', 'summary_wm_k',
'summary_wm_p05', 'summary_wm_p95'], None)
],
'bold': [
(['efc'], None),
(['fber'], None),
(['fwhm', 'fwhm_x', 'fwhm_y', 'fwhm_z'], 'mm'),
(['gsr_%s' % a for a in ['x', 'y']], None),
(['snr'], None),
(['dvars_std', 'dvars_vstd'], None),
(['dvars_nstd'], None),
(['fd_mean'], 'mm'),
(['fd_num'], '# timepoints'),
(['fd_perc'], '% timepoints'),
(['spikes_num'], '# slices'),
(['dummy_trs'], '# TRs'),
(['gcor'], None),
(['tsnr'], None),
(['aor'], None),
(['aqi'], None),
(['summary_bg_mean', 'summary_bg_stdv', 'summary_bg_k',
'summary_bg_p05', 'summary_bg_p95'], None),
(['summary_fg_mean', 'summary_fg_stdv', 'summary_fg_k',
'summary_fg_p05', 'summary_fg_p95'], None),
]
}
if csv_file.suffix == '.csv':
def_comps = list(BIDS_COMP.keys())
dataframe = pd.read_csv(csv_file, index_col=False,
dtype={comp: object for comp in def_comps})
id_labels = list(set(def_comps) & set(dataframe.columns.ravel().tolist()))
dataframe['label'] = dataframe[id_labels].apply(_format_labels, args=(id_labels,),
axis=1)
else:
dataframe = pd.read_csv(csv_file, index_col=False, sep='\t',
dtype={'bids_name': object})
dataframe = dataframe.rename(index=str, columns={'bids_name': 'label'})
nPart = len(dataframe)
failed = None
if csv_failed is not None and op.isfile(csv_failed):
MRIQC_REPORT_LOG.warning('Found failed-workflows table "%s"', csv_failed)
failed_df = pd.read_csv(csv_failed, index_col=False)
cols = list(set(id_labels) & set(failed_df.columns.ravel().tolist()))
try:
failed_df = failed_df.sort_values(by=cols)
except AttributeError:
failed_df = failed_df.sort(columns=cols)
# myfmt not defined
# failed = failed_df[cols].apply(myfmt, args=(cols,), axis=1).ravel().tolist()
csv_groups = []
datacols = dataframe.columns.ravel().tolist()
for group, units in QCGROUPS[mod]:
dfdict = {'iqm': [], 'value': [], 'label': [], 'units': []}
for iqm in group:
if iqm in datacols:
values = dataframe[[iqm]].values.ravel().tolist()
if values:
dfdict['iqm'] += [iqm] * nPart
dfdict['units'] += [units] * nPart
dfdict['value'] += values
dfdict['label'] += dataframe[['label']].values.ravel().tolist()
# Save only if there are values
if dfdict['value']:
csv_df = pd.DataFrame(dfdict)
csv_str = TextIO()
csv_df[['iqm', 'value', 'label', 'units']].to_csv(csv_str, index=False)
csv_groups.append(csv_str.getvalue())
if out_file is None:
out_file = op.abspath('group.html')
tpl = GroupTemplate()
tpl.generate_conf({
'modality': mod,
'timestamp': datetime.datetime.now().strftime("%Y-%m-%d, %H:%M"),
'version': ver,
'csv_groups': csv_groups,
'failed': failed,
'boxplots_js': open(pkgrf('mriqc', op.join('data', 'reports',
'embed_resources',
'boxplots.js'))).read(),
'd3_js': open(pkgrf('mriqc', op.join('data', 'reports',
'embed_resources',
'd3.min.js'))).read(),
'boxplots_css': open(pkgrf('mriqc', op.join('data', 'reports',
'embed_resources',
'boxplots.css'))).read()
}, out_file)
return out_file
def _format_labels(row, id_labels):
"""format participant labels"""
crow = []
for col_id, prefix in list(BIDS_COMP.items()):
if col_id in id_labels:
crow.append('%s-%s' % (prefix, row[[col_id]].values[0]))
return '_'.join(crow)
| 0.001311 |
"""unit tests module for ndg.httpsclient.urllib2_build_opener module
PyOpenSSL utility to make a httplib-like interface suitable for use with
urllib2
"""
__author__ = "P J Kershaw (STFC)"
__date__ = "06/01/12"
__copyright__ = "(C) 2012 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "[email protected]"
__revision__ = '$Id$'
import sys
if sys.version_info[0] > 2:
from urllib.error import URLError as URLError_
else:
from urllib2 import URLError as URLError_
import unittest
from OpenSSL import SSL
from ndg.httpsclient.test import Constants
from ndg.httpsclient.urllib2_build_opener import build_opener
class Urllib2TestCase(unittest.TestCase):
"""Unit tests for urllib2 functionality"""
def test01_urllib2_build_opener(self):
opener = build_opener()
self.assertTrue(opener)
def test02_open(self):
opener = build_opener()
res = opener.open(Constants.TEST_URI)
self.assertTrue(res)
print("res = %s" % res.read())
def test03_open_fails_unknown_loc(self):
opener = build_opener()
self.assertRaises(URLError_, opener.open, Constants.TEST_URI2)
def test04_open_peer_cert_verification_fails(self):
# Explicitly set empty CA directory to make verification fail
ctx = SSL.Context(SSL.TLSv1_METHOD)
verify_callback = lambda conn, x509, errnum, errdepth, preverify_ok: \
preverify_ok
ctx.set_verify(SSL.VERIFY_PEER, verify_callback)
ctx.load_verify_locations(None, './')
opener = build_opener(ssl_context=ctx)
self.assertRaises(SSL.Error, opener.open, Constants.TEST_URI)
if __name__ == "__main__":
unittest.main()
| 0.005513 |
# Copyright (c) 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# reflect updated register mappings for ARM ISA
def upgrader(cpt):
if cpt.get('root','isa') == 'arm':
for sec in cpt.sections():
import re
# Search for all ISA sections
if re.search('.*sys.*\.cpu.*\.isa\d*$', sec):
mr = cpt.get(sec, 'miscRegs').split()
if int(mr[0]) & 16 == 0: # CPSR reg width; 0 for AArch64
mr[112] = mr[111] # ACTLR_NS = ACTLR
mr[146] = mr[145] # ADFSR_NS = ADFSR
mr[149] = mr[148] # AIFSR_NS = AIFSR
mr[253] = mr[252] # AMAIR0_NS = AMAIR0
mr[289] = mr[288] # CNTP_CTL_NS = CNTP_CTL
mr[313] = mr[312] # CNTP_CVAL_NS = CNTP_CVAL
mr[286] = mr[285] # CNTP_TVAL_NS = CNTP_TVAL
mr[271] = mr[270] # CONTEXTIDR_NS = CONTEXTIDR
mr[104] = mr[103] # CSSELR_NS = CSSELR
mr[137] = mr[136] # DACR_NS = DACR
mr[155] = mr[154] # DFAR_NS = DFAR
mr[158] = mr[157] # IFAR_NS = IFAR
mr[143] = mr[142] # IFSR_NS = IFSR
mr[247] = mr[246] # NMRR_NS = NMRR
mr[166] = mr[165] # PAR_NS = PAR
mr[241] = mr[240] # PRRR_NS = PRRR
mr[ 4] = mr[424] # SPSR_SVC = SPSR_EL1
mr[ 7] = mr[435] # SPSR_HYP = SPSR_EL2
mr[ 5] = mr[442] # SPSR_MON = SPSR_EL3
mr[277] = mr[276] # TPIDRURO_NS = TPIDRURO
mr[280] = mr[279] # TPIDRPRW_NS = TPIDRPRW
mr[274] = mr[273] # TPIDRURW_NS = TPIDRURW
mr[132] = mr[131] # TTBCR_NS = TTBCR
mr[126] = mr[125] # TTBR0_NS = TTBR0
mr[129] = mr[128] # TTBR1_NS = TTBR1
mr[263] = mr[262] # VBAR_NS = VBAR
cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in mr))
| 0.00837 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: zabbix_host
short_description: Zabbix host creates/updates/deletes
description:
    - When the host does not exist, a new host will be created, added to any host groups and linked to any templates.
- When the host already exists, the host group membership will be updated, along with the template links and interfaces.
- Delete a host from Zabbix if the host exists.
version_added: "1.9"
author: Tony Minfei Ding, Harrison Gu
requirements:
- zabbix-api python module
options:
server_url:
description:
            - URL of Zabbix server, with protocol (http or https).
C(url) is an alias for C(server_url).
required: true
default: null
aliases: [ "url" ]
login_user:
description:
- Zabbix user name.
required: true
default: null
login_password:
description:
- Zabbix user password.
required: true
default: null
host_name:
description:
- Technical name of the host.
- If the host has already been added, the host name won't be updated.
required: true
host_groups:
description:
- List of host groups to add the host to.
required: false
link_templates:
description:
- List of templates to be linked to the host.
required: false
default: None
status:
description:
- Status and function of the host.
            - Possible values are enabled and disabled.
required: false
default: "enabled"
policy:
description:
- Policy for updating pre-existing hosts.
            - Possible values are merge and replace.
- Merge will merge additional host groups and templates not already associated to a host.
- Replace will replace all host groups and templates associated to a host with only the ones specified, potentially removing the host from host groups and templates.
required: false
default: "replace"
state:
description:
            - Create/update or delete the host.
            - Possible values are present and absent. If the host already exists and state is present, the host will be updated.
required: false
default: "present"
timeout:
description:
            - The timeout of API requests (seconds).
default: 10
interfaces:
description:
- List of interfaces to be created for the host (see example).
            - Available values are dns, ip, main, port, type and useip.
            - Please review the interface documentation for more information on the supported properties.
- https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface
required: false
'''
EXAMPLES = '''
- name: Create a new host or update an existing host's info
local_action:
module: zabbix_host
server_url: http://monitor.example.com
login_user: username
login_password: password
host_name: ExampleHost
host_groups:
- Example group1
- Example group2
link_templates:
- Example template1
- Example template2
status: enabled
state: present
policy: merge
interfaces:
- type: 1
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 10050
- type: 4
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 12345
'''
import logging
import copy
try:
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
# Extend the ZabbixAPI
# Since the zabbix-api python module too old (version 1.0, no higher version so far),
# it does not support the 'hostinterface' api calls,
# so we have to inherit the ZabbixAPI class to add 'hostinterface' support.
class ZabbixAPIExtends(ZabbixAPI):
hostinterface = None
def __init__(self, server, timeout, **kwargs):
ZabbixAPI.__init__(self, server, timeout=timeout)
self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs))
class Host(object):
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
# exist host
def is_host_exist(self, host_name):
result = self._zapi.host.exists({'host': host_name})
return result
# check if host group exists
def check_host_group_exist(self, group_names):
for group_name in group_names:
result = self._zapi.hostgroup.exists({'name': group_name})
if not result:
self._module.fail_json(msg="Hostgroup not found: %s" % group_name)
return True
def get_template_ids(self, template_list):
template_ids = []
if template_list is None or len(template_list) == 0:
return template_ids
for template in template_list:
template_list = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}})
if len(template_list) < 1:
self._module.fail_json(msg="Template not found: %s" % template)
else:
template_id = template_list[0]['templateid']
template_ids.append(template_id)
return template_ids
def add_host(self, host_name, group_ids, status, interfaces):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
host_list = self._zapi.host.create({'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status})
if len(host_list) >= 1:
return host_list['hostids'][0]
except Exception, e:
self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))
def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.host.update({'hostid': host_id, 'groups': group_ids, 'status': status})
interface_list_copy = exist_interface_list
if interfaces:
for interface in interfaces:
flag = False
interface_str = interface
for exist_interface in exist_interface_list:
interface_type = interface['type']
exist_interface_type = int(exist_interface['type'])
if interface_type == exist_interface_type:
# update
interface_str['interfaceid'] = exist_interface['interfaceid']
self._zapi.hostinterface.update(interface_str)
flag = True
interface_list_copy.remove(exist_interface)
break
if not flag:
# add
interface_str['hostid'] = host_id
self._zapi.hostinterface.create(interface_str)
# remove
remove_interface_ids = []
for remove_interface in interface_list_copy:
interface_id = remove_interface['interfaceid']
remove_interface_ids.append(interface_id)
if len(remove_interface_ids) > 0:
self._zapi.hostinterface.delete(remove_interface_ids)
except Exception, e:
self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e))
def delete_host(self, host_id, host_name):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.host.delete({'hostid': host_id})
except Exception, e:
self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e))
# get host by host name
def get_host_by_host_name(self, host_name):
host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': [host_name]}})
if len(host_list) < 1:
self._module.fail_json(msg="Host not found: %s" % host_name)
else:
return host_list[0]
# get group ids by group names
def get_group_ids_by_group_names(self, group_names):
group_ids = []
if self.check_host_group_exist(group_names):
group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}})
for group in group_list:
group_id = group['groupid']
group_ids.append({'groupid': group_id})
return group_ids
# get host templates by host id
def get_host_templates_by_host_id(self, host_id):
template_ids = []
template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id})
for template in template_list:
template_ids.append(template['templateid'])
return template_ids
# get host groups by host id
def get_host_groups_by_host_id(self, host_id):
exist_host_groups = []
host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id})
if len(host_groups_list) >= 1:
for host_groups_name in host_groups_list:
exist_host_groups.append(host_groups_name['name'])
return exist_host_groups
# check the exist_interfaces whether it equals the interfaces or not
def check_interface_properties(self, exist_interface_list, interfaces):
interfaces_port_list = []
if len(interfaces) >= 1:
for interface in interfaces:
interfaces_port_list.append(int(interface['port']))
exist_interface_ports = []
if len(exist_interface_list) >= 1:
for exist_interface in exist_interface_list:
exist_interface_ports.append(int(exist_interface['port']))
if set(interfaces_port_list) != set(exist_interface_ports):
return True
for exist_interface in exist_interface_list:
exit_interface_port = int(exist_interface['port'])
for interface in interfaces:
interface_port = int(interface['port'])
if interface_port == exit_interface_port:
for key in interface.keys():
if str(exist_interface[key]) != str(interface[key]):
return True
return False
# get the status of host by host
def get_host_status_by_host(self, host):
return host['status']
# check all the properties before link or clear template
def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
exist_interfaces, host):
# get the existing host's groups
exist_host_groups = self.get_host_groups_by_host_id(host_id)
if set(host_groups) != set(exist_host_groups):
return True
# get the existing status
exist_status = self.get_host_status_by_host(host)
if int(status) != int(exist_status):
return True
# check the exist_interfaces whether it equals the interfaces or not
if self.check_interface_properties(exist_interfaces, interfaces):
return True
# get the existing templates
exist_template_ids = self.get_host_templates_by_host_id(host_id)
if set(list(template_ids)) != set(exist_template_ids):
return True
return False
# link or clear template of the host
def link_or_clear_template(self, host_id, template_id_list):
# get host's exist template ids
exist_template_id_list = self.get_host_templates_by_host_id(host_id)
exist_template_ids = set(exist_template_id_list)
template_ids = set(template_id_list)
template_id_list = list(template_ids)
# get unlink and clear templates
templates_clear = exist_template_ids.difference(template_ids)
templates_clear_list = list(templates_clear)
request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list}
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.host.update(request_str)
except Exception, e:
self._module.fail_json(msg="Failed to link template to host: %s" % e)
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(required=True, default=None, aliases=['url']),
login_user=dict(required=True),
login_password=dict(required=True),
host_name=dict(required=True),
host_groups=dict(required=False),
link_templates=dict(required=False),
status=dict(default="enabled"),
state=dict(default="present"),
policy=dict(default="replace"),
timeout=dict(default=10),
interfaces=dict(required=False)
),
supports_check_mode=True
)
if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
host_name = module.params['host_name']
host_groups = module.params['host_groups']
link_templates = module.params['link_templates']
status = module.params['status']
state = module.params['state']
policy = module.params['policy']
timeout = module.params['timeout']
interfaces = module.params['interfaces']
# convert enabled to 0; disabled to 1
status = 1 if status == "disabled" else 0
zbx = None
# login to zabbix
try:
zbx = ZabbixAPIExtends(server_url, timeout=timeout)
zbx.login(login_user, login_password)
except Exception, e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
host = Host(module, zbx)
template_ids = []
if link_templates:
template_ids = host.get_template_ids(link_templates)
group_ids = []
if host_groups:
group_ids = host.get_group_ids_by_group_names(host_groups)
ip = ""
if interfaces:
for interface in interfaces:
if interface['type'] == 1:
ip = interface['ip']
# check if host exist
is_host_exist = host.is_host_exist(host_name)
if is_host_exist:
# get host id by host name
zabbix_host_obj = host.get_host_by_host_name(host_name)
host_id = zabbix_host_obj['hostid']
if state == "absent":
# remove host
host.delete_host(host_id, host_name)
            module.exit_json(changed=True, result="Successfully deleted host %s" % host_name)
else:
if not group_ids:
module.fail_json(msg="Specify at least one group for updating host '%s'." % host_name)
# get exist host's interfaces
exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
exist_interfaces_copy = copy.deepcopy(exist_interfaces)
# update host
interfaces_len = len(interfaces) if interfaces else 0
# merge host groups and templates rather than replace
if policy == "merge":
exist_host_groups = host.get_host_groups_by_host_id(host_id)
if exist_host_groups:
host_groups = list( set(host_groups) | set(exist_host_groups) )
if host_groups:
group_ids = host.get_group_ids_by_group_names(host_groups)
exist_template_ids = host.get_host_templates_by_host_id(host_id)
if exist_template_ids:
template_ids = list( set(list(template_ids)) | set(exist_template_ids) )
if len(exist_interfaces) > interfaces_len:
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
exist_interfaces, zabbix_host_obj):
host.link_or_clear_template(host_id, template_ids)
host.update_host(host_name, group_ids, status, host_id,
interfaces, exist_interfaces)
module.exit_json(changed=True,
                                 result="Successfully updated host %s (%s) and linked with template '%s'"
% (host_name, ip, link_templates))
else:
module.exit_json(changed=False)
else:
if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
exist_interfaces_copy, zabbix_host_obj):
host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces)
host.link_or_clear_template(host_id, template_ids)
module.exit_json(changed=True,
                                 result="Successfully updated host %s (%s) and linked with template '%s'"
% (host_name, ip, link_templates))
else:
module.exit_json(changed=False)
else:
if not group_ids:
module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)
if not interfaces or (interfaces and len(interfaces) == 0):
module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)
# create host
host_id = host.add_host(host_name, group_ids, status, interfaces)
host.link_or_clear_template(host_id, template_ids)
module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
host_name, ip, link_templates))
from ansible.module_utils.basic import *
main()
| 0.002739 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Shaun Zinck <shaun.zinck at gmail.com>
# Copyright (c) 2015 Lawrence Leonard Gilbert <[email protected]>
# Copyright (c) 2016 Jasper Lievisse Adriaanse <j at jasper.la>
#
# Written by Shaun Zinck
# Based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkgin
short_description: Package manager for SmartOS, NetBSD, et al.
description:
- "The standard package manager for SmartOS, but also usable on NetBSD
or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))"
version_added: "1.0"
author:
- "Larry Gilbert (L2G)"
- "Shaun Zinck (@szinck)"
- "Jasper Lievisse Adriaanse (@jasperla)"
notes:
- "Known bug with pkgin < 0.8.0: if a package is removed and another
package depends on it, the other package will be silently removed as
well. New to Ansible 1.9: check-mode support."
options:
name:
description:
- Name of package to install/remove;
- multiple names may be given, separated by commas
required: false
default: null
state:
description:
- Intended state of the package
choices: [ 'present', 'absent' ]
required: false
default: present
update_cache:
description:
            - Update repository database. Can be run with other steps or on its own.
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
upgrade:
description:
- Upgrade main packages to their newer versions
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
full_upgrade:
description:
- Upgrade all packages to their newer versions
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
clean:
description:
- Clean packages cache
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
force:
description:
- Force package reinstall
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
'''
EXAMPLES = '''
# install package foo
- pkgin:
name: foo
state: present
# Update database and install "foo" package
- pkgin:
name: foo
update_cache: yes
# remove package foo
- pkgin:
name: foo
state: absent
# remove packages foo and bar
- pkgin:
name: foo,bar
state: absent
# Update repositories as a separate step
- pkgin:
update_cache: yes
# Upgrade main packages (equivalent to C(pkgin upgrade))
- pkgin:
upgrade: yes
# Upgrade all packages (equivalent to C(pkgin full-upgrade))
- pkgin:
full_upgrade: yes
# Force-upgrade all packages (equivalent to C(pkgin -F full-upgrade))
- pkgin:
full_upgrade: yes
force: yes
# clean packages cache (equivalent to C(pkgin clean))
- pkgin:
clean: yes
'''
import re
def query_package(module, name):
"""Search for the package by name.
Possible return values:
* "present" - installed, no upgrade needed
* "outdated" - installed, but can be upgraded
* False - not installed or not found
"""
# test whether '-p' (parsable) flag is supported.
rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH)
if rc == 0:
pflag = '-p'
splitchar = ';'
else:
pflag = ''
splitchar = ' '
# Use "pkgin search" to find the package. The regular expression will
# only match on the complete name.
rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name))
# rc will not be 0 unless the search was a success
if rc == 0:
# Search results may contain more than one line (e.g., 'emacs'), so iterate
# through each line to see if we have a match.
packages = out.split('\n')
for package in packages:
# Break up line at spaces. The first part will be the package with its
# version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state
# of the package:
# '' - not installed
# '<' - installed but out of date
# '=' - installed and up to date
# '>' - installed but newer than the repository version
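            # Illustrative example of one such line (assumed shape; fields are
            # ';'-separated when the -p flag is supported, space-separated otherwise):
            #   'gcc47-libs-4.7.2nb4 = GCC 4.7 shared libraries'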
pkgname_with_version, raw_state = package.split(splitchar)[0:2]
# Search for package, stripping version
# (results in sth like 'gcc47-libs' or 'emacs24-nox11')
pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M)
# Do not proceed unless we have a match
if not pkg_search_obj:
continue
# Grab matched string
pkgname_without_version = pkg_search_obj.group(1)
if name != pkgname_without_version:
continue
# The package was found; now return its state
if raw_state == '<':
return 'outdated'
elif raw_state == '=' or raw_state == '>':
return 'present'
else:
return False
# no fall-through
# No packages were matched, so return False
return False
def format_action_message(module, action, count):
vars = { "actioned": action,
"count": count }
if module.check_mode:
message = "would have %(actioned)s %(count)d package" % vars
else:
message = "%(actioned)s %(count)d package" % vars
if count == 1:
return message
else:
return message + "s"
def format_pkgin_command(module, command, package=None):
# Not all commands take a package argument, so cover this up by passing
# an empty string. Some commands (e.g. 'update') will ignore extra
# arguments, however this behaviour cannot be relied on for others.
if package is None:
package = ""
if module.params["force"]:
force = "-F"
else:
force = ""
vars = { "pkgin": PKGIN_PATH,
"command": command,
"package": package,
"force": force}
if module.check_mode:
return "%(pkgin)s -n %(command)s %(package)s" % vars
else:
return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars
def remove_packages(module, packages):
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, package):
continue
rc, out, err = module.run_command(
format_pkgin_command(module, "remove", package))
if not module.check_mode and query_package(module, package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg=format_action_message(module, "removed", remove_c))
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, packages):
install_c = 0
for package in packages:
if query_package(module, package):
continue
rc, out, err = module.run_command(
format_pkgin_command(module, "install", package))
if not module.check_mode and not query_package(module, package):
module.fail_json(msg="failed to install %s: %s" % (package, out))
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c))
module.exit_json(changed=False, msg="package(s) already present")
def update_package_db(module):
rc, out, err = module.run_command(
format_pkgin_command(module, "update"))
if rc == 0:
if re.search('database for.*is up-to-date\n$', out):
            return False, "database is up-to-date"
else:
return True, "updated repository database"
else:
module.fail_json(msg="could not update package db")
def do_upgrade_packages(module, full=False):
if full:
cmd = "full-upgrade"
else:
cmd = "upgrade"
rc, out, err = module.run_command(
format_pkgin_command(module, cmd))
if rc == 0:
if re.search('^nothing to do.\n$', out):
module.exit_json(changed=False, msg="nothing left to upgrade")
else:
module.fail_json(msg="could not %s packages" % cmd)
def upgrade_packages(module):
do_upgrade_packages(module)
def full_upgrade_packages(module):
do_upgrade_packages(module, True)
def clean_cache(module):
rc, out, err = module.run_command(
format_pkgin_command(module, "clean"))
if rc == 0:
# There's no indication if 'clean' actually removed anything,
# so assume it did.
module.exit_json(changed=True, msg="cleaned caches")
else:
module.fail_json(msg="could not clean package cache")
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default="present", choices=["present","absent"]),
name = dict(aliases=["pkg"], type='list'),
update_cache = dict(default='no', type='bool'),
upgrade = dict(default='no', type='bool'),
full_upgrade = dict(default='no', type='bool'),
clean = dict(default='no', type='bool'),
force = dict(default='no', type='bool')),
required_one_of = [['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
supports_check_mode = True)
global PKGIN_PATH
PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
p = module.params
if p["update_cache"]:
c, msg = update_package_db(module)
if not (p['name'] or p["upgrade"] or p["full_upgrade"]):
module.exit_json(changed=c, msg=msg)
if p["upgrade"]:
upgrade_packages(module)
if not p['name']:
module.exit_json(changed=True, msg='upgraded packages')
if p["full_upgrade"]:
full_upgrade_packages(module)
if not p['name']:
module.exit_json(changed=True, msg='upgraded all packages')
if p["clean"]:
clean_cache(module)
if not p['name']:
module.exit_json(changed=True, msg='cleaned caches')
pkgs = p["name"]
if p["state"] == "present":
install_packages(module, pkgs)
elif p["state"] == "absent":
remove_packages(module, pkgs)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| 0.004416 |
"""Support for Minut Point."""
import logging
from homeassistant.components.alarm_control_panel import DOMAIN, AlarmControlPanelEntity
from homeassistant.components.alarm_control_panel.const import SUPPORT_ALARM_ARM_AWAY
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DOMAIN as POINT_DOMAIN, POINT_DISCOVERY_NEW, SIGNAL_WEBHOOK
_LOGGER = logging.getLogger(__name__)
EVENT_MAP = {
"off": STATE_ALARM_DISARMED,
"alarm_silenced": STATE_ALARM_DISARMED,
"alarm_grace_period_expired": STATE_ALARM_TRIGGERED,
}
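# Alarm statuses not present in EVENT_MAP are reported as armed-away by
# MinutPointAlarmControl.state below.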
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up a Point's alarm_control_panel based on a config entry."""
async def async_discover_home(home_id):
"""Discover and add a discovered home."""
client = hass.data[POINT_DOMAIN][config_entry.entry_id]
async_add_entities([MinutPointAlarmControl(client, home_id)], True)
async_dispatcher_connect(
hass, POINT_DISCOVERY_NEW.format(DOMAIN, POINT_DOMAIN), async_discover_home
)
class MinutPointAlarmControl(AlarmControlPanelEntity):
"""The platform class required by Home Assistant."""
def __init__(self, point_client, home_id):
"""Initialize the entity."""
self._client = point_client
self._home_id = home_id
self._async_unsub_hook_dispatcher_connect = None
self._changed_by = None
    async def async_added_to_hass(self):
        """Call when entity is added to Home Assistant."""
await super().async_added_to_hass()
self._async_unsub_hook_dispatcher_connect = async_dispatcher_connect(
self.hass, SIGNAL_WEBHOOK, self._webhook_event
)
async def async_will_remove_from_hass(self):
"""Disconnect dispatcher listener when removed."""
await super().async_will_remove_from_hass()
if self._async_unsub_hook_dispatcher_connect:
self._async_unsub_hook_dispatcher_connect()
@callback
def _webhook_event(self, data, webhook):
"""Process new event from the webhook."""
_type = data.get("event", {}).get("type")
_device_id = data.get("event", {}).get("device_id")
_changed_by = data.get("event", {}).get("user_id")
if (
_device_id not in self._home["devices"] and _type not in EVENT_MAP
) and _type != "alarm_silenced": # alarm_silenced does not have device_id
return
_LOGGER.debug("Received webhook: %s", _type)
self._home["alarm_status"] = _type
self._changed_by = _changed_by
self.async_write_ha_state()
@property
def _home(self):
"""Return the home object."""
return self._client.homes[self._home_id]
@property
def name(self):
"""Return name of the device."""
return self._home["name"]
@property
def state(self):
"""Return state of the device."""
return EVENT_MAP.get(self._home["alarm_status"], STATE_ALARM_ARMED_AWAY)
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_AWAY
@property
def changed_by(self):
"""Return the user the last change was triggered by."""
return self._changed_by
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
status = await self._client.async_alarm_disarm(self._home_id)
if status:
self._home["alarm_status"] = "off"
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
status = await self._client.async_alarm_arm(self._home_id)
if status:
self._home["alarm_status"] = "on"
@property
def unique_id(self):
"""Return the unique id of the sensor."""
return f"point.{self._home_id}"
@property
def device_info(self):
"""Return a device description for device registry."""
return {
"identifiers": {(POINT_DOMAIN, self._home_id)},
"name": self.name,
"manufacturer": "Minut",
}
| 0.001164 |
import os
import fnmatch
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
"""
Return True or False depending on whether the ``path`` should be
ignored (if it matches any pattern in ``ignore_patterns``).
"""
if patterns is None:
patterns = []
for pattern in patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
def get_files(storage, ignore_patterns=None, location=''):
"""
Recursively walk the storage directories yielding the paths
of all files that should be copied.
"""
if ignore_patterns is None:
ignore_patterns = []
directories, files = storage.listdir(location)
for fn in files:
if matches_patterns(fn, ignore_patterns):
continue
if location:
fn = os.path.join(location, fn)
yield fn
for dir in directories:
if matches_patterns(dir, ignore_patterns):
continue
if location:
dir = os.path.join(location, dir)
for fn in get_files(storage, ignore_patterns, dir):
yield fn
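# Illustrative usage sketch (hypothetical values, not part of the original
# module): walk a FileSystemStorage rooted somewhere on disk and skip
# temporary files.
#
#   from django.core.files.storage import FileSystemStorage
#   storage = FileSystemStorage(location='/srv/static')
#   for path in get_files(storage, ignore_patterns=['*.tmp', 'CVS']):
#       print(path)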
def check_settings(base_url=None):
"""
Checks if the staticfiles settings have sane values.
"""
if base_url is None:
base_url = settings.STATIC_URL
if not base_url:
raise ImproperlyConfigured(
"You're using the staticfiles app "
"without having set the required STATIC_URL setting.")
if settings.MEDIA_URL == base_url:
raise ImproperlyConfigured("The MEDIA_URL and STATIC_URL "
"settings must have different values")
if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and
(settings.MEDIA_ROOT == settings.STATIC_ROOT)):
raise ImproperlyConfigured("The MEDIA_ROOT and STATIC_ROOT "
"settings must have different values")
| 0 |
# Copyright (c) 2010 Howard Hughes Medical Institute.
# All rights reserved.
# Use is subject to Janelia Farm Research Campus Software Copyright 1.1 license terms.
# http://license.janelia.org/license/jfrc_copyright_1_1.html
import neuroptikon
from neuro_object import NeuroObject
class Muscle(NeuroObject):
# TODO: stretch receptors?
def __init__(self, network, *args, **keywords):
"""
Muscle objects represent muscles in the :class:`network <Network.Network.Network>` and can be :class:`innervated <Network.Innervation.Innervation>` by :class:`neurites <Network.Neurite.Neurite>`.
Create a muscle by messaging the network:
>>> muscle1 = network.createMuscle()
>>> neuron1.innervate(muscle1)
"""
NeuroObject.__init__(self, network, *args, **keywords)
self._innervations = []
@classmethod
def _fromXMLElement(cls, network, xmlElement):
muscle = super(Muscle, cls)._fromXMLElement(network, xmlElement)
muscle._innervations = []
return muscle
def _needsScriptRef(self):
return True
def innervations(self):
"""
Return the list of :class:`innervations <Network.Innervation.Innervation>` of this muscle.
If no neurites innervate this muscle then an empty list will be returned.
"""
return list(self._innervations)
def connections(self, recurse = True):
return NeuroObject.connections(self, recurse) + self._innervations
def inputs(self, recurse = True):
return NeuroObject.inputs(self, recurse) + self._innervations
def dependentObjects(self):
return NeuroObject.dependentObjects(self) + self.innervations()
@classmethod
def _defaultVisualizationParams(cls):
params = NeuroObject._defaultVisualizationParams()
params['shape'] = 'Capsule'
params['size'] = (.05, .1, .02)
params['color'] = (0.75, 0.5, 0.5)
try:
params['texture'] = neuroptikon.library.texture('Stripes')
except:
pass
params['textureScale'] = 20.0
return params
def defaultVisualizationParams(self):
params = self.__class__._defaultVisualizationParams()
params['label'] = self.abbreviation or self.name
return params
| 0.016407 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provide a TestCase base class for PageTest subclasses' unittests."""
import unittest
from telemetry import benchmark
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.internal import story_runner
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
from telemetry.page import page_test
from telemetry.page import test_expectations
from telemetry.results import results_options
from telemetry.unittest_util import options_for_unittests
class BasicTestPage(page_module.Page):
def __init__(self, url, page_set, base_dir):
super(BasicTestPage, self).__init__(url, page_set, base_dir)
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
class EmptyMetadataForTest(benchmark.BenchmarkMetadata):
def __init__(self):
super(EmptyMetadataForTest, self).__init__('')
class PageTestTestCase(unittest.TestCase):
"""A base class to simplify writing unit tests for PageTest subclasses."""
def CreatePageSetFromFileInUnittestDataDir(self, test_filename):
ps = self.CreateEmptyPageSet()
page = BasicTestPage('file://' + test_filename, ps, base_dir=ps.base_dir)
ps.AddUserStory(page)
return ps
def CreateEmptyPageSet(self):
base_dir = util.GetUnittestDataDir()
ps = page_set_module.PageSet(file_path=base_dir)
return ps
def RunMeasurement(self, measurement, ps,
expectations=test_expectations.TestExpectations(),
                     options=None):
    """Runs a measurement against a pageset, returning the rows it outputs."""
if options is None:
options = options_for_unittests.GetCopy()
assert options
temp_parser = options.CreateParser()
story_runner.AddCommandLineArgs(temp_parser)
defaults = temp_parser.get_default_values()
for k, v in defaults.__dict__.items():
if hasattr(options, k):
continue
setattr(options, k, v)
measurement.CustomizeBrowserOptions(options.browser_options)
options.output_file = None
options.output_formats = ['none']
options.suppress_gtest_report = True
options.output_trace_tag = None
story_runner.ProcessCommandLineArgs(temp_parser, options)
results = results_options.CreateResults(EmptyMetadataForTest(), options)
story_runner.Run(measurement, ps, expectations, options, results)
return results
def TestTracingCleanedUp(self, measurement_class, options=None):
ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html')
start_tracing_called = [False]
stop_tracing_called = [False]
class BuggyMeasurement(measurement_class):
def __init__(self, *args, **kwargs):
measurement_class.__init__(self, *args, **kwargs)
# Inject fake tracing methods to tracing_controller
def TabForPage(self, page, browser):
ActualStartTracing = browser.platform.tracing_controller.Start
def FakeStartTracing(*args, **kwargs):
ActualStartTracing(*args, **kwargs)
start_tracing_called[0] = True
raise exceptions.IntentionalException
browser.StartTracing = FakeStartTracing
ActualStopTracing = browser.platform.tracing_controller.Stop
def FakeStopTracing(*args, **kwargs):
result = ActualStopTracing(*args, **kwargs)
stop_tracing_called[0] = True
return result
browser.platform.tracing_controller.Stop = FakeStopTracing
return measurement_class.TabForPage(self, page, browser)
measurement = BuggyMeasurement()
try:
self.RunMeasurement(measurement, ps, options=options)
except page_test.TestNotSupportedOnPlatformError:
pass
if start_tracing_called[0]:
self.assertTrue(stop_tracing_called[0])
| 0.00731 |
from bs4 import BeautifulSoup
import bs4
import requests
import re
import sys
import json
from urlparse import urljoin, urlparse
from Queue import Queue
email_regex = re.compile(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)")
phone_regex = re.compile(r"(?:(?:\+?1\s*(?:[.-]\s*)?)?(?:\(\s*([2-9]1[02-9]|[2-9][02-8]1|[2-9][02-8][02-9])\s*\)|([2-9]1[02-9]|[2-9][02-8]1|[2-9][02-8][02-9]))\s*(?:[.-]\s*)?)?([2-9]1[02-9]|[2-9][02-9]1|[2-9][02-9]{2})\s*(?:[.-]\s*)?([0-9]{4})(?:\s*(?:#|x\.?|ext\.?|extension)\s*(\d+))?")
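# email_regex matches a whole string shaped like an address (local@domain);
# phone_regex matches North American numbers with an optional "+1", an optional
# area code, ./-/space separators, and an optional extension ("x", "ext.", "#").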
class Webpage:
def __init__(self, html, url):
self.html = html
self.url = url
def parse(self):
soup = BeautifulSoup(self.html, "lxml")
strings = [string for string in soup.strings]
# find links
_found_links = []
def merge(url):
_found_links.append(url)
return urljoin(self.url, url)
        links = [{"url": merge(a['href']), "name": _scrub(a.text) if _scrub(a.text) else a['href']}
                 for a in soup.find_all("a")
                 if a.get('href') is not None
                 and a.get('href') != "#"
                 and not a.get("href").startswith("mailto:")
                 and a.get("href") not in _found_links]
# find social media
social_media = {}
_social_media_sites = ["facebook.com", "youtube.com", "twitter.com", "linkedin.com", "github.com", "plus.google.com", "instagram.com"]
_social_media_urls = []
for link in links:
for site in _social_media_sites:
if site in link['url'].lower() and link['url'] not in _social_media_urls:
if not (site == "twitter.com" and "/intent/" in link['url']):
if site not in social_media:
social_media[site] = []
social_media[site].append(link)
_social_media_urls.append(link['url'])
del _social_media_sites, _social_media_urls
# find description
description = (soup.find('meta', attrs={'name':'og:description'}) or soup.find('meta', attrs={'property':'description'}) or soup.find('meta', attrs={'name':'description'}))
if description is not None:
description = description.get("content")
# find telephone numbers
telephones = []
i = 0
for string in strings:
for match in phone_regex.finditer(string):
extended = _get_desc_phone(strings, i)
number = match.group(0)
if len(match.string) > 100: # or _alpha_ratio(match.string) > 0.4:
break
if ("EIN" in match.string or "EIN" in extended) and ("tax" in match.string or "tax" in extended):
continue
if extended and extended == match.string:
if not len(_alpha(extended.replace(number, "")).strip()) > 0:
extended = None
elif extended.endswith(number):
extended = extended[:-(len(number))].strip()
if match.string is None:
continue
telephones.append({
"number": number,
"extended": extended
})
break
i += 1
# find emails
emails = []
_emails_alone = []
for email in [email for email in soup.find_all("a") if email.get("href") is not None and email.get("href").startswith("mailto:") is True]:
if email.get('href').startswith("mailto:"):
email_address = email.get("href")[7:]
if not email_address.startswith("?"):
if email_address in _emails_alone:
continue
email_description = email.text + " (" + _get_desc(email, minwords=4, maxlevels=2, doesnt_include=email_regex, repl=email_address) + ")"
emails.append({
"address": email_address,
"extended": _scrub(email_description)
})
_emails_alone.append(email_address)
for string in [s for s in strings if email_regex.match(s)]:
for match in email_regex.finditer(string):
if match.string not in _emails_alone:
_emails_alone.append(match.string)
emails.append({
"address": match.string,
"extended": string
})
del _emails_alone # might as well, save memory
return {
"links": links,
"url": self.url,
"social_media": social_media,
"description": description,
"telephones": telephones,
"emails": emails
}
def _get_desc_phone(strings, i):
extended = strings[i]
if len(re.sub(phone_regex, "", extended).strip()) > 0:
return extended
j = i - 1
while len(extended) < 100:
try:
previous = strings[j]
if not phone_regex.match(previous): # if there is a phone number in the extended text, we are probably outside the relevant boundary
extended = strings[j] + " " + extended
else:
break
except IndexError:
break
j -= 1
extended = _scrub(extended)
if _alpha_ratio(extended) < 0.5:
return strings[i]
return extended
def _get_desc(element, minwords=3, maxlength=140, maxlevels=3, doesnt_include=None, repl=""):
levels = 0
desc = element.getText()
previous = element
while len(desc.split(" ")) <= minwords and levels <= maxlevels:
if previous is None:
break
new_desc = previous.getText(separator=u' ')
if doesnt_include is not None and doesnt_include.match(new_desc.replace(repl, "")):
break
if _alpha_ratio(new_desc) < 0.7:
break
desc = new_desc
if len(previous.parent.text) > len(previous.text)*8:
previous = previous.previousSibling
while isinstance(previous, bs4.element.NavigableString):
previous = previous.previousSibling
else:
previous = previous.parent
levels += 1
if len(desc) > maxlength:
return "..." + desc[-maxlength:]
return desc
def _scrub(string):
string = string.strip()
string = string.replace(" , ", ", ")
string = string.replace("\\n", " ")
if string.startswith(", "):
string = string[2:]
while " " in string:
string = string.replace(" ", " ")
return string.strip()
def webpage_from_url(url):
return Webpage(requests.get(url).text, url)
def _alpha_ratio(string):
only = _alpha(string)
ratio = len(only) / (len(string) + 0.01)
return ratio
def _alpha(string):
only = re.sub(r'\W+', '', string)
return only
# simple alias for constructor
def webpage_from_text(html, url=""):
return Webpage(html, url)
def recursive_parse(url, verbose=False, max_depth=1, local=True):
hostname = urlparse(url).hostname
if verbose:
print "Recursively parsing site with max depth of " + str(max_depth) + " @ " + url
responses = []
queue = Queue()
queue.put((0, url))
seen_urls = []
while queue.qsize() > 0:
level, url = queue.get()
seen_urls.append(url.lower())
if verbose:
print ' ' + (" "*level) + " - " + url
response = webpage_from_url(url).parse()
responses.append(response)
if level + 1 <= max_depth:
for link in response['links']:
href = link['url']
if href.lower() not in seen_urls:
if (not local) or (urlparse(href).hostname == hostname):
queue.put((level + 1, href))
                        seen_urls.append(href.lower())
    return responses
def merge_responses(*responses):
out = {
"links": [],
"social_media": {},
"urls": {},
"telephones": [],
"emails": []
}
_seen_links = []
_seen_emails = []
for response in responses:
for link in response['links']:
# computational complexity: O(n^2) :)
if link['url'] not in _seen_links:
out['links'].append(link)
_seen_links.append(link['url'])
for k,v in response['social_media'].items():
if k not in out['social_media']:
out['social_media'][k] = []
            out['social_media'][k].extend(v)
| 0.003859 |
#!/usr/bin/python2
# Authors:
# Jason Gerard DeRose <[email protected]>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Python-level packaging using distutils.
"""
from distutils.core import setup
from distutils.command.install_data import install_data as _install_data
from distutils.util import change_root, convert_path
from distutils import log
from types import StringType
import ipalib
import os
class install_data(_install_data):
"""Override the built-in install_data to gzip files once they
are installed.
"""
def run(self):
# install_data is a classic class so super() won't work. Call it
# directly to copy the files first.
_install_data.run(self)
# Now gzip them
for f in self.data_files:
if type(f) is StringType:
# it's a simple file
f = convert_path(f)
cmd = '/bin/gzip %s/%s' % (self.install_dir, f)
log.info("gzipping %s/%s" % (self.install_dir, f))
os.system(cmd)
else:
# it's a tuple with path and a list of files
dir = convert_path(f[0])
if not os.path.isabs(dir):
dir = os.path.join(self.install_dir, dir)
elif self.root:
dir = change_root(self.root, dir)
if f[1] == []:
# If there are no files listed the user must be
# trying to create an empty directory. So nothing
# to do here.
pass
else:
# gzip the files
for data in f[1]:
data = convert_path(data)
cmd = '/bin/gzip %s/%s' % (dir, data)
log.info("gzipping %s/%s" % (dir, data))
os.system(cmd)
setup(
name='freeipa',
version=ipalib.__version__,
license='GPLv3+',
url='http://freeipa.org/',
packages=[
'ipalib',
'ipalib.plugins',
'ipaserver',
'ipaserver.advise',
'ipaserver.advise.plugins',
'ipaserver.plugins',
'ipaserver.install',
'ipaserver.install.plugins',
'ipaserver.install.server',
],
scripts=['ipa'],
data_files = [('share/man/man1', ["ipa.1"])],
)
| 0.001316 |
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import yaml
def _get_smartstack_proxy_ports_from_file(root, file):
"""Given a root and file (as from os.walk), attempt to return the highest
smartstack proxy port number (int) from that file. Returns 0 if there is no
smartstack proxy_port.
"""
ports = set()
with open(os.path.join(root, file)) as f:
data = yaml.safe_load(f)
if file.endswith("service.yaml") and "smartstack" in data:
# Specifying this in service.yaml is old and deprecated and doesn't
# support multiple namespaces.
ports = {int(data["smartstack"].get("proxy_port", 0))}
elif file.endswith("smartstack.yaml"):
for namespace in data.keys():
ports.add(data[namespace].get("proxy_port", 0))
return ports
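# Shape of a smartstack.yaml this function expects (hypothetical contents):
#
#   main:
#     proxy_port: 20173
#   canary:
#     proxy_port: 20174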
def read_etc_services():
with open("/etc/services") as fd:
return fd.readlines()
def get_inuse_ports_from_etc_services():
ports = set()
for line in read_etc_services():
if line.startswith("#"):
continue
try:
p = line.split()[1]
port = int(p.split("/")[0])
ports.add(port)
except Exception:
pass
return ports
def suggest_smartstack_proxy_port(
yelpsoa_config_root, range_min=19000, range_max=21000
):
"""Pick a random available port in the 19000-21000 block"""
available_proxy_ports = set(range(range_min, range_max + 1))
for root, dirs, files in os.walk(yelpsoa_config_root):
for f in files:
if f.endswith("smartstack.yaml"):
try:
used_ports = _get_smartstack_proxy_ports_from_file(root, f)
for used_port in used_ports:
available_proxy_ports.discard(used_port)
except Exception:
pass
available_proxy_ports.difference_update(get_inuse_ports_from_etc_services())
try:
return random.choice(list(available_proxy_ports))
except IndexError:
raise Exception(
f"There are no more ports available in the range [{range_min}, {range_max}]"
)
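# Hypothetical invocation; the yelpsoa-configs path below is an assumption, not part of this file:
if __name__ == "__main__":
    print(suggest_smartstack_proxy_port("/nail/etc/services"))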
# vim: expandtab tabstop=4 sts=4 shiftwidth=4:
| 0.000725 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.css
~~~~~~~~~~~~~~~~~~~
Lexers for CSS and related stylesheet formats.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import copy
from pygments.lexer import ExtendedRegexLexer, RegexLexer, include, bygroups, \
default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.util import iteritems
__all__ = ['CssLexer', 'SassLexer', 'ScssLexer']
class CssLexer(RegexLexer):
"""
For CSS (Cascading Style Sheets).
"""
name = 'CSS'
aliases = ['css']
filenames = ['*.css']
mimetypes = ['text/css']
tokens = {
'root': [
include('basics'),
],
'basics': [
(r'\s+', Text),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'\{', Punctuation, 'content'),
(r'\:[\w-]+', Name.Decorator),
(r'\.[\w-]+', Name.Class),
(r'\#[\w-]+', Name.Function),
(r'@[\w-]+', Keyword, 'atrule'),
(r'[\w-]+', Name.Tag),
(r'[~^*!%&$\[\]()<>|+=@:;,./?-]', Operator),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single)
],
'atrule': [
(r'\{', Punctuation, 'atcontent'),
(r';', Punctuation, '#pop'),
include('basics'),
],
'atcontent': [
include('basics'),
(r'\}', Punctuation, '#pop:2'),
],
'content': [
(r'\s+', Text),
(r'\}', Punctuation, '#pop'),
(r'url\(.*?\)', String.Other),
(r'^@.*?$', Comment.Preproc),
(words((
'azimuth', 'background-attachment', 'background-color',
'background-image', 'background-position', 'background-repeat',
'background', 'border-bottom-color', 'border-bottom-style',
'border-bottom-width', 'border-left-color', 'border-left-style',
'border-left-width', 'border-right', 'border-right-color',
'border-right-style', 'border-right-width', 'border-top-color',
'border-top-style', 'border-top-width', 'border-bottom',
'border-collapse', 'border-left', 'border-width', 'border-color',
'border-spacing', 'border-style', 'border-top', 'border', 'caption-side',
'clear', 'clip', 'color', 'content', 'counter-increment', 'counter-reset',
'cue-after', 'cue-before', 'cue', 'cursor', 'direction', 'display',
'elevation', 'empty-cells', 'float', 'font-family', 'font-size',
'font-size-adjust', 'font-stretch', 'font-style', 'font-variant',
'font-weight', 'font', 'height', 'letter-spacing', 'line-height',
'list-style-type', 'list-style-image', 'list-style-position',
'list-style', 'margin-bottom', 'margin-left', 'margin-right',
'margin-top', 'margin', 'marker-offset', 'marks', 'max-height', 'max-width',
'min-height', 'min-width', 'opacity', 'orphans', 'outline-color',
'outline-style', 'outline-width', 'outline', 'overflow', 'overflow-x',
'overflow-y', 'padding-bottom', 'padding-left', 'padding-right', 'padding-top',
'padding', 'page', 'page-break-after', 'page-break-before', 'page-break-inside',
'pause-after', 'pause-before', 'pause', 'pitch-range', 'pitch',
'play-during', 'position', 'quotes', 'richness', 'right', 'size',
'speak-header', 'speak-numeral', 'speak-punctuation', 'speak',
'speech-rate', 'stress', 'table-layout', 'text-align', 'text-decoration',
'text-indent', 'text-shadow', 'text-transform', 'top', 'unicode-bidi',
'vertical-align', 'visibility', 'voice-family', 'volume', 'white-space',
'widows', 'width', 'word-spacing', 'z-index', 'bottom',
'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline',
'behind', 'below', 'bidi-override', 'blink', 'block', 'bolder', 'bold', 'both',
'capitalize', 'center-left', 'center-right', 'center', 'circle',
'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous',
'crop', 'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero',
'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed',
'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left',
'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help',
'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon',
'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic',
'justify', 'katakana-iroha', 'katakana', 'landscape', 'larger', 'large',
'left-side', 'leftwards', 'left', 'level', 'lighter', 'line-through', 'list-item',
'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr',
'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace',
'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote',
'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once',
'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px',
'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side',
'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize',
'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent',
'slower', 'slow', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid',
'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize',
'table-caption', 'table-cell', 'table-column', 'table-column-group',
'table-footer-group', 'table-header-group', 'table-row',
'table-row-group', 'text-bottom', 'text-top', 'text', 'thick', 'thin',
'transparent', 'ultra-condensed', 'ultra-expanded', 'underline',
'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url',
'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud',
'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'),
Keyword),
(words((
'indigo', 'gold', 'firebrick', 'indianred', 'yellow', 'darkolivegreen',
'darkseagreen', 'mediumvioletred', 'mediumorchid', 'chartreuse',
'mediumslateblue', 'black', 'springgreen', 'crimson', 'lightsalmon', 'brown',
'turquoise', 'olivedrab', 'cyan', 'silver', 'skyblue', 'gray', 'darkturquoise',
'goldenrod', 'darkgreen', 'darkviolet', 'darkgray', 'lightpink', 'teal',
'darkmagenta', 'lightgoldenrodyellow', 'lavender', 'yellowgreen', 'thistle',
'violet', 'navy', 'orchid', 'blue', 'ghostwhite', 'honeydew', 'cornflowerblue',
'darkblue', 'darkkhaki', 'mediumpurple', 'cornsilk', 'red', 'bisque', 'slategray',
'darkcyan', 'khaki', 'wheat', 'deepskyblue', 'darkred', 'steelblue', 'aliceblue',
'gainsboro', 'mediumturquoise', 'floralwhite', 'coral', 'purple', 'lightgrey',
'lightcyan', 'darksalmon', 'beige', 'azure', 'lightsteelblue', 'oldlace',
'greenyellow', 'royalblue', 'lightseagreen', 'mistyrose', 'sienna',
'lightcoral', 'orangered', 'navajowhite', 'lime', 'palegreen', 'burlywood',
'seashell', 'mediumspringgreen', 'fuchsia', 'papayawhip', 'blanchedalmond',
'peru', 'aquamarine', 'white', 'darkslategray', 'ivory', 'dodgerblue',
'lemonchiffon', 'chocolate', 'orange', 'forestgreen', 'slateblue', 'olive',
'mintcream', 'antiquewhite', 'darkorange', 'cadetblue', 'moccasin',
'limegreen', 'saddlebrown', 'darkslateblue', 'lightskyblue', 'deeppink',
'plum', 'aqua', 'darkgoldenrod', 'maroon', 'sandybrown', 'magenta', 'tan',
'rosybrown', 'pink', 'lightblue', 'palevioletred', 'mediumseagreen',
'dimgray', 'powderblue', 'seagreen', 'snow', 'mediumblue', 'midnightblue',
'paleturquoise', 'palegoldenrod', 'whitesmoke', 'darkorchid', 'salmon',
'lightslategray', 'lawngreen', 'lightgreen', 'tomato', 'hotpink',
'lightyellow', 'lavenderblush', 'linen', 'mediumaquamarine', 'green',
'blueviolet', 'peachpuff'), suffix=r'\b'),
Name.Builtin),
(r'\!important', Comment.Preproc),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'\#[a-zA-Z0-9]{1,6}', Number),
(r'[.-]?[0-9]*[.]?[0-9]+(em|px|pt|pc|in|mm|cm|ex|s)\b', Number),
# Separate regex for percentages, as can't do word boundaries with %
(r'[.-]?[0-9]*[.]?[0-9]+%', Number),
(r'-?[0-9]+', Number),
(r'[~^*!%&<>|+=@:,./?-]+', Operator),
(r'[\[\]();]+', Punctuation),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[a-zA-Z_]\w*', Name)
]
}
common_sass_tokens = {
'value': [
(r'[ \t]+', Text),
(r'[!$][\w-]+', Name.Variable),
(r'url\(', String.Other, 'string-url'),
(r'[a-z_-][\w-]*(?=\()', Name.Function),
(words((
'azimuth', 'background-attachment', 'background-color',
'background-image', 'background-position', 'background-repeat',
'background', 'border-bottom-color', 'border-bottom-style',
'border-bottom-width', 'border-left-color', 'border-left-style',
'border-left-width', 'border-right', 'border-right-color',
'border-right-style', 'border-right-width', 'border-top-color',
'border-top-style', 'border-top-width', 'border-bottom',
'border-collapse', 'border-left', 'border-width', 'border-color',
'border-spacing', 'border-style', 'border-top', 'border', 'caption-side',
'clear', 'clip', 'color', 'content', 'counter-increment', 'counter-reset',
'cue-after', 'cue-before', 'cue', 'cursor', 'direction', 'display',
'elevation', 'empty-cells', 'float', 'font-family', 'font-size',
'font-size-adjust', 'font-stretch', 'font-style', 'font-variant',
'font-weight', 'font', 'height', 'letter-spacing', 'line-height',
'list-style-type', 'list-style-image', 'list-style-position',
'list-style', 'margin-bottom', 'margin-left', 'margin-right',
'margin-top', 'margin', 'marker-offset', 'marks', 'max-height', 'max-width',
'min-height', 'min-width', 'opacity', 'orphans', 'outline', 'outline-color',
'outline-style', 'outline-width', 'overflow', 'padding-bottom',
'padding-left', 'padding-right', 'padding-top', 'padding', 'page',
'page-break-after', 'page-break-before', 'page-break-inside',
'pause-after', 'pause-before', 'pause', 'pitch', 'pitch-range',
'play-during', 'position', 'quotes', 'richness', 'right', 'size',
'speak-header', 'speak-numeral', 'speak-punctuation', 'speak',
'speech-rate', 'stress', 'table-layout', 'text-align', 'text-decoration',
'text-indent', 'text-shadow', 'text-transform', 'top', 'unicode-bidi',
'vertical-align', 'visibility', 'voice-family', 'volume', 'white-space',
'widows', 'width', 'word-spacing', 'z-index', 'bottom', 'left',
'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline',
'behind', 'below', 'bidi-override', 'blink', 'block', 'bold', 'bolder', 'both',
'capitalize', 'center-left', 'center-right', 'center', 'circle',
'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous',
'crop', 'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero',
'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed',
'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left',
'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help',
'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon',
'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic',
'justify', 'katakana-iroha', 'katakana', 'landscape', 'larger', 'large',
'left-side', 'leftwards', 'level', 'lighter', 'line-through', 'list-item',
'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr',
'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace',
'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote',
'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once',
'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px',
'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side',
'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize',
'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent',
'slow', 'slower', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid',
'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize',
'table-caption', 'table-cell', 'table-column', 'table-column-group',
'table-footer-group', 'table-header-group', 'table-row',
'table-row-group', 'text', 'text-bottom', 'text-top', 'thick', 'thin',
'transparent', 'ultra-condensed', 'ultra-expanded', 'underline',
'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url',
'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud',
'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'),
Name.Constant),
(words((
'indigo', 'gold', 'firebrick', 'indianred', 'darkolivegreen',
'darkseagreen', 'mediumvioletred', 'mediumorchid', 'chartreuse',
'mediumslateblue', 'springgreen', 'crimson', 'lightsalmon', 'brown',
'turquoise', 'olivedrab', 'cyan', 'skyblue', 'darkturquoise',
'goldenrod', 'darkgreen', 'darkviolet', 'darkgray', 'lightpink',
'darkmagenta', 'lightgoldenrodyellow', 'lavender', 'yellowgreen', 'thistle',
'violet', 'orchid', 'ghostwhite', 'honeydew', 'cornflowerblue',
'darkblue', 'darkkhaki', 'mediumpurple', 'cornsilk', 'bisque', 'slategray',
'darkcyan', 'khaki', 'wheat', 'deepskyblue', 'darkred', 'steelblue', 'aliceblue',
'gainsboro', 'mediumturquoise', 'floralwhite', 'coral', 'lightgrey',
'lightcyan', 'darksalmon', 'beige', 'azure', 'lightsteelblue', 'oldlace',
'greenyellow', 'royalblue', 'lightseagreen', 'mistyrose', 'sienna',
'lightcoral', 'orangered', 'navajowhite', 'palegreen', 'burlywood',
'seashell', 'mediumspringgreen', 'papayawhip', 'blanchedalmond',
'peru', 'aquamarine', 'darkslategray', 'ivory', 'dodgerblue',
'lemonchiffon', 'chocolate', 'orange', 'forestgreen', 'slateblue',
'mintcream', 'antiquewhite', 'darkorange', 'cadetblue', 'moccasin',
'limegreen', 'saddlebrown', 'darkslateblue', 'lightskyblue', 'deeppink',
'plum', 'darkgoldenrod', 'sandybrown', 'magenta', 'tan',
'rosybrown', 'pink', 'lightblue', 'palevioletred', 'mediumseagreen',
'dimgray', 'powderblue', 'seagreen', 'snow', 'mediumblue', 'midnightblue',
'paleturquoise', 'palegoldenrod', 'whitesmoke', 'darkorchid', 'salmon',
'lightslategray', 'lawngreen', 'lightgreen', 'tomato', 'hotpink',
'lightyellow', 'lavenderblush', 'linen', 'mediumaquamarine',
'blueviolet', 'peachpuff'), suffix=r'\b'),
Name.Entity),
(words((
'black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green',
'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua'), suffix=r'\b'),
Name.Builtin),
(r'\!(important|default)', Name.Exception),
(r'(true|false)', Name.Pseudo),
(r'(and|or|not)', Operator.Word),
(r'/\*', Comment.Multiline, 'inline-comment'),
(r'//[^\n]*', Comment.Single),
(r'\#[a-z0-9]{1,6}', Number.Hex),
(r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
(r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
(r'#\{', String.Interpol, 'interpolation'),
(r'[~^*!&%<>|+=@:,./?-]+', Operator),
(r'[\[\]()]+', Punctuation),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
(r'[a-z_-][\w-]*', Name),
],
'interpolation': [
(r'\}', String.Interpol, '#pop'),
include('value'),
],
'selector': [
(r'[ \t]+', Text),
(r'\:', Name.Decorator, 'pseudo-class'),
(r'\.', Name.Class, 'class'),
(r'\#', Name.Namespace, 'id'),
(r'[\w-]+', Name.Tag),
(r'#\{', String.Interpol, 'interpolation'),
(r'&', Keyword),
(r'[~^*!&\[\]()<>|+=@:;,./?-]', Operator),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
],
'string-double': [
(r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r'"', String.Double, '#pop'),
],
'string-single': [
(r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r"'", String.Double, '#pop'),
],
'string-url': [
(r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
(r'#\{', String.Interpol, 'interpolation'),
(r'\)', String.Other, '#pop'),
],
'pseudo-class': [
(r'[\w-]+', Name.Decorator),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'class': [
(r'[\w-]+', Name.Class),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'id': [
(r'[\w-]+', Name.Namespace),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'for': [
(r'(from|to|through)', Operator.Word),
include('value'),
],
}
def _indentation(lexer, match, ctx):
indentation = match.group(0)
yield match.start(), Text, indentation
ctx.last_indentation = indentation
ctx.pos = match.end()
if hasattr(ctx, 'block_state') and ctx.block_state and \
indentation.startswith(ctx.block_indentation) and \
indentation != ctx.block_indentation:
ctx.stack.append(ctx.block_state)
else:
ctx.block_state = None
ctx.block_indentation = None
ctx.stack.append('content')
def _starts_block(token, state):
def callback(lexer, match, ctx):
yield match.start(), token, match.group(0)
if hasattr(ctx, 'last_indentation'):
ctx.block_indentation = ctx.last_indentation
else:
ctx.block_indentation = ''
ctx.block_state = state
ctx.pos = match.end()
return callback
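# How the two helpers above cooperate (summary comment): _starts_block records the
# indentation of the line that opened a block (e.g. a // comment) and the lexer state
# to resume; _indentation then pushes that state again for any more deeply indented
# line, and otherwise clears the block and falls back to the generic 'content' state.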
class SassLexer(ExtendedRegexLexer):
"""
For Sass stylesheets.
.. versionadded:: 1.3
"""
name = 'Sass'
aliases = ['sass']
filenames = ['*.sass']
mimetypes = ['text/x-sass']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'content': [
(r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
'root'),
(r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
'root'),
(r'@import', Keyword, 'import'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'@[\w-]+', Keyword, 'selector'),
(r'=[\w-]+', Name.Function, 'value'),
(r'\+[\w-]+', Name.Decorator, 'value'),
(r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))',
bygroups(Name.Variable, Operator), 'value'),
(r':', Name.Attribute, 'old-style-attr'),
(r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'),
default('selector'),
],
'single-comment': [
(r'.+', Comment.Single),
(r'\n', Text, 'root'),
],
'multi-comment': [
(r'.+', Comment.Multiline),
(r'\n', Text, 'root'),
],
'import': [
(r'[ \t]+', Text),
(r'\S+', String),
(r'\n', Text, 'root'),
],
'old-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[ \t]*=', Operator, 'value'),
default('value'),
],
'new-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[ \t]*[=:]', Operator, 'value'),
],
'inline-comment': [
(r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
for group, common in iteritems(common_sass_tokens):
tokens[group] = copy.copy(common)
tokens['value'].append((r'\n', Text, 'root'))
tokens['selector'].append((r'\n', Text, 'root'))
class ScssLexer(RegexLexer):
"""
For SCSS stylesheets.
"""
name = 'SCSS'
aliases = ['scss']
filenames = ['*.scss']
mimetypes = ['text/x-scss']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@import', Keyword, 'value'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'(@media)(\s+)', bygroups(Keyword, Text), 'value'),
(r'@[\w-]+', Keyword, 'selector'),
(r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
(r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
(r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
default('selector'),
],
'attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[ \t]*:', Operator, 'value'),
],
'inline-comment': [
(r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
for group, common in iteritems(common_sass_tokens):
tokens[group] = copy.copy(common)
tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')])
tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')])
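# Minimal usage sketch (standard Pygments API; the formatter choice here is arbitrary):
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    print(highlight('a { color: #fff; }', CssLexer(), HtmlFormatter()))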
| 0.004762 |
#!/usr/bin/env python3
import pyphen
import sys
import re
hyphenator = pyphen.Pyphen(filename='patterns/hyph_la_classical.dic',left=2,right=2)
seenSegs = {}
line = 0
def comparenoncompletehyphens(original, obtained):
i = 0
for c in obtained:
if c == '-':
if original[i] == '-':
i = i + 1
else:
if original[i] == '-':
return False
else:
i = i + 1
return True
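# Illustrative behaviour (reference word first, hyphenator output second):
#   comparenoncompletehyphens('aspersi-o', 'a-sper-si-o') -> True: every reference
#   hyphen also appears in the output; extra output hyphens are tolerated.
#   comparenoncompletehyphens('a-spersio', 'asper-sio')   -> False: the reference
#   hyphen after the first 'a' is missing from the output.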
def printError(wrong, correct, base):
print('%s %% %s (not %s)' % (base, correct, wrong))
def dotest(filename, allhyphens=True):
global hyphenator, seenSegs
print('differences in '+filename+':')
linenum = 0
with open(filename, 'r') as f:
for line in f:
linenum += 1
line = line.strip()
            line = re.sub(r'\s*%.*', '', line)  # strip trailing % comments
base = line.replace('-', '')
if base in seenSegs and line != seenSegs[base][1]:
print('ERROR: line %d: test \'%s\' differs from test \'%s\' line %d in %s' % (linenum, line, seenSegs[base][1], seenSegs[base][0], seenSegs[base][2]))
else:
seenSegs[base] = (linenum, line, filename)
new = hyphenator.inserted(base)
if allhyphens:
if not line == new:
printError(new, line, base)
else:
if not comparenoncompletehyphens(line, new):
printError(new, line, base)
dotest('tests/nonliturgical/wordlist-classical-italian.txt')
print()
dotest('tests/nonliturgical/wordlist-classical-only.txt')
| 0.030951 |
from math import ceil
from django.utils import six
class InvalidPage(Exception):
pass
class PageNotAnInteger(InvalidPage):
pass
class EmptyPage(InvalidPage):
pass
class Paginator(object):
def __init__(self, object_list, per_page, orphans=0, allow_empty_first_page=True):
self.object_list = object_list
self.per_page = int(per_page)
self.orphans = int(orphans)
self.allow_empty_first_page = allow_empty_first_page
self._num_pages = self._count = None
def validate_number(self, number):
"Validates the given 1-based page number."
try:
number = int(number)
except (TypeError, ValueError):
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
if number > self.num_pages:
if number == 1 and self.allow_empty_first_page:
pass
else:
raise EmptyPage('That page contains no results')
return number
def page(self, number):
"Returns a Page object for the given 1-based page number."
number = self.validate_number(number)
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
if top + self.orphans >= self.count:
top = self.count
return Page(self.object_list[bottom:top], number, self)
def _get_count(self):
"Returns the total number of objects, across all pages."
if self._count is None:
try:
self._count = self.object_list.count()
except (AttributeError, TypeError):
# AttributeError if object_list has no count() method.
# TypeError if object_list.count() requires arguments
# (i.e. is of type list).
self._count = len(self.object_list)
return self._count
count = property(_get_count)
def _get_num_pages(self):
"Returns the total number of pages."
if self._num_pages is None:
if self.count == 0 and not self.allow_empty_first_page:
self._num_pages = 0
else:
hits = max(1, self.count - self.orphans)
self._num_pages = int(ceil(hits / float(self.per_page)))
return self._num_pages
num_pages = property(_get_num_pages)
def _get_page_range(self):
"""
Returns a 1-based range of pages for iterating through within
a template for loop.
"""
return range(1, self.num_pages + 1)
page_range = property(_get_page_range)
QuerySetPaginator = Paginator # For backwards-compatibility.
class Page(object):
def __init__(self, object_list, number, paginator):
self.object_list = object_list
self.number = number
self.paginator = paginator
def __repr__(self):
return '<Page %s of %s>' % (self.number, self.paginator.num_pages)
def __len__(self):
return len(self.object_list)
def __getitem__(self, index):
if not isinstance(index, (slice,) + six.integer_types):
raise TypeError
# The object_list is converted to a list so that if it was a QuerySet
# it won't be a database hit per __getitem__.
return list(self.object_list)[index]
# The following four methods are only necessary for Python <2.6
# compatibility (this class could just extend 2.6's collections.Sequence).
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v == value:
return True
return False
def index(self, value):
for i, v in enumerate(self):
if v == value:
return i
raise ValueError
def count(self, value):
return sum([1 for v in self if v == value])
# End of compatibility methods.
def has_next(self):
return self.number < self.paginator.num_pages
def has_previous(self):
return self.number > 1
def has_other_pages(self):
return self.has_previous() or self.has_next()
def next_page_number(self):
return self.paginator.validate_number(self.number + 1)
def previous_page_number(self):
return self.paginator.validate_number(self.number - 1)
def start_index(self):
"""
Returns the 1-based index of the first object on this page,
relative to total objects in the paginator.
"""
# Special case, return zero if no items.
if self.paginator.count == 0:
return 0
return (self.paginator.per_page * (self.number - 1)) + 1
def end_index(self):
"""
Returns the 1-based index of the last object on this page,
relative to total objects found (hits).
"""
# Special case for the last page because there can be orphans.
if self.number == self.paginator.num_pages:
return self.paginator.count
return self.number * self.paginator.per_page
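# Minimal usage sketch (plain list as object_list; no database required):
if __name__ == '__main__':
    paginator = Paginator(list(range(10)), per_page=3)
    page_one = paginator.page(1)
    assert paginator.num_pages == 4
    assert list(page_one) == [0, 1, 2]
    assert page_one.has_next() and not page_one.has_previous()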
| 0.001335 |
from keras import regularizers
from keras.models import Model, Sequential
from keras.layers import Input, Activation, Conv3D, MaxPooling3D, ZeroPadding3D,UpSampling3D,Dropout, BatchNormalization, Flatten, GaussianDropout, Dense, concatenate, AveragePooling3D
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D,UpSampling2D,Dropout, BatchNormalization, Flatten, GaussianDropout, Dense, concatenate, AveragePooling3D
from keras.layers.advanced_activations import LeakyReLU, ELU, PReLU
import keras.backend as K
from keras.layers.merge import Concatenate
import numpy as np
from scipy.misc import imread
from scipy.ndimage import convolve
import os
def block_b(in_layer,num_filters=24, kern=(3,3),padding=('same','same'),
batchnorm=False,dropout=None, weight_reg=0,stride=(1,1), num_convs=2,residual=False):
#print(kern,stride,padding)
out_layer = in_layer
for i in range(num_convs):
out_layer=Conv2D(num_filters,kern, padding='same', activation='relu',
kernel_regularizer = regularizers.l1(weight_reg))(out_layer)
if residual is True:
out_layer=Concatenate(axis=1)([out_layer,in_layer])
out_layer=Conv2D(num_filters,kern, padding='valid', strides = stride, activation='relu',
kernel_regularizer = regularizers.l1(weight_reg))(out_layer)
if dropout is not None:
out_layer=Dropout(dropout)(out_layer)
if batchnorm:
out_layer=BatchNormalization(axis=1)(out_layer)
return out_layer
def build_model(image_size,num_classes):
inputs = Input(image_size)
conv = block_b(inputs,num_filters=8, kern=(3,3),padding=('same','same'),
batchnorm=False,dropout=0.2, weight_reg=0,stride=(1,1), num_convs=2,residual=True)
conv = block_b(conv,num_filters=32, kern=(3,3),padding=('same','same'),
batchnorm=False,dropout=0.2, weight_reg=0,stride=(1,1), num_convs=2,residual=True)
conv = block_b(conv,num_filters=16, kern=(3,3),padding=('same','same'),
batchnorm=False,dropout=0.2, weight_reg=0,stride=(1,1), num_convs=2,residual=True)
# conv = block_b(conv,num_filters=16, kern=(3,3),padding=('same','same'),
# batchnorm=False,dropout=0.2, weight_reg=0,stride=(1,1), num_convs=2,residual=True)
flat = Flatten()(conv)
dense = Dense(128,kernel_regularizer=regularizers.l1(0.01))(flat)
dense = LeakyReLU()(dense)
dense = Dropout(0.2)(dense)
dense = Dense(num_classes,activation='sigmoid')(dense)
model = Model(inputs,dense)
return model
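# Shape sketch (assumed channels-first RGB crops, as produced by get_image below):
#   model = build_model((3, 64, 64), num_classes=7)   # e.g. seven sigmoid labels
#   model.summary()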
def train_model(model,train,log_dir,weights_file):
from keras.optimizers import Adam
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(optimizer=adam,loss=['mse'])
from keras.callbacks import TensorBoard
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
es_cb = EarlyStopping(monitor='val_loss', min_delta=0, patience=2, verbose=0, mode='auto')
tb_cb = TensorBoard(log_dir=log_dir, histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
mc_cb = ModelCheckpoint(weights_file,save_best_only=True,monitor='val_loss')
model.fit(train[0],
train[1],
batch_size=61,shuffle=True,
callbacks=[tb_cb,es_cb,mc_cb],
epochs=1000,validation_split=0.25)
return model
def get_image(str, downsample=False):
im = imread(str,flatten=False,mode='RGB').T#channels first
if downsample:
im = np.stack([convolve(im[i,:,:],np.array([[0.25,0.25],[0.25,0.25]]), mode='nearest')[:im.shape[1]:2,:im.shape[2]:2] for i in range(3)],axis=0)
return im
def predict_all(img_folder,model):
import glob
files = glob.glob("{}/*.png".format(img_folder))
inputs = []
for f in files:
inputs.append(get_image(f,downsample=True))
inputs = np.stack(inputs,axis=0).astype(float)
#normalize
mean = np.mean(inputs.flatten())
std = np.std(inputs.flatten())
inputs -= mean
inputs /= (std+1e-15)
predict = model.predict(inputs)
print("Classifications:")
for i,f in enumerate(files):
p = predict[i,:]
print("\tfile: {:<10}\t double: {:.2f}\tsingle: {:.2f}".format(f,p[0],p[1]))
def load_model_from_run_dir(dir):
from keras.models import model_from_json
f = open("{}/model-arch.json".format(dir),'r')
model = model_from_json(f.read())
f.close()
model.load_weights("{}/weights.hdf5".format(dir))
return model
def make_batches_from_protos():
yes = np.genfromtxt('data/hatfield_multiple_20x20_ID19_prototype_6_0.csv',skip_header=1,delimiter=',')[:,18]
no = np.genfromtxt('data/hatfield_multiple_20x20_ID19_prototype_0_0.csv',skip_header=1,delimiter=',')[:,18]
inputs = []
outputs = []
for i in range(len(yes)):
file = "data/img/{}.png".format(int(yes[i]))
inputs.append(get_image(file,downsample=True))
outputs.append([1,0])
for i in range(len(no)):
file = "data/img/{}.png".format(int(no[i]))
inputs.append(get_image(file,downsample=True))
outputs.append([0,1])
inputs = np.stack(inputs,axis=0).astype(float)
#normalize
mean = np.mean(inputs.flatten())
std = np.std(inputs.flatten())
inputs -= mean
inputs /= (std+1e-15)
outputs = np.stack(outputs,axis=0).astype(float)
yes = np.genfromtxt('data/hatfield_multiple_20x20_ID19_prototype_7_0.csv',skip_header=1,delimiter=',')[:,18]
no = np.genfromtxt('data/hatfield_multiple_20x20_ID19_prototype_0_17.csv',skip_header=1,delimiter=',')[:,18]
val_inputs = []
val_outputs = []
for i in range(len(yes)):
file = "data/img/{}.png".format(int(yes[i]))
val_inputs.append(get_image(file,downsample=True))
val_outputs.append([1,0])
for i in range(len(no)):
file = "data/img/{}.png".format(int(no[i]))
val_inputs.append(get_image(file,downsample=True))
val_outputs.append([0,1])
    val_inputs = np.stack(val_inputs,axis=0).astype(float)
#normalize
val_inputs -= mean
val_inputs /= (std+1e-15)
    val_outputs = np.stack(val_outputs,axis=0).astype(float)
    return (inputs, outputs), (val_inputs, val_outputs)
def make_batch_from_classifications():
names = ["Josh","Alex","Huub","Duy","Erik","Pedro"]
inputs = []
outputs = []
for name in names:
d = np.genfromtxt('data/classification - {}.csv'.format(name),
skip_header=1,filling_values=0.,delimiter=',')
#d = np.pad(d,((0,0),(0,9-d.shape[1])),'constant')
mask = np.sum(d[:,1:],axis=1) > 0
d = d[mask,:]
classifications = np.zeros(d.shape[0])
for i in range(d.shape[1]-1):
classifications += d[:,i+1]*(1<<i)
classifications = (np.arange(1<<7) == classifications[..., None]-1).astype(int)
for i in range(d.shape[0]):
file = "data/img/{:05d}.png".format(int(d[i,0]))
inputs.append(get_image(file,downsample=True))
#single/no, double/no, core/no, diffuse bridge/no, symmetric/no, dd/no, UC/no
#outputs.append(classifications[i,:])
outputs.append(d[i,1:])
inputs = np.stack(inputs,axis=0).astype(float)
inputs -= np.mean(inputs.flatten())
inputs /= np.std(inputs.flatten()) + 1e-10
outputs = np.stack(outputs,axis=0).astype(float)
# class_weights_ = np.sum(outputs,axis=0)/np.sum(outputs.flatten())
# class_weights = 1./class_weights_
# class_weights[class_weights_ == 0] = 0.
# class_weights = {i : class_weights[i] for i in range(int(np.size(class_weights)))}
return inputs, outputs#, class_weights
if __name__=='__main__':
# inputs = np.random.uniform(size=(64,1,128,128))
# outputs = np.random.randint(2,size=(64))
# outputs = (np.arange(outputs.max()+1) == outputs[..., None]-1).astype(int)
inputs, outputs = make_batch_from_classifications()
print("Train Data shapes:",inputs.shape,outputs.shape)
data = (inputs,outputs)
output_folder = 'output_lofargroup_sigmoid'
for i in range(100):
run_dir = '{}/run{}'.format(output_folder,i)
log_dir = '{}/logs/run{}'.format(output_folder,i)
try:
os.makedirs(run_dir)
os.makedirs(log_dir)
except:
continue
model = build_model(inputs.shape[1:],outputs.shape[1])
f = open("{}/model-arch.json".format(run_dir),"w")
f.write(model.to_json())
f.close()
model = train_model(model,data,log_dir,"{}/weights.hdf5".format(run_dir))
#model = load_model_from_run_dir(run_dir)
#predict_all("data/img",model)
| 0.023515 |
# Copyright 2017 BrainPad Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import json
import sys
import requests
sys.path.append('../../robot-arm')
from dobot.client import Dobot
from dobot.utils import detect_dobot_port, dobot_is_on_port
DEFAULT_BAUDRATE = 115200
class SerialDobotCalibrator(object):
def __init__(self, port):
self.dobot = Dobot(port, DEFAULT_BAUDRATE)
def get_position(self):
pose = self.dobot.get_pose()
return {'x': pose['x'], 'y': pose['y'], 'z': pose['z']}
def initialize(self):
self.dobot.initialize()
class HTTPDobotCalibrator(object):
base_url = ""
def __init__(self, ipaddress):
self.base_url = "http://{}".format(ipaddress)
print self.base_url
def get_position(self):
r = requests.get(self.base_url + '/api/status')
if 200 != r.status_code:
print "Error: unable to connect to server."
msg = "Error: Please check network or the 'robot api' is working on host machine."
raise Exception(msg)
value_ = r.content
decode_data = json.loads(value_)
x = decode_data['x']
y = decode_data['y']
z = decode_data['z']
return {'x': x, 'y': y, 'z': z}
def initialize(self):
requests.post(self.base_url + '/api/init')
def _request(url):
r = requests.get(url)
if 200 != r.status_code:
print "Error: unable to connect to server."
msg = "Error: Please check network or the 'robot api' is working on host machine."
raise Exception(msg)
return r.content
def wait_for_keystroke(mark_id):
raw_input(
"Push the button (marked as 'unlock') which is located in middle of arm) to release the arm and then slowly move the arm edge to slightly touch \n '{}' on marker sheet.\nAfter you finished, press Enter.".format(
mark_id))
if '__main__' == __name__:
parser = argparse.ArgumentParser(description='Run Dobot WebAPI.')
parser.add_argument('--http', dest='http', action='store_true', default=False)
parser.add_argument('--api-uri', type=str, default="127.0.0.1:8000")
parser.add_argument('--dobot-port', type=str, default=None)
parser.add_argument('--tuner-file', type=str, default='/tmp/robot_tuner.dat')
args = parser.parse_args()
if args.http:
tuner = HTTPDobotCalibrator(args.api_uri)
print('via http')
else:
if args.dobot_port is None:
dobot_port = detect_dobot_port(DEFAULT_BAUDRATE)
if dobot_port is None:
print('dobot offline')
exit(1)
else:
dobot_port = args.dobot_port
if not dobot_is_on_port(dobot_port, DEFAULT_BAUDRATE):
print('dobot is not detected on port {}'.format(dobot_port))
exit(1)
print('via {}'.format(dobot_port))
tuner = SerialDobotCalibrator(dobot_port)
val_arr = []
raw_input("PRESS Enter to start dobot arm initialization protocol.")
tuner.initialize()
print ""
wait_for_keystroke("Marker A")
value = tuner.get_position()
print ">> Marker A(x,y,z)={}".format(value)
val_arr.append(value)
print ""
wait_for_keystroke("Marker D")
value = tuner.get_position()
print ">> Marker D(x,y,z)={}".format(value)
val_arr.append(value)
print ""
wait_for_keystroke("Marker E")
value = tuner.get_position()
print ">> Marker E(x,y,z)={}".format(value)
val_arr.append(value)
print ""
    with open(args.tuner_file, 'w') as writefile:
for entry in val_arr:
json.dump(entry, writefile)
writefile.write('\n')
| 0.001621 |