Columns: text (string, length 820 to 1M characters), score (float64, range 0 to 0.24)
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2011 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## # Coded by: [email protected] # ############################################################################## from openerp.tools.translate import _ from openerp import api, models, fields, tools import logging import time from openerp.modules.module import get_module_resource _logger = logging.getLogger(__name__) class calificacion(models.Model): #_inherit = 'prof.category' _name = 'sspp.calificacion' _description = 'Formulario De Calificacion de Proyecto' #project_id = fields.Many2one('sspp.proyecto', 'Proyecto' ,ondelete='set null',requiered=True ) #, domain=[('profAssesor','=','uid')]) student = fields.Many2one('res.users' , ondelete='set null', string="Estudiante", index=True ) carnet = fields.Integer() firstReportProf = fields.Integer() secondReportProf = fields.Integer() finalReportProf = fields.Integer() firstReportComp = fields.Integer() secondReportComp = fields.Integer() finalReportComp = fields.Integer() finalPresentation = fields.Integer() #score = fields.Integer(compute='compute_total') score = fields.Integer() #comments= fields.Text('Comentarios', size=256 , help='Comentarios adicionales') #approvedBy = fields.Many2one('res.users', 'Aprobado por', ondelete='set null', requiered=False) @api.onchange('firstReportProf', 'secondReportProf','finalReportProf','firstReportComp','secondReportComp','finalReportComp','finalPresentation') def onchange_field(self): if self.firstReportProf or self.secondReportProf or self.finalReportProf or self.firstReportComp or self.secondReportComp or self.finalReportComp or self.finalPresentation : self.score = self.firstReportProf + self.secondReportProf + self.finalReportProf + self.firstReportComp + self.secondReportComp + self.finalReportComp + self.finalPresentation # @api.depends('firstReportProf', 'secondReportProf','finalReportProf','firstReportComp','secondReportComp','finalReportComp','finalPresentation') # @api.one # #@api.depends('sspp.valoresCalificacion.firstReportProf', 'sspp.valoresCalificacion.secondReportProf','sspp.valoresCalificacion.finalReportProf','sspp.valoresCalificacion.firstReportComp','sspp.valoresCalificacion.secondReportComp','sspp.valoresCalificacion.finalReportComp','sspp.valoresCalificacion.finalPresentation') # # @api.onchange('finalPresentation','valoresCalificacion.finalPresentation') # def compute_total(self): # instanciaValues = self.env['sspp.valorescalificacion'] # reg = instanciaValues.search([('id', '!=', 69)]) # r1 = (self.firstReportProf * (reg.firstReportProfValue / 100)) + (self.firstReportComp * 
(reg.firstReportCompValue / 100)) # r2 = (self.secondReportProf * (reg.secondReportProfValue / 100)) + (self.secondReportComp * (reg.secondReportCompValue / 100)) # r3 = (self.finalReportProf * (reg.finalReportProfValue / 100)) + (self.finalReportComp * (reg.finalReportCompValue / 100)) # p = self.finalPresentation * (reg.finalPresentation / 100) # #p = self.finalPresentation * (valoresCalificacion.finalPresentation / 100) # #r1 = (self.firstReportProf * sspp.valoresCalificacion.firstReportProfValue) + (self.firstReportComp * sspp.valoresCalificacion.firstReportCompValue) # #r2 = (self.secondReportProf * sspp.valoresCalificacion.secondReportProfValue) + (self.secondReportComp * sspp.valoresCalificacion.secondReportCompValue) # #r3 = (self.finalReportProf * sspp.valoresCalificacion.finalReportProfValue) + (self.finalReportComp * sspp.valoresCalificacion.finalReportCompValue) # #p = self.finalPresentation * sspp.valoresCalificacion.finalPresentationValue # #x = r1 + r2 + r3 + p # self.score = r1 + r2 + r3 + p # #self.score = p # def compute_total(self): # #instanciaValues = self.env['sspp.valorescalificacion'] # #reg = instanciaValues.search([('id', '!=', 69)]) # r1 = self.firstReportProf #* (reg.firstReportProfValue / 100)) + (self.firstReportComp * (reg.firstReportCompValue / 100)) # r2 = self.secondReportProf #* (reg.secondReportProfValue / 100)) + (self.secondReportComp * (reg.secondReportCompValue / 100)) # r3 = self.finalReportProf #* (reg.finalReportProfValue / 100)) + (self.finalReportComp * (reg.finalReportCompValue / 100)) # p = self.finalPresentation #* (reg.finalPresentation / 100) # r4 = self.firstReportComp + secondReportComp +finalReportComp + firstReportProf + secondReportProf + finalReportProf + finalPresentation # r5 = self.secondReportComp # r6 = self.secondReportComp # #p = self.finalPresentation * (valoresCalificacion.finalPresentation / 100) # #r1 = (self.firstReportProf * sspp.valoresCalificacion.firstReportProfValue) + (self.firstReportComp * sspp.valoresCalificacion.firstReportCompValue) # #r2 = (self.secondReportProf * sspp.valoresCalificacion.secondReportProfValue) + (self.secondReportComp * sspp.valoresCalificacion.secondReportCompValue) # #r3 = (self.finalReportProf * sspp.valoresCalificacion.finalReportProfValue) + (self.finalReportComp * sspp.valoresCalificacion.finalReportCompValue) # #p = self.finalPresentation * sspp.valoresCalificacion.finalPresentationValue # #x = r1 + r2 + r3 + p # self.score = r1 + r2 + r3 + p + r4 + r5 + r6 # #self.score = p @api.multi def sendMailStudent(self, body, subject): #Sends to Professor Assesor mail_mail = self.env['mail.mail'] mail_values = { 'email_from':self.student.email, 'email_to': self.student.email, 'subject': subject, 'body_html': body, 'state': 'outgoing', 'type': 'email', } mail_id = mail_mail.create( mail_values) mail_mail.send([mail_id]) @api.multi def sendMailProfAssesor(self, body, subject): #Sends to Professor Assesor mail_mail = self.env['mail.mail'] mail_values = { 'email_from':self.project_id.profAssesor.email, 'email_to': self.project_id.profAssesor.email, 'subject': subject, 'body_html': body, 'state': 'outgoing', 'type': 'email', } mail_id = mail_mail.create( mail_values) mail_mail.send([mail_id]) @api.multi def sendMailAdmin(self, body, subject): #Sends to Professor Assesor mail_mail = self.env['mail.mail'] users = self.env['res.users'].search([('isAdmin','=',True)]) for admins in users: mail_values = { 'email_from':admins.email, 'email_to': admins.email, 'subject': subject, 'body_html': body, 'state': 
'outgoing', 'type': 'email', } mail_id = mail_mail.create( mail_values) mail_mail.send([mail_id]) @api.multi def write(self, vals): _logger.critical(' vals %s', vals) #vals['score'] = int(vals['firstReportComp']) + int(vals['secondReportComp']) + int(vals['finalReportComp']) + int(vals['firstReportProf']) + int(vals['secondReportProf']) + int(vals['finalReportProf']) + int(vals['finalPresentation']) #c = 0 #x = 0 #for val in vals: # x += val[c][1] # c += 1 #vals['score'] = self.firstReportComp + self.secondReportComp + self.finalReportComp + self.firstReportProf + self.secondReportProf + self.finalReportProf + self.finalPresentation + self.score #vals['score'] = x super(calificacion, self).write(vals) #super(calificacion, self).write(vals) #''' " %s," % (rec.project_id.student.name) + ''' #self.score = self.firstReportProf body = ''' Estimado estudiante <p></p> <p> Su nota del curso se ha actualizado a: ''' "%s" % self.score + ''' .</p> <p></p> <p>Esto es un mensaje automatico, favor no responder. </p> <p>Saludos, Coordindacion del curso de Practica </p> ''' subject = "Nota del curso actualizada" self.sendMailStudent(body,subject) _logger.critical(' vals %s', self.score) #self.score = self.firstReportProf #rec.sendMailProfAssesor(body,subject) return True _defaults = { #'profAssesor': lambda obj, cr, uid, context: uid, 'firstReportProf' : 0, 'secondReportProf' : 0, 'finalReportProf' : 0, 'firstReportComp' : 0, 'secondReportComp' : 0, 'finalReportComp' : 0 , 'finalPresentation' : 0 , 'score' : 0, } class valoresCalificacion(models.Model): #_inherit = 'prof.category' _name = 'sspp.valorescalificacion' _description = 'Valores porcentuales De Calificacion de Proyecto' name = fields.Char('Nombre Del Proyecto', size=256 ) firstReportProfValue = fields.Integer() secondReportProfValue = fields.Integer() finalReportProfValue = fields.Integer() firstReportCompValue = fields.Integer() secondReportCompValue = fields.Integer() finalReportCompValue = fields.Integer() #finalReportCompValue = fields.Integer() finalPresentation = fields.Integer() _defaults = { 'name' : 'lol', 'firstReportProfValue' : 14, 'secondReportProfValue' : 10, 'finalReportProfValue' : 14, 'firstReportCompValue' : 10, 'secondReportCompValue' : 22, 'finalReportCompValue' : 10 , 'finalPresentation' : 20, }
0.028318
""" Component to offer a way to set a numeric value from a slider or text box. For more details about this component, please refer to the documentation at https://home-assistant.io/components/input_number/ """ import logging import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.const import ( ATTR_ENTITY_ID, ATTR_UNIT_OF_MEASUREMENT, CONF_ICON, CONF_NAME, CONF_MODE) from homeassistant.helpers.entity import Entity from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.restore_state import async_get_last_state _LOGGER = logging.getLogger(__name__) DOMAIN = 'input_number' ENTITY_ID_FORMAT = DOMAIN + '.{}' CONF_INITIAL = 'initial' CONF_MIN = 'min' CONF_MAX = 'max' CONF_STEP = 'step' MODE_SLIDER = 'slider' MODE_BOX = 'box' ATTR_VALUE = 'value' ATTR_MIN = 'min' ATTR_MAX = 'max' ATTR_STEP = 'step' ATTR_MODE = 'mode' SERVICE_SET_VALUE = 'set_value' SERVICE_INCREMENT = 'increment' SERVICE_DECREMENT = 'decrement' SERVICE_DEFAULT_SCHEMA = vol.Schema({ vol.Optional(ATTR_ENTITY_ID): cv.entity_ids }) SERVICE_SET_VALUE_SCHEMA = vol.Schema({ vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, vol.Required(ATTR_VALUE): vol.Coerce(float), }) def _cv_input_number(cfg): """Configure validation helper for input number (voluptuous).""" minimum = cfg.get(CONF_MIN) maximum = cfg.get(CONF_MAX) if minimum >= maximum: raise vol.Invalid('Maximum ({}) is not greater than minimum ({})' .format(minimum, maximum)) state = cfg.get(CONF_INITIAL) if state is not None and (state < minimum or state > maximum): raise vol.Invalid('Initial value {} not in range {}-{}' .format(state, minimum, maximum)) return cfg CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ cv.slug: vol.All({ vol.Optional(CONF_NAME): cv.string, vol.Required(CONF_MIN): vol.Coerce(float), vol.Required(CONF_MAX): vol.Coerce(float), vol.Optional(CONF_INITIAL): vol.Coerce(float), vol.Optional(CONF_STEP, default=1): vol.All(vol.Coerce(float), vol.Range(min=1e-3)), vol.Optional(CONF_ICON): cv.icon, vol.Optional(ATTR_UNIT_OF_MEASUREMENT): cv.string, vol.Optional(CONF_MODE, default=MODE_SLIDER): vol.In([MODE_BOX, MODE_SLIDER]), }, _cv_input_number) }) }, required=True, extra=vol.ALLOW_EXTRA) async def async_setup(hass, config): """Set up an input slider.""" component = EntityComponent(_LOGGER, DOMAIN, hass) entities = [] for object_id, cfg in config[DOMAIN].items(): name = cfg.get(CONF_NAME) minimum = cfg.get(CONF_MIN) maximum = cfg.get(CONF_MAX) initial = cfg.get(CONF_INITIAL) step = cfg.get(CONF_STEP) icon = cfg.get(CONF_ICON) unit = cfg.get(ATTR_UNIT_OF_MEASUREMENT) mode = cfg.get(CONF_MODE) entities.append(InputNumber( object_id, name, initial, minimum, maximum, step, icon, unit, mode)) if not entities: return False component.async_register_entity_service( SERVICE_SET_VALUE, SERVICE_SET_VALUE_SCHEMA, 'async_set_value' ) component.async_register_entity_service( SERVICE_INCREMENT, SERVICE_DEFAULT_SCHEMA, 'async_increment' ) component.async_register_entity_service( SERVICE_DECREMENT, SERVICE_DEFAULT_SCHEMA, 'async_decrement' ) await component.async_add_entities(entities) return True class InputNumber(Entity): """Representation of a slider.""" def __init__(self, object_id, name, initial, minimum, maximum, step, icon, unit, mode): """Initialize an input number.""" self.entity_id = ENTITY_ID_FORMAT.format(object_id) self._name = name self._current_value = initial self._minimum = minimum self._maximum = maximum self._step = step self._icon = icon self._unit = unit self._mode = mode @property def 
should_poll(self): """If entity should be polled.""" return False @property def name(self): """Return the name of the input slider.""" return self._name @property def icon(self): """Return the icon to be used for this entity.""" return self._icon @property def state(self): """Return the state of the component.""" return self._current_value @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return self._unit @property def state_attributes(self): """Return the state attributes.""" return { ATTR_MIN: self._minimum, ATTR_MAX: self._maximum, ATTR_STEP: self._step, ATTR_MODE: self._mode, } async def async_added_to_hass(self): """Run when entity about to be added to hass.""" if self._current_value is not None: return state = await async_get_last_state(self.hass, self.entity_id) value = state and float(state.state) # Check against None because value can be 0 if value is not None and self._minimum <= value <= self._maximum: self._current_value = value else: self._current_value = self._minimum async def async_set_value(self, value): """Set new value.""" num_value = float(value) if num_value < self._minimum or num_value > self._maximum: _LOGGER.warning("Invalid value: %s (range %s - %s)", num_value, self._minimum, self._maximum) return self._current_value = num_value await self.async_update_ha_state() async def async_increment(self): """Increment value.""" new_value = self._current_value + self._step if new_value > self._maximum: _LOGGER.warning("Invalid value: %s (range %s - %s)", new_value, self._minimum, self._maximum) return self._current_value = new_value await self.async_update_ha_state() async def async_decrement(self): """Decrement value.""" new_value = self._current_value - self._step if new_value < self._minimum: _LOGGER.warning("Invalid value: %s (range %s - %s)", new_value, self._minimum, self._maximum) return self._current_value = new_value await self.async_update_ha_state()
0
__author__ = 'jmeireles'

import re
from fetcher import AbstractFetcher
from urllib import unquote


class Fetcher(AbstractFetcher):
    rootApi = "/api/player.api.php"
    keys = {
        "domain": "undefined",
        "file": "undefined",
        "filekey": "undefined",
        "cid": "undefined",
    }

    def fetch(self, url):
        soup = self.requester.get(url)

        for key in self.keys:
            tmp = re.search('(?<=flashvars.'+key+'=)"?(?P<match>[^";]+)', soup.text)
            if tmp:
                self.keys[key] = unquote(tmp.group("match")).decode('utf8')

        if self.keys['filekey'] == 'fkz':
            tmp = re.search('fkz="?(?P<match>[^";]+)', soup.text)
            if tmp:
                self.keys['filekey'] = unquote(tmp.group("match")).decode('utf8')

        params = {'cid': self.keys['cid'], 'file': self.keys['file'], 'key': self.keys['filekey']}
        url = self.requester.build_url(self.keys['domain'] + self.rootApi, params)

        ''' Api request '''
        response = self.requester.get(url, True)
        found = re.search('[domain|url]=(?P<url>.*?)&', response.text)

        if found:
            found = unquote(found.group("url")).decode('utf8')

        return found
0.003981
#!/usr/bin/env python

import os

from Cython.Compiler.Main import compile
from distutils.core import setup, Extension
from distutils.command.build_ext import build_ext

source_root = os.path.dirname(__file__)

compiled_modules = [
    "sympy.polys.densearith",
    "sympy.polys.densebasic",
    "sympy.polys.densetools",
    "sympy.polys.galoistools",
    "sympy.polys.factortools",
    "sympy.polys.specialpolys",
    "sympy.polys.monomialtools",
]

extensions = []

for module in compiled_modules:
    source_file = os.path.join(source_root, *module.split('.')) + ".py"

    print("Compiling module %s ..." % module)
    result = compile(source_file)

    if result.c_file is None:
        raise RuntimeError("failed to compile %s" % module)

    extensions.append(
        Extension(module, sources=[result.c_file],
            extra_compile_args=['-O2', '-Wall'],
        )
    )

setup(
    name = "SymPy",
    packages = [
        "sympy",
        "sympy.polys",
    ],
    cmdclass = { "build_ext": build_ext },
    ext_modules = extensions
)
0.012987
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals, print_function import frappe import getpass from frappe.utils.password import update_password def before_install(): frappe.reload_doc("core", "doctype", "docfield") frappe.reload_doc("core", "doctype", "docperm") frappe.reload_doc("core", "doctype", "doctype") def after_install(): # reset installed apps for re-install frappe.db.set_global("installed_apps", '["frappe"]') install_basic_docs() from frappe.core.doctype.file.file import make_home_folder make_home_folder() import_country_and_currency() from frappe.core.doctype.language.language import sync_languages sync_languages() # save default print setting print_settings = frappe.get_doc("Print Settings") print_settings.save() # all roles to admin frappe.get_doc("User", "Administrator").add_roles(*frappe.db.sql_list("""select name from tabRole""")) # update admin password update_password("Administrator", get_admin_password()) # setup wizard now in frappe frappe.db.set_default('desktop:home_page', 'setup-wizard') frappe.db.commit() def install_basic_docs(): # core users / roles install_docs = [ {'doctype':'User', 'name':'Administrator', 'first_name':'Administrator', 'email':'[email protected]', 'enabled':1, "is_admin": 1, 'roles': [{'role': 'Administrator'}] }, {'doctype':'User', 'name':'Guest', 'first_name':'Guest', 'email':'[email protected]', 'enabled':1, "is_guest": 1, 'roles': [{'role': 'Guest'}] }, {'doctype': "Role", "role_name": "Report Manager"}, {'doctype': "Workflow State", "workflow_state_name": "Pending", "icon": "question-sign", "style": ""}, {'doctype': "Workflow State", "workflow_state_name": "Approved", "icon": "ok-sign", "style": "Success"}, {'doctype': "Workflow State", "workflow_state_name": "Rejected", "icon": "remove", "style": "Danger"}, {'doctype': "Workflow Action", "workflow_action_name": "Approve"}, {'doctype': "Workflow Action", "workflow_action_name": "Reject"}, {'doctype': "Workflow Action", "workflow_action_name": "Review"}, {'doctype': "Email Domain", "domain_name":"example.com", "email_id": "[email protected]", "password": "pass", "email_server": "imap.example.com","use_imap": 1, "smtp_server": "smtp.example.com"}, {'doctype': "Email Account", "domain":"example.com", "email_id": "[email protected]", "default_outgoing": 1}, {'doctype': "Email Account", "domain":"example.com", "email_id": "[email protected]", "default_incoming": 1} ] for d in install_docs: try: frappe.get_doc(d).insert() except frappe.NameError: pass def get_admin_password(): def ask_admin_password(): admin_password = getpass.getpass("Set Administrator password: ") admin_password2 = getpass.getpass("Re-enter Administrator password: ") if not admin_password == admin_password2: print("\nPasswords do not match") return ask_admin_password() return admin_password admin_password = frappe.conf.get("admin_password") if not admin_password: return ask_admin_password() return admin_password def before_tests(): if len(frappe.get_installed_apps()) > 1: # don't run before tests if any other app is installed return frappe.db.sql("delete from `tabCustom Field`") frappe.db.sql("delete from `tabEvent`") frappe.db.commit() frappe.clear_cache() # complete setup if missing from frappe.desk.page.setup_wizard.setup_wizard import setup_complete if not int(frappe.db.get_single_value('System Settings', 'setup_complete') or 0): setup_complete({ "language" :"english", "email" :"[email protected]", "full_name" :"Test User", "password" 
:"test", "country" :"United States", "timezone" :"America/New_York", "currency" :"USD" }) frappe.db.commit() frappe.clear_cache() def import_country_and_currency(): from frappe.geo.country_info import get_all from frappe.utils import update_progress_bar data = get_all() for i, name in enumerate(data): update_progress_bar("Updating country info", i, len(data)) country = frappe._dict(data[name]) add_country_and_currency(name, country) print("") # enable frequently used currencies for currency in ("INR", "USD", "GBP", "EUR", "AED", "AUD", "JPY", "CNY", "CHF"): frappe.db.set_value("Currency", currency, "enabled", 1) def add_country_and_currency(name, country): if not frappe.db.exists("Country", name): frappe.get_doc({ "doctype": "Country", "country_name": name, "code": country.code, "date_format": country.date_format or "dd-mm-yyyy", "time_zones": "\n".join(country.timezones or []), "docstatus": 0 }).db_insert() if country.currency and not frappe.db.exists("Currency", country.currency): frappe.get_doc({ "doctype": "Currency", "currency_name": country.currency, "fraction": country.currency_fraction, "symbol": country.currency_symbol, "fraction_units": country.currency_fraction_units, "smallest_currency_fraction_value": country.smallest_currency_fraction_value, "number_format": country.number_format, "docstatus": 0 }).db_insert()
0.030397
""" Annotation Component Page. """ from bok_choy.page_object import PageObject from selenium.webdriver import ActionChains class AnnotationComponentPage(PageObject): """ View of annotation component page. """ url = None active_problem = 0 def is_browser_on_page(self): return self.q(css='.annotatable-title').present @property def component_name(self): """ Return the current problem name. """ return self.q(css='.annotatable-title').text[0] def click_reply_annotation(self, problem): """ Mouse over on annotation selector and click on "Reply to Annotation". """ annotation_span_selector = '.annotatable-span[data-problem-id="{}"]'.format(problem) self.mouse_hover(self.browser.find_element_by_css_selector(annotation_span_selector)) self.wait_for_element_visibility(annotation_span_selector, "Reply to Annotation link is visible") annotation_reply_selector = '.annotatable-reply[data-problem-id="{}"]'.format(problem) self.q(css=annotation_reply_selector).click() self.active_problem = problem def active_problem_selector(self, sub_selector): """ Return css selector for current active problem with sub_selector. """ return 'div[data-problem-id="{}"] {}'.format( self.q(css='.vert-{}'.format(self.active_problem + 1)).map( lambda el: el.get_attribute('data-id')).results[0], sub_selector, ) def mouse_hover(self, element): """ Mouse over on given element. """ mouse_hover_action = ActionChains(self.browser).move_to_element(element) mouse_hover_action.perform() def check_scroll_to_problem(self): """ Return visibility of active problem's input selector. """ annotation_input_selector = self.active_problem_selector('.annotation-input') return self.q(css=annotation_input_selector).visible def answer_problem(self): """ Submit correct answer for active problem. """ self.q(css=self.active_problem_selector('.comment')).fill('Test Response') answer_css = self.active_problem_selector('.tag[data-id="{}"]'.format(self.active_problem)) # Selenium will first move the element into view then click on it. self.q(css=answer_css).click() # Wait for the click to take effect, which is after the class is applied. self.wait_for(lambda: 'selected' in self.q(css=answer_css).attrs('class')[0], description='answer selected') # Click the "Check" button. self.q(css=self.active_problem_selector('.submit')).click() # This will trigger a POST to problem_check so wait until the response is returned. self.wait_for_ajax() def check_feedback(self): """ Return visibility of active problem's feedback. """ self.wait_for_element_visibility( self.active_problem_selector('.tag-status.correct'), "Correct is visible" ) return self.q(css=self.active_problem_selector('.tag-status.correct')).visible def click_return_to_annotation(self): """ Click on active problem's "Return to Annotation" link. """ self.q(css=self.active_problem_selector('.annotation-return')).click() def check_scroll_to_annotation(self): """ Return visibility of active annotation component header. """ annotation_header_selector = '.annotation-header' return self.q(css=annotation_header_selector).visible
0.003566
import tweepy import logging import time from crawler.src.CRUD import CRUD from crawler.src.db.DBConnection import DBConnection class TwitterApiScrap: """ Define the initial parameters and create the stream object for fetching and storing the tweets. """ def __init__(self, path_home, conn_sec, schema, table, consumer_key, consumer_secret, access_token, access_token_secret, geo=None, search_word=None): self.geo = geo self.path_home = path_home self.conn_sec = conn_sec self.conn_schema = schema self.conn_table = table self.search_word = search_word self.consumer_key = consumer_key self.consumer_secret = consumer_secret self.access_token = access_token self.access_token_secret = access_token_secret self.running = False # Create database connection to store the tweets self.CRUD = CRUD(self.path_home, self.conn_sec) # Create database table if it does not exist self.create_table() # Create the Twitter Stream object if running variable is False while True: if not self.running: self.init() def create_table(self): """ Create the tweets's database table from the template (file template-table.sql). """ try: conn = DBConnection(self.path_home, self.conn_sec).connect_database() try: template = open(self.path_home + '/template-table.sql', 'r').read() % \ (str(self.conn_schema), str(self.conn_table), str(self.conn_table), str(self.conn_schema), str(self.conn_table), str(self.conn_schema), str(self.conn_table), str(self.conn_table), str(self.conn_schema), str(self.conn_table)) cur = conn.cursor() cur.execute(template) conn.commit() except Exception as e: raise e finally: conn.close() except Exception as e: logging.error(e) pass def init(self): auth = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret) auth.set_access_token(self.access_token, self.access_token_secret) try: my_stream_listener = MyStreamListener my_stream = tweepy.Stream(auth=auth, listener=my_stream_listener( crud=self.CRUD, conn_sec=self.conn_sec, conn_schema=self.conn_schema, conn_table=self.conn_table)) # Choose the kind of stream - either bounding box or word track. if self.search_word: my_stream.filter(track=[self.search_word], async=True) else: my_stream.filter(locations=self.geo, async=True) # Check if the connection stream is active and # break if it is not. init() function will restart # the connection stream. self.running = my_stream.running while True: if not my_stream.running: self.running = False time.sleep(60) # Check each 60 sec. break except Exception as e: logging.error(e) pass class MyStreamListener(tweepy.StreamListener): def __init__(self, crud, conn_sec, conn_schema, conn_table): self.crud = crud self.conn_sec = conn_sec self.conn_schema = conn_schema self.conn_table = conn_table super(MyStreamListener, self).__init__() def on_data(self, raw_data): self.crud.save(raw_data, self.conn_schema + '.' + self.conn_table) def on_exception(self, exception): logging.error(exception) pass def on_connect(self): logging.info('Connection ' + self.conn_sec + ' established!!') pass def on_disconnect(self, notice): logging.info('Connection ' + self.conn_sec + ' lost!! : ', notice) pass def on_error(self, status): logging.error(status) pass
0.001142
#!/usr/bin/env python # Copyright (c) 2005 The Regents of The University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Ali Saidi # Nathan Binkert import os, os.path, re, socket, sys from os import environ as env, listdir from os.path import basename, isdir, isfile, islink, join as joinpath, normpath from filecmp import cmp as filecmp from shutil import copy def nfspath(dir): if dir.startswith('/.automount/'): dir = '/n/%s' % dir[12:] elif not dir.startswith('/n/'): dir = '/n/%s%s' % (socket.gethostname().split('.')[0], dir) return dir def syncdir(srcdir, destdir): srcdir = normpath(srcdir) destdir = normpath(destdir) if not isdir(destdir): sys.exit('destination directory "%s" does not exist' % destdir) for root, dirs, files in os.walk(srcdir): root = normpath(root) prefix = os.path.commonprefix([root, srcdir]) root = root[len(prefix):] if root.startswith('/'): root = root[1:] for rem in [ d for d in dirs if d.startswith('.') or d == 'SCCS']: dirs.remove(rem) for entry in dirs: newdir = joinpath(destdir, root, entry) if not isdir(newdir): os.mkdir(newdir) print 'mkdir', newdir for i,d in enumerate(dirs): if islink(joinpath(srcdir, root, d)): dirs[i] = joinpath(d, '.') for entry in files: dest = normpath(joinpath(destdir, root, entry)) src = normpath(joinpath(srcdir, root, entry)) if not isfile(dest) or not filecmp(src, dest): print 'copy %s %s' % (dest, src) copy(src, dest) progpath = nfspath(sys.path[0]) progname = basename(sys.argv[0]) usage = """\ Usage: %(progname)s [-c] [-e] [-f] [-j <jobfile>] [-q queue] [-v] <regexp> -c clean directory if job can be run -C submit the checkpointing runs -d Make jobs be dependent on the completion of the checkpoint runs -e only echo pbs command info, don't actually send the job -f force the job to run regardless of state -q <queue> submit job to the named queue -j <jobfile> specify the jobfile (default is <rootdir>/Test.py) -v be verbose %(progname)s [-j <jobfile>] -l [-v] <regexp> -j <jobfile> specify the jobfile (default is <rootdir>/Test.py) -l list job names, don't submit -v be verbose (list job parameters) 
%(progname)s -h -h display this help """ % locals() try: import getopt opts, args = getopt.getopt(sys.argv[1:], '-Ccdefhj:lnq:Rt:v') except getopt.GetoptError: sys.exit(usage) depend = False clean = False onlyecho = False exprs = [] force = False listonly = False queue = '' verbose = False jfile = 'Test.py' docpts = False doruns = True runflag = False node_type = 'FAST' update = True for opt,arg in opts: if opt == '-C': docpts = True if opt == '-c': clean = True if opt == '-d': depend = True if opt == '-e': onlyecho = True if opt == '-f': force = True if opt == '-h': print usage sys.exit(0) if opt == '-j': jfile = arg if opt == '-l': listonly = True if opt == '-n': update = False if opt == '-q': queue = arg if opt == '-R': runflag = True if opt == '-t': node_type = arg if opt == '-v': verbose = True if docpts: doruns = runflag for arg in args: exprs.append(re.compile(arg)) import jobfile, pbs from job import JobDir, date conf = jobfile.JobFile(jfile) if update and not listonly and not onlyecho and isdir(conf.linkdir): if verbose: print 'Checking for outdated files in Link directory' if not isdir(conf.basedir): os.mkdir(conf.basedir) syncdir(conf.linkdir, conf.basedir) jobnames = {} joblist = [] if docpts and doruns: gen = conf.alljobs() elif docpts: gen = conf.checkpoints() elif doruns: gen = conf.jobs() for job in gen: if job.name in jobnames: continue if exprs: for expr in exprs: if expr.match(job.name): joblist.append(job) break else: joblist.append(job) if listonly: if verbose: for job in joblist: job.printinfo() else: for job in joblist: print job.name sys.exit(0) if not onlyecho: newlist = [] for job in joblist: jobdir = JobDir(joinpath(conf.rootdir, job.name)) if jobdir.exists(): if not force: status = jobdir.getstatus() if status == 'queued': continue if status == 'running': continue if status == 'success': continue if not clean: sys.exit('job directory %s not clean!' % jobdir) jobdir.clean() newlist.append(job) joblist = newlist class NameHack(object): def __init__(self, host='pbs.pool', port=24465): self.host = host self.port = port self.socket = None def setname(self, jobid, jobname): try: jobid = int(jobid) except ValueError: jobid = int(jobid.strip().split('.')[0]) jobname = jobname.strip() # since pbs can handle jobnames of 15 characters or less, # don't use the raj hack. 
if len(jobname) <= 15: return if self.socket is None: import socket self.socket = socket.socket() # Connect to pbs.pool and send the jobid/jobname pair to port # 24465 (Raj didn't realize that there are only 64k ports and # setup inetd to point to port 90001) self.socket.connect((self.host, self.port)) self.socket.send("%s %s\n" % (jobid, jobname)) namehack = NameHack() for job in joblist: jobdir = JobDir(joinpath(conf.rootdir, job.name)) if depend: cptdir = JobDir(joinpath(conf.rootdir, job.checkpoint.name)) cptjob = cptdir.readval('.pbs_jobid') if not onlyecho: jobdir.create() print 'Job name: %s' % job.name print 'Job directory: %s' % jobdir qsub = pbs.qsub() qsub.pbshost = 'simpool.eecs.umich.edu' qsub.stdout = jobdir.file('jobout') qsub.name = job.name[:15] qsub.join = True qsub.node_type = node_type qsub.env['ROOTDIR'] = conf.rootdir qsub.env['JOBNAME'] = job.name if depend: qsub.afterok = cptjob if queue: qsub.queue = queue qsub.build(joinpath(progpath, 'job.py')) if verbose: print 'PBS Command: %s' % qsub.command if not onlyecho: ec = qsub.do() if ec == 0: jobid = qsub.result print 'PBS Jobid: %s' % jobid namehack.setname(jobid, job.name) queued = date() jobdir.echofile('.pbs_jobid', jobid) jobdir.echofile('.pbs_jobname', job.name) jobdir.echofile('.queued', queued) jobdir.setstatus('queued on %s' % queued) else: print 'PBS Failed'
0.001482
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Computes a header file to be used with SELECTIVE_REGISTRATION. See the executable wrapper, print_selective_registration_header.py, for more information. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys from google.protobuf import text_format from tensorflow.core.framework import graph_pb2 from tensorflow.python import pywrap_tensorflow from tensorflow.python.platform import gfile from tensorflow.python.platform import tf_logging def get_ops_and_kernels(proto_fileformat, proto_files, default_ops_str): """Gets the ops and kernels needed from the model files.""" ops = set() for proto_file in proto_files: tf_logging.info('Loading proto file %s', proto_file) # Load GraphDef. file_data = gfile.GFile(proto_file, 'rb').read() if proto_fileformat == 'rawproto': graph_def = graph_pb2.GraphDef.FromString(file_data) else: assert proto_fileformat == 'textproto' graph_def = text_format.Parse(file_data, graph_pb2.GraphDef()) # Find all ops and kernels used by the graph. for node_def in graph_def.node: if not node_def.device: node_def.device = '/cpu:0' kernel_class = pywrap_tensorflow.TryFindKernelClass( node_def.SerializeToString()) if kernel_class: op_and_kernel = (str(node_def.op), str(kernel_class.decode('utf-8'))) if op_and_kernel not in ops: ops.add(op_and_kernel) else: print( 'Warning: no kernel found for op %s' % node_def.op, file=sys.stderr) # Add default ops. if default_ops_str and default_ops_str != 'all': for s in default_ops_str.split(','): op, kernel = s.split(':') op_and_kernel = (op, kernel) if op_and_kernel not in ops: ops.add(op_and_kernel) return list(sorted(ops)) def get_header_from_ops_and_kernels(ops_and_kernels, include_all_ops_and_kernels): """Returns a header for use with tensorflow SELECTIVE_REGISTRATION. Args: ops_and_kernels: a set of (op_name, kernel_class_name) pairs to include. include_all_ops_and_kernels: if True, ops_and_kernels is ignored and all op kernels are included. Returns: the string of the header that should be written as ops_to_register.h. """ ops = set([op for op, _ in ops_and_kernels]) result_list = [] def append(s): result_list.append(s) _, script_name = os.path.split(sys.argv[0]) append('// This file was autogenerated by %s' % script_name) append('#ifndef OPS_TO_REGISTER') append('#define OPS_TO_REGISTER') if include_all_ops_and_kernels: append('#define SHOULD_REGISTER_OP(op) true') append('#define SHOULD_REGISTER_OP_KERNEL(clz) true') append('#define SHOULD_REGISTER_OP_GRADIENT true') else: line = ''' namespace { constexpr const char* skip(const char* x) { return (*x) ? (*x == ' ' ? skip(x + 1) : x) : x; } constexpr bool isequal(const char* x, const char* y) { return (*skip(x) && *skip(y)) ? 
(*skip(x) == *skip(y) && isequal(skip(x) + 1, skip(y) + 1)) : (!*skip(x) && !*skip(y)); } template<int N> struct find_in { static constexpr bool f(const char* x, const char* const y[N]) { return isequal(x, y[0]) || find_in<N - 1>::f(x, y + 1); } }; template<> struct find_in<0> { static constexpr bool f(const char* x, const char* const y[]) { return false; } }; } // end namespace ''' line += 'constexpr const char* kNecessaryOpKernelClasses[] = {\n' for _, kernel_class in ops_and_kernels: line += '"%s",\n' % kernel_class line += '};' append(line) append('#define SHOULD_REGISTER_OP_KERNEL(clz) ' '(find_in<sizeof(kNecessaryOpKernelClasses) ' '/ sizeof(*kNecessaryOpKernelClasses)>::f(clz, ' 'kNecessaryOpKernelClasses))') append('') append('constexpr inline bool ShouldRegisterOp(const char op[]) {') append(' return false') for op in sorted(ops): append(' || isequal(op, "%s")' % op) append(' ;') append('}') append('#define SHOULD_REGISTER_OP(op) ShouldRegisterOp(op)') append('') append('#define SHOULD_REGISTER_OP_GRADIENT ' + ( 'true' if 'SymbolicGradient' in ops else 'false')) append('#endif') return '\n'.join(result_list) def get_header(graphs, proto_fileformat='rawproto', default_ops='NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'): """Computes a header for use with tensorflow SELECTIVE_REGISTRATION. Args: graphs: a list of paths to GraphDef files to include. proto_fileformat: optional format of proto file, either 'textproto' or 'rawproto' (default). default_ops: optional comma-separated string of operator:kernel pairs to always include implementation for. Pass 'all' to have all operators and kernels included. Default: 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'. Returns: the string of the header that should be written as ops_to_register.h. """ ops_and_kernels = get_ops_and_kernels(proto_fileformat, graphs, default_ops) if not ops_and_kernels: print('Error reading graph!') return 1 return get_header_from_ops_and_kernels(ops_and_kernels, default_ops == 'all')
0.006058
import sqlite3 import os.path import sys import random import glob def makeDatabase(databaseName): if databaseName[-7:] != ".sqlite": databaseName = databaseName + ".sqlite" conn = sqlite3.connect(databaseName) conn.commit() conn.close() def listToString(list): string = "" for i in list: string += str(i)+"\t" return string[:-1] def stringToList(string): list = [str(line) for line in string.split('\t')] return list def listdir_nohidden(path): # Return only the non-hidden files in a directory, to avoid that annoying .DS_Store file return glob.glob(os.path.join(path, '*')) #class for connecting, inserting, and retrieving information from a sqlite3 database class SqliteDB: #connects to the database, alters its name if named incorrectly def __init__(self, databaseName): if databaseName[-7:] != ".sqlite": databaseName = databaseName + ".sqlite" if os.path.isfile(databaseName): self.databaseName = databaseName; self.conn = sqlite3.connect(self.databaseName) self.cursor = self.conn.cursor() else: #sees if database name is unique, so it doesn't overwrite anything sys.exit("This database does not exist, use the makeDatabase(databaseName) to create it") def createTables(self): #creates tables if they do not exist self.cursor.execute("CREATE TABLE IF NOT EXISTS students (row INTEGER PRIMARY KEY NOT NULL, fullName unicode, wID text, email text, UNIQUE(wID, email) UNIQUE(fullName, email) ON CONFLICT ABORT)") self.cursor.execute("CREATE TABLE IF NOT EXISTS submissions (row INTEGER PRIMARY KEY NOT NULL, labNumber int, wID text, URL text, metadata text)") self.cursor.execute("CREATE TABLE IF NOT EXISTS assignments (row INTEGER PRIMARY KEY NOT NULL, labNumber int, wID text, questionIndex int, URL text)") # self.cursor.execute("CREATE TABLE IF NOT EXISTS uniqueStudentURL (labNumber int, wID text, URL text, UNIQUE(URL) ON CONFLICT ABORT)") self.cursor.execute("CREATE TABLE IF NOT EXISTS experts (row INTEGER PRIMARY KEY NOT NULL, labNumber int, URL text, itemIndex int, response text, hidden boolean, practice boolean)") self.cursor.execute("CREATE TABLE IF NOT EXISTS responses (row INTEGER PRIMARY KEY NOT NULL, labNumber int, wID text, wQuestion int, URL text, itemIndex int, response text)") self.cursor.execute("CREATE TABLE IF NOT EXISTS questions (row INTEGER PRIMARY KEY NOT NULL, labNumber int, questionIndex int, wQuestion int, practice boolean)") self.cursor.execute("CREATE TABLE IF NOT EXISTS rubrics (row INTEGER PRIMARY KEY NOT NULL, labNumber int, itemIndex int, itemType text, itemPrompt text, graded boolean)") self.cursor.execute("CREATE TABLE IF NOT EXISTS responseKeys (row INTEGER PRIMARY KEY NOT NULL, labNumber int, itemIndex int, response int, score number)") # weightString = '' # for i in range(6): # weightString += ', weight'+str(i+1)+' num' # self.cursor.execute("CREATE TABLE IF NOT EXISTS weightsBIBI (row INTEGER PRIMARY KEY NOT NULL, labNumber int, wID text"+weightString+", weightSum num)") self.cursor.execute("CREATE TABLE IF NOT EXISTS weights(row INTEGER PRIMARY KEY NOT NULL, labNumber int, wID text, weightType text, itemIndex int, weight number)") self.cursor.execute("CREATE TABLE IF NOT EXISTS itemGrades(row INTEGER PRIMARY KEY NOT NULL, labNumber int, wID text, URL text, itemIndex int, rawScore number, grade number, calibrated boolean)") self.cursor.execute("CREATE TABLE IF NOT EXISTS finalGrades(row INTEGER PRIMARY KEY NOT NULL, labNumber int, wID text, URL text, rawScore number, grade number, calibrated boolean)") # Calculate calibratoin grades self.cursor.execute("CREATE TABLE IF NOT 
EXISTS calibrationGrades (row INTEGER PRIMARY KEY NOT NULL, labNumber int, wID text, nCalibration int, calibratedItems text, weightType text, rawScore number, grade number, UNIQUE(wID, labNumber, nCalibration, calibratedItems, weightType))") ##check to see if the tables have already been created #creates columns in tables for each lab specified self.conn.commit() #adds a person into the database, works for both new users and existing ones def addSubmission(self, wID, URL, labNumber, metadata = None): if self.databaseName != None and self.conn != None and self.cursor !=None: self.cursor.execute("INSERT INTO submissions VALUES(?,?,?,?,?)", [None,labNumber, wID, URL ,metadata]) # self.conn.commit() def addEmail(self, wID, email): try: self.cursor.execute("INSERT INTO students VALUES (?,?,?)", [wID, email]) except: print "wID: " + wID + " or email: " + email + " already in database." #retrieves URL for a specific student and specific lab number def getURL(self, wID, labNumber): self.cursor.execute("SELECT URL FROM submissions WHERE labNumber=? AND wID=?", [labNumber, wID]) URL = self.cursor.fetchone(); if URL is not None: return (URL[0]) else: return None def addExpertURL(self, labNumber, URL, itemIndex, response, hidden, practice): # self.cursor.execute("SELECT * FROM experts WHERE URL = ?", [URL]) #adds in a user if not in database already self.cursor.execute("INSERT INTO experts VALUES (NULL, ?, ?, ?, ?, ?, ?)", [labNumber, URL, itemIndex, response, hidden, practice]) self.conn.commit() ##find a way to make seperate expert tables for each lab, and then join them together to prevent the staggaring of grades in the excel sheet #self.cursor.execute("SELECT * FROM expert WHERE Lab1Grade") #print self.cursor.fetchall() #query = ("SELECT {0} FROM expert WHERE wID def getExpertURLs(self, labNumber): self.cursor.execute("SElECT URL, grade FROM experts where labNumber=?", [labNumber]) URLsAndGrades = {} for d in self.cursor.fetchall(): URLsAndGrades[str(d[0])] = stringToList(str(d[1])) return URLsAndGrades def finalize(self, labNumber, seed, N, MOOC=False): ##randomize the youtube URLs #for each wID #put that into the databse under the student ID self.cursor.execute("SELECT URL, hidden FROM experts WHERE labNumber=?", [labNumber]) expertURL = [] hiddenURL = [] for d in self.cursor.fetchall(): if d[1] == 1: hiddenURL.append(str(d[0])) elif d[1] == 0: expertURL.append(str(d[0])) #deprecated code, due to its slowness #self.cursor.execute("SELECT URL FROM experts WHERE labNumber=? and hidden=0", [labNumber]) #expertURL = [str(d[0]) for d in self.cursor.fetchall()] # find all the hidden expert videos #self.cursor.execute("SELECT URL FROM experts WHERE labNumber=? 
and hidden=1", [labNumber]) #hiddenURL = [str(d[0]) for d in self.cursor.fetchall()] #get all the studnet URLs self.cursor.execute("SELECT wID, URL from submissions WHERE labNumber=?", [labNumber]) data = [] URLTowIDDict = {} pseudoURL = {} for d in self.cursor.fetchall(): data.append(str(d[1])) URLTowIDDict[str(d[1])] = str(d[0]) print "original data size", len(data) #assign the students whos videos are designated expert graded URLs to grade, and remove them from the URL pool retrieved above if len(expertURL) + N + 1 <= len(data): for d in expertURL: #if the expertURL is not in the data list, then it is a video that is not submitted by a student this sem #semester, in which case, we skip it if d in data: #self.cursor.execute("SELECT wID FROM submissions WHERE URL=?", [d]) indice = (data.index(d) + 1) % len(data) while data[indice] in expertURL or data[indice] in hiddenURL: indice = (indice + 1) % len(data) pseudoURL[d] = data[indice] data.remove(d) for d in hiddenURL: if d in data: indice = (data.index(d) + 1) % len(data) while data[indice] in expertURL or data[indice] in hiddenURL: indice = (indice + 1) % len(data) pseudoURL[d] = data[indice] data.remove(d) self.cursor.execute("SELECT wID FROM submissions WHERE labNumber=? and URL is ''", [labNumber]) noURLSubmitted = [str(d[0]) for d in self.cursor.fetchall()] wIDPseudoURL = {} if(data.count('') > 0) and not MOOC: for d in noURLSubmitted: indice = (data.index('') + 1) % len(data) while data[indice] == '': indice = (indice + 1) % len(data) wIDPseudoURL[d] = data[indice] data.remove('') else: while '' in data: data.remove('') self.cursor.execute("SELECT wID FROM submissions WHERE labNumber=? AND URL=?", [labNumber, "DUPLICATEURL"]) noURLSubmitted = [str(d[0]) for d in self.cursor.fetchall()] if(data.count("DUPLICATEURL") > 0) and not MOOC: for d in noURLSubmitted: indice = (data.index("DUPLICATEURL") + 1) % len(data) while data[indice] == "DUPLICATEURL": indice = (indice + 1) % len(data) wIDPseudoURL[d] = data[indice] data.remove("DUPLICATEURL") else: while '' in data: data.remove('') #self.cursor.execute(query) random.shuffle(data) selectFrom = data + data[:N + len(expertURL) + 1] if len(pseudoURL.keys()) > 0: # params = ("Lab" + str(labNumber) + "URLSToGrade", "Lab" + str(labNumber) + "URL") for key in pseudoURL.keys(): startIndex = selectFrom.index(pseudoURL[key]) URLSToGrade = selectFrom[startIndex: startIndex+N+1] for i in hiddenURL: URLSToGrade.append(i) random.shuffle(URLSToGrade) for i in range(0, len(URLSToGrade)): #i+1 because we want item index to start at 1 self.cursor.execute("INSERT INTO assignments VALUES(NULL, ?, ?, ?, ?)", [labNumber, key, i+1, URLSToGrade[i]]) self.conn.commit() if len(wIDPseudoURL.keys()) > 0: for key in wIDPseudoURL.keys(): startIndex = selectFrom.index(wIDPseudoURL[key]) URLSToGrade = selectFrom[startIndex: startIndex+N+1] for i in hiddenURL: URLSToGrade.append(i) random.shuffle(URLSToGrade) for i in range(0, len(URLSToGrade)): #i+1 because we want item index to start at 1 self.cursor.execute("INSERT INTO assignments VALUES(NULL, ?, ?, ?, ?)", [labNumber, key, i+1, URLSToGrade[i]]) self.conn.commit() if len(data) > N: for d in data: startIndex = selectFrom.index(d) URLSToGrade = selectFrom[startIndex:startIndex+N+1] for i in hiddenURL: URLSToGrade.append(i) random.shuffle(URLSToGrade) # params = ("Lab" + str(labNumber) + "URLSToGrade", "Lab" + str(labNumber) + "URL") # self.cursor.execute("UPDATE submissions SET URLsToGrade=? WHERE URL=? 
and labNumber=?", [listToString(expertURL + URLSToGrade), d, labNumber]) for i in range(0, len(URLSToGrade)): #i+1 because we want item index to start at 1 self.cursor.execute("INSERT INTO assignments VALUES(NULL, ?, ?, ?, ?)", [labNumber, URLTowIDDict[d], i+1, URLSToGrade[i]]) self.conn.commit() ############################################################################################################## #will comment out once these conditions are met, due to some issues with encrypting URLs due to FERPA. ############################################################################################################## #checks to see if each URL was assigned >4 times self.cursor.execute("SELECT URL from assignments") #checks is the URLs in the Assignment table checks = [str(d[0]) for d in self.cursor.fetchall()] print "-sanity checks, checks to see if URL was assigned more than N+1 or less than N" for URL in set(checks): if checks.count(URL)>N+1: print checks.count(URL), URL, data.count(URL), "->num of times assigned, URL, number of times submitted, respectively" elif checks.count(URL) < N: print checks.count(URL), URL, data.count(URL), "->num of times assigned, URL, number of times submitted, respectively" print "-end of checks to see if URL was assigned the correct number of times" #is the # of times that hiddenURL is assigned the same as submissions? for item in set(data): if item not in checks: print item + " not in checks" print "length of set(checks) vs length of checks:", len(set(checks)), len(checks) print "length of set(data) vs length of data:", len(set(data)), len(data) dictThings = [URLTowIDDict[d] for d in URLTowIDDict.keys()] for i in dictThings: if dictThings.count(i)>1: print i, "is in URLtowIDDict more than once" #print hiddenURL #print checks.count(hiddenURL[0]) self.cursor.execute("SELECT wID FROM submissions WHERE labNumber=3") print "number of unique wIDs in submissions:", len(set(self.cursor.fetchall())) self.cursor.execute("SELECT wID FROM assignments") print "number of unique wIDs in assignments:", len(set(self.cursor.fetchall())) ########################################################################################################## def getURLsToGrade(self, wID, labNumber): self.cursor.execute("Select URL FROM assignments WHERE wID=? and labNumber=? ORDER BY questionIndex", [wID, labNumber]) dbExtract = [entry for entry in self.cursor.fetchall()] if dbExtract == []: return False else: return [str(i[0]) for i in dbExtract] def addResponse(self, labNumber, wID, wQuestion, itemIndex, response): self.cursor.execute("INSERT INTO responses VALUES(NULL, ?, ?, ?, ?, ?)", [labNumber, wID, wQuestion, itemIndex, response]) self.conn.commit() def wIDGradesSubmitted(self, wID, labNumber): URLsToGrade = self.getURLsToGrade(wID, labNumber) gradesSubmitted = {} for URL in URLsToGrade: self.cursor.execute("SElECT grade FROM grades WHERE wID = ? 
AND URL = ?",[wID, URL]) dbExtract = self.cursor.fetchall() #if they did not grade the URL assigned to them if dbExtract!=[]: gradesSubmitted[URL] = stringToList(str(dbExtract[0][0])) else: gradesSubmitted[URL] = None return gradesSubmitted def compareToExpert(self, wID, labNumber): expertURLsAndGrades = self.getExpertURLs(labNumber) userSubmittedGrades = self.wIDGradesSubmitted(wID, labNumber) URLsGraded = userSubmittedGrades.keys() for key in expertURLsAndGrades.keys(): if key in URLsGraded: print expertURLsAndGrades[key] print userSubmittedGrades[key] def getGrades(self, wID, labNumber): URL = self.getURL(wID, labNumber) self.cursor.execute("SELECT grade,wID FROM grades WHERE URL=?", [URL]) grades = {} for d in self.cursor.fetchall(): grades[str(d[1])] = str(d[0]) return grades def check(self, labNumber): # params = ("Lab" + str(labNumber) + "URL", "Lab" + str(labNumber) + "URLsToGrade", None) self.cursor.execute("Select URL, URLsToGrade FROM submissions WHERE URL!= ''") fetch = self.cursor.fetchall() individualURL = [str(d[0]) for d in fetch] URLList = listToString([str(d[1]) for d in fetch]) for i in range(1, len(individualURL)-1): if individualURL[i] not in stringToList(URLList[i]): print individualURL[i] return False return True
0.028856
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
import numpy as np

from .geom import geom


class geom_text(geom):
    DEFAULT_AES = {'alpha': None, 'angle': 0, 'color': 'black', 'family': None,
                   'fontface': 1, 'hjust': None, 'size': 12, 'vjust': None,
                   'lineheight': 1.2}
    REQUIRED_AES = {'label', 'x', 'y'}
    DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity', 'parse': False}

    _aes_renames = {'angle': 'rotation', 'lineheight': 'linespacing'}
    _units = {'alpha', 'color', 'family', 'size'}

    def _plot_unit(self, pinfo, ax):
        x = pinfo.pop('x')
        y = pinfo.pop('y')
        label = pinfo.pop('label')

        # TODO: Deal with the fontface
        # from ggplot2
        # 1 = plain, 2 = bold, 3 = italic, 4 = bold italic
        # "plain", "bold", "italic", "oblique", and "bold.italic"
        pinfo.pop('fontface')

        # before taking max and min make sure x is not empty
        if len(x) == 0:
            return

        # plt.text does not resize axes, must do manually
        xmax = max(x)
        xmin = min(x)
        ymax = max(y)
        ymin = min(y)

        margin = 0.1
        xmargin = (xmax - xmin) * margin
        ymargin = (ymax - ymin) * margin
        xmax = xmax + xmargin
        xmin = xmin - xmargin
        ymax = ymax + ymargin
        ymin = ymin - ymargin

        # Take current plotting dimension in account for the case that we
        # work on a special dataframe just for this geom!
        if not self.data is None:
            # NOTE: not working??
            cxmin, cxmax = ax.get_xlim()
            cymin, cymax = ax.get_ylim()
            # there is a problem if geom_text is the first plot, as
            # then the dimension are 0-1 for all axis :-(
            xmax = max(xmax, cxmax)
            xmin = min(xmin, cxmin)
            ymax = max(ymax, cymax)
            ymin = min(ymin, cymin)

        # TODO: Fix the defaults for this
        # try out 0.5
        if pinfo['hjust'] is not None:
            x = (np.array(x) + pinfo['hjust']).tolist()
        else:
            pinfo['horizontalalignment'] = 'center'

        if pinfo['vjust'] is not None:
            y = (np.array(y) + pinfo['vjust']).tolist()
        else:
            pinfo['verticalalignment'] = 'center'

        del pinfo['hjust']
        del pinfo['vjust']

        for x_g, y_g, s in zip(x, y, label):
            ax.text(x_g, y_g, s, **pinfo)

        # TODO: Find out why this isn't working as desired
        # resize axes
        ax.axis([xmin, xmax, ymin, ymax])
0.004587
# Glumol - An adventure game creator
# Copyright (C) 1998-2008 Sylvain Baubeau & Alexis Contour

# This file is part of Glumol.

# Glumol is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.

# Glumol is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with Glumol. If not, see <http://www.gnu.org/licenses/>.

import sys
from pypoujol import *
from new import instancemethod
from log import log
from behaviour import *
from script import get_full_name


class GlumolNamespace:
    def __init__(self):
        self.globals = dict(globals())
        self.locals = { }

    def clear(self):
        # self.globals = dict(globals())
        # self.locals = { }
        pass

    def get_value(self, name):
        return self.globals[name]

    def call(self, func, args):
        self.locals["func"] = func
        self.locals["args"] = args
        exec "func(*args)" in self.locals, self.locals
        del self.locals["func"]
        del self.locals["args"]

    def eval(self, listing):
        return eval(listing, self.globals, self.locals)

    def run(self, listing, module = __name__, globs = None, locs = None):
        if not globs:
            globs = self.globals
        if not locs:
            locs = self.globals
        globs["__name__"] = module
        exec listing in globs, locs
        globs["__name__"] = "glumolnamespace"

    def getattr(self, name, *defaults):
        attrs = name.split('.')
        try:
            o = self.globals[attrs[0]]
        except:
            if len(defaults):
                try:
                    o = self.locals[attrs[0]]
                except:
                    o = defaults[0]
            else:
                o = self.locals[attrs[0]]
        for i in attrs[1:]:
            o = getattr(o, i, None)
        return o

    def create_object(self, classe, loc = None, args = tuple() ):
        if loc == None:
            loc = self.locals
        loc["classe"] = classe
        loc["args"] = args
        s = "classe(*args)"
        self.obj = newobj = eval(s, self.globals, self.locals)
        del loc["args"]
        del loc["classe"]
        return newobj

    def create_from_script(self, script, args = tuple() ):
        self.run(script.listing)
        try:
            classe = self.getattr(script.realname)
            if not classe:
                raise
        except:
            try:
                classe = self.getattr(script.name)
                if not classe:
                    raise
            except:
                classe = self.getattr(get_full_name(script.name))
        return self.create_object(classe, None, args)
0.016694
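The GlumolNamespace sample above is Python 2 and depends on the Glumol runtime modules, so it cannot be run directly; the following is a small Python 3 re-expression of the same pattern (a pair of shared dictionaries driving exec/eval, plus dotted-name lookup), with hypothetical names and no claim to match the project's API.

# Illustrative re-expression of the shared-namespace pattern above.
class SimpleNamespace:
    def __init__(self):
        self.globals = dict(globals())
        self.locals = {}

    def run(self, listing):
        # execute a source listing inside the shared dictionaries,
        # as GlumolNamespace.run does with its `exec ... in ...` statement
        exec(listing, self.globals, self.locals)

    def eval(self, expression):
        return eval(expression, self.globals, self.locals)

    def getattr(self, dotted_name, default=None):
        # resolve "A.b.c" against the stored namespaces
        head, *rest = dotted_name.split(".")
        obj = self.locals.get(head, self.globals.get(head, default))
        for part in rest:
            obj = getattr(obj, part, None)
        return obj


ns = SimpleNamespace()
ns.run("class Hero:\n    speed = 3\n")
print(ns.eval("Hero.speed"))       # 3
print(ns.getattr("Hero.speed"))    # 3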
#
# The Python Imaging Library.
# $Id$
#
# SGI image file handling
#
# See "The SGI Image File Format (Draft version 0.97)", Paul Haeberli.
# <ftp://ftp.sgi.com/graphics/SGIIMAGESPEC>
#
# History:
# 1995-09-10 fl   Created
#
# Copyright (c) 2008 by Karsten Hiddemann.
# Copyright (c) 1997 by Secret Labs AB.
# Copyright (c) 1995 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#

__version__ = "0.2"

from PIL import Image, ImageFile, _binary

i8 = _binary.i8
i16 = _binary.i16be
i32 = _binary.i32be


def _accept(prefix):
    return i16(prefix) == 474


##
# Image plugin for SGI images.

class SgiImageFile(ImageFile.ImageFile):

    format = "SGI"
    format_description = "SGI Image File Format"

    def _open(self):

        # HEAD
        s = self.fp.read(512)
        if i16(s) != 474:
            raise ValueError("Not an SGI image file")

        # relevant header entries
        compression = i8(s[2])

        # bytes, dimension, zsize
        layout = i8(s[3]), i16(s[4:]), i16(s[10:])

        # determine mode from bytes/zsize
        if layout == (1, 2, 1) or layout == (1, 1, 1):
            self.mode = "L"
        elif layout == (1, 3, 3):
            self.mode = "RGB"
        elif layout == (1, 3, 4):
            self.mode = "RGBA"
        else:
            raise ValueError("Unsupported SGI image mode")

        # size
        self.size = i16(s[6:]), i16(s[8:])

        # decoder info
        if compression == 0:
            offset = 512
            pagesize = self.size[0]*self.size[1]*layout[0]
            self.tile = []
            for layer in self.mode:
                self.tile.append(
                    ("raw", (0, 0)+self.size, offset, (layer, 0, -1)))
                offset = offset + pagesize
        elif compression == 1:
            raise ValueError("SGI RLE encoding not supported")

#
# registry

Image.register_open("SGI", SgiImageFile, _accept)

Image.register_extension("SGI", ".bw")
Image.register_extension("SGI", ".rgb")
Image.register_extension("SGI", ".rgba")
Image.register_extension("SGI", ".sgi")

# End of file
0.000474
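For context, a short usage sketch of the SGI plugin above through the normal Pillow entry point; the file path is a placeholder, and only uncompressed files are readable since the RLE branch in `_open` raises.

# Hypothetical usage once the plugin has registered itself with Image.
# "texture.rgb" is a made-up path; RLE-compressed SGI files raise ValueError.
from PIL import Image

im = Image.open("texture.rgb")
print(im.format, im.mode, im.size)     # e.g. SGI RGB (512, 512)
im.convert("RGB").save("texture.png")  # re-save in a more common format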
# Generated by h2py from /usr/include/netinet/in.h # Included from sys/endian.h LITTLE_ENDIAN = 1234 BIG_ENDIAN = 4321 PDP_ENDIAN = 3412 BYTE_ORDER = BIG_ENDIAN BYTE_ORDER = LITTLE_ENDIAN def ntohl(x): return (x) def ntohs(x): return (x) def htonl(x): return (x) def htons(x): return (x) def htonl(x): return ntohl(x) def htons(x): return ntohs(x) # Included from sys/bsd_types.h # Included from sys/mkdev.h ONBITSMAJOR = 7 ONBITSMINOR = 8 OMAXMAJ = 0x7f OMAXMIN = 0xff NBITSMAJOR = 14 NBITSMINOR = 18 MAXMAJ = 0x1ff MAXMIN = 0x3ffff OLDDEV = 0 NEWDEV = 1 MKDEV_VER = NEWDEV def major(dev): return __major(MKDEV_VER, dev) def minor(dev): return __minor(MKDEV_VER, dev) # Included from sys/select.h FD_SETSIZE = 1024 NBBY = 8 IPPROTO_IP = 0 IPPROTO_ICMP = 1 IPPROTO_IGMP = 2 IPPROTO_GGP = 3 IPPROTO_ENCAP = 4 IPPROTO_TCP = 6 IPPROTO_EGP = 8 IPPROTO_PUP = 12 IPPROTO_UDP = 17 IPPROTO_IDP = 22 IPPROTO_TP = 29 IPPROTO_XTP = 36 IPPROTO_HELLO = 63 IPPROTO_ND = 77 IPPROTO_EON = 80 IPPROTO_RAW = 255 IPPROTO_MAX = 256 IPPORT_RESERVED = 1024 IPPORT_USERRESERVED = 5000 IPPORT_MAXPORT = 65535 def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0) IN_CLASSA_NET = 0xff000000 IN_CLASSA_NSHIFT = 24 IN_CLASSA_HOST = 0x00ffffff IN_CLASSA_MAX = 128 def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000) IN_CLASSB_NET = 0xffff0000 IN_CLASSB_NSHIFT = 16 IN_CLASSB_HOST = 0x0000ffff IN_CLASSB_MAX = 65536 def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000) IN_CLASSC_NET = 0xffffff00 IN_CLASSC_NSHIFT = 8 IN_CLASSC_HOST = 0x000000ff def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000) IN_CLASSD_NET = 0xf0000000 IN_CLASSD_NSHIFT = 28 IN_CLASSD_HOST = 0x0fffffff def IN_MULTICAST(i): return IN_CLASSD(i) def IN_EXPERIMENTAL(i): return (((long)(i) & 0xf0000000) == 0xf0000000) def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000) INADDR_ANY = 0x00000000 INADDR_BROADCAST = 0xffffffff INADDR_LOOPBACK = 0x7F000001 INADDR_UNSPEC_GROUP = 0xe0000000 INADDR_ALLHOSTS_GROUP = 0xe0000001 INADDR_MAX_LOCAL_GROUP = 0xe00000ff INADDR_NONE = 0xffffffff IN_LOOPBACKNET = 127 IP_OPTIONS = 1 IP_MULTICAST_IF = 2 IP_MULTICAST_TTL = 3 IP_MULTICAST_LOOP = 4 IP_ADD_MEMBERSHIP = 5 IP_DROP_MEMBERSHIP = 6 IP_HDRINCL = 7 IP_TOS = 8 IP_TTL = 9 IP_RECVOPTS = 10 IP_RECVRETOPTS = 11 IP_RECVDSTADDR = 12 IP_RETOPTS = 13 IP_OPTIONS = 1 IP_HDRINCL = 2 IP_TOS = 3 IP_TTL = 4 IP_RECVOPTS = 5 IP_RECVRETOPTS = 6 IP_RECVDSTADDR = 7 IP_RETOPTS = 8 IP_MULTICAST_IF = 20 IP_MULTICAST_TTL = 21 IP_MULTICAST_LOOP = 22 IP_ADD_MEMBERSHIP = 23 IP_DROP_MEMBERSHIP = 24 IRIX4_IP_OPTIONS = 1 IRIX4_IP_MULTICAST_IF = 2 IRIX4_IP_MULTICAST_TTL = 3 IRIX4_IP_MULTICAST_LOOP = 4 IRIX4_IP_ADD_MEMBERSHIP = 5 IRIX4_IP_DROP_MEMBERSHIP = 6 IRIX4_IP_HDRINCL = 7 IRIX4_IP_TOS = 8 IRIX4_IP_TTL = 9 IRIX4_IP_RECVOPTS = 10 IRIX4_IP_RECVRETOPTS = 11 IRIX4_IP_RECVDSTADDR = 12 IRIX4_IP_RETOPTS = 13 IP_DEFAULT_MULTICAST_TTL = 1 IP_DEFAULT_MULTICAST_LOOP = 1 IP_MAX_MEMBERSHIPS = 20
0.004358
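A tiny illustration of the classful-address helpers in the generated constants above. The generated code targets Python 2 (the macros rely on the `long` builtin), so this sketch assumes a Python 2 interpreter with those definitions already in scope.

# 224.0.0.1 is the all-hosts multicast group; class D covers multicast.
addr = 0xe0000001

print(IN_CLASSD(addr))                 # True: top four bits are 1110
print(IN_MULTICAST(addr))              # True: defined as IN_CLASSD(addr)
print(addr == INADDR_ALLHOSTS_GROUP)   # True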
""" Bayesian Degree Corrected Stochastic Block Model roughly based on Infinite-degree-corrected stochastic block model by Herlau et. al., but with a fixed number of cluster sizes and a different update equation for the collapsed Gibbs sampler; see accompanying documentation """ import numpy as np import sys sys.path.append("/home/victor/Documents/community_detection/MCMC") from cgs_llhds import diri_multi_llhd from multi_sbm_helpers import comp_edge_cts, softmax from dcsbm_helpers import GD, BD, samp_shape_post_step, samp_rate_post_step, samp_gam_post_step class gen_data: def __init__(self, n, phis, eta): """ :param n: number of vertices in each community :param phis: list of probability distributions, phis[l] should be length n[l] and sum to 1 :param eta: symmetric matrix, eta[k,l] is expected number of edges between vertex in k and vertex in l """ self.n = n self.n_vert = sum(n) self.n_comm = len(n) self.phis = phis self.eta = eta z = np.repeat(0, self.n_vert) acc = 0 for l in range(self.n_comm - 1): acc += self.n[l] z[acc: acc + self.n[l + 1]] = l + 1 self.z = z phi = np.repeat(0., self.n_vert) phi[0:self.n[0]] = phis[0] acc = 0 for l in range(self.n_comm - 1): acc += self.n[l] phi[acc: acc + self.n[l + 1]] = phis[l + 1] self.phi = phi self.A = self.sampleA() def sampleA(self): """ Sample an adjacency matrix conditional on all other parameters :return ndarray, float. Sampled adjacency matrix """ A = np.zeros([self.n_vert, self.n_vert]) for i in range(self.n_vert): for j in range(i + 1, self.n_vert): thetai = self.n[self.z[i]] * self.phi[i] thetaj = self.n[self.z[j]] * self.phi[j] A[i, j] = np.random.poisson(thetai * thetaj * self.eta[self.z[i], self.z[j]]) A[j, i] = A[i, j] return A class gen_data_hypers(gen_data): "Sample a graph from the Bayesian DCSBM" def __init__(self, n_vert, n_comm, alpha, kap, lam, gam): """ :param n_vert: number of vertices in the graph :param n_comm: number of communities :param alpha: dirichlet prior parameter for community memberships :param kap: scalar, gamma dist param :param lam: scalar, gamma dist param :param gam: scalar, param for deg correction dirichlet dist. Basic block model recovered in gamma->inf limit """ self.n_vert = n_vert self.n_comm = n_comm self.alpha = alpha self.kap = kap self.lam = lam self.gam = gam self.n = np.random.multinomial(n_vert, np.random.dirichlet(np.repeat(alpha, n_comm))) z = np.repeat(0,n_vert) acc = 0 for l in range(n_comm-1): acc += self.n[l] z[acc : acc + self.n[l + 1]] = l+1 self.z=z phi_ls = [np.random.dirichlet(np.repeat(gam, nl)) for nl in self.n] phi = np.repeat(0.,self.n_vert) phi[0:self.n[0]] = phi_ls[0] acc = 0 for l in range(n_comm-1): acc += self.n[l] phi[acc : acc + self.n[l + 1]] = phi_ls[l+1] self.phis = phi_ls self.phi = phi eta = np.random.gamma(kap,1./lam,[n_comm,n_comm]) for k in range(n_comm): for l in range(k+1,n_comm): eta[k,l] = eta[l,k] self.eta = eta self.A = self.sampleA() class cgs: """ Collapsed Gibbs sampler for the Bayesian DCSBM """ def __init__(self, A, z, n_comm, alpha, kap, lam, gam): """ :param A: adjacency matrix of (multi) graph :param z: community identities of vertices :param n_comm: number of communities (in case some communities are empty at init) :param alpha: dirichlet prior parameter for community memberships :param kap: scalar, gamma dist param :param lam: scalar, gamma dist param :param gam: scalar, param for deg correction dirichlet dist. 
Basic block model recovered in gamma->inf limit """ self.A = A self.z = z self.n_comm = n_comm self.alpha = 1.*alpha self.kap = 1.*kap self.lam = 1.*lam self.gam = 1.*gam ''' compute initial values of sufficient stats ''' # comm_idxs[k] is list of indices of vertices in community k self.comm_idxs = [] for k in range(n_comm): self.comm_idxs.append([i for i, zi in enumerate(z) if zi == k]) # vertices in community k # n[k] is number of vertices in community k self.n = np.array([members.__len__() for members in self.comm_idxs]) # edge_cts[k,l] is number of edges between community k and community l, not counting self edges # warning: not counting self edges! self.edge_cts = comp_edge_cts(self.A, self.comm_idxs) self.n_vert = A.shape[0] # initial assignment for (latent) self-edges of the graph - shouldn't matter too much self.diags = np.repeat(0,self.n_vert) self.degs = np.sum(A,axis=1) # degree of each vertex # [vectors of the form [1 0 0 0], [0 1 0 0], etc., used to call diri-multi-llhd self._comm_indicators = [np.identity(n_comm, int)[j,:] for j in range(n_comm)] def sample_diags(self): """ Sample the self edges of the graph, which are treated as latent variables. Scheme for doing this is to sample eta, phi | z,A and then A_ii | eta,phi,z The relevant posterior distributions are derived in the Bayesian DCSBM paper """ eta_diags = np.zeros(self.n_vert ) # eta_diags[v] = eta_z[v]z[v] theta = np.zeros(self.n_vert ) # theta_l = n_l * phi_l for l in range(self.n_comm): #eta kap_post = self.kap + self.edge_cts[l,l] + np.sum(self.diags[self.comm_idxs[l]]) # kap_post = self.kap + self.edge_cts[l,l] lam_post = self.lam + self.n[l]**2 /2. # numpy uses a different convention for the parameterization of the gamma distribution eta_diags[self.comm_idxs[l]] = np.random.gamma(kap_post, 1./lam_post) # phi gam_post = self.gam + np.sum(self.A[self.comm_idxs[l],:],axis=1) + 2*self.diags[self.comm_idxs[l]] theta[self.comm_idxs[l]] = self.n[l] * np.random.dirichlet(gam_post) self.diags = np.random.poisson(0.5 * theta**2 * eta_diags) """ Functions to update zs """ def remove_vertex(self,v): """ removes vertex v from the sufficient stats :param v: integer, index of vertex to be removed """ edges_to_comm = [np.sum(self.A[v, comm]) for comm in self.comm_idxs] # vv: double checked this, it's fine self.comm_idxs[self.z[v]].remove(v) self.n[self.z[v]] -= 1 self.edge_cts[self.z[v], :] -= edges_to_comm self.edge_cts[:, self.z[v]] = self.edge_cts[self.z[v], :] # set label to invalid value. Should have no effect (except to throw error if invoked before being reassinged) self.z[v] = self.n_vert+1 def add_vertex(self,v,k): """ adds vertex v to community k, updating the sufficient stats of the SBM :param v: integer, index of vertex to be added :param k: integer, index of community to add vertex to """ # warning! 
this doesn't include self edges edges_to_comm = [np.sum(self.A[v, comm]) for comm in self.comm_idxs] self.z[v] = k self.comm_idxs[self.z[v]].append(v) self.n[k] += 1 self.edge_cts[self.z[v], :] += edges_to_comm self.edge_cts[:, self.z[v]] = self.edge_cts[self.z[v], :] def comm_llhd(self, v): """ computes a length n vector q such that q(k)-q(l) = log(P(A|z_-v,z_v =k)) - log(P(A|z_-v,z_v = l)) See associated documentation for derivation of these equations :param v: id of vertex with community identities to be computed :return: length n vector q such that q(k)-q(l) = log(P(A|z_-v,z_v =k)) - log(P(A|z_-v,z_v = l)) """ # dv_cts[m] is number of edges from v to community m, ignoring the self edges of (the communityless) v dv_cts = np.array([np.sum(self.A[v,comm]) for comm in self.comm_idxs]) tot_trials = 1.*np.outer(self.n, self.n) # number of pairs of vertices between comm k and l tot_trials[np.diag_indices_from(tot_trials)] = self.n ** 2 / 2. # possible connections to comm [l,l] is smaller log_comm_prob = np.zeros(self.n_comm) for k in range(self.n_comm): # SBM component edge_add = np.zeros(self.n_comm) tot_ct_add = np.zeros(self.n_comm) edge_add += dv_cts edge_add[k] += self.diags[v] tot_ct_add += self.n tot_ct_add[k] += 0.5 log_comm_prob[k] += np.sum(GD(self.edge_cts[k,:]+self.kap, tot_trials[k,:]+self.lam,edge_add,tot_ct_add)) #degree correction if self.n[k]!=0: deg_term = np.repeat(self.gam, self.n[k])+self.degs[self.comm_idxs[k]] log_comm_prob[k] += BD(deg_term, self.degs[v]+self.diags[v]+self.gam) \ - BD(np.repeat(self.gam,self.n[k]),self.gam) log_comm_prob[k] += (np.sum(self.edge_cts[k,:])+self.edge_cts[k,k]+dv_cts[k])*np.log(1.+1./self.n[k]) \ + (self.degs[v]+self.diags[v])*np.log(self.n[k]+1) return log_comm_prob def log_comm_prior(self): """ :return: log dirichlent multinomial values (prior for z), for pass to softmax """ return np.array([diri_multi_llhd(obs=comm_indic, alphas=self.alpha+self.n) for comm_indic in self._comm_indicators]) def update_z(self, v): """ Runs a single step of the collapsed gibbs sampler to resample the community identity of v :param v: integer, a vertex in the graph """ # sample new self edges of adjacency matrix # it's really irritating that this is required self.sample_diags() # add in the contribution from the self edges to the sufficient stats # self_edges[l] is number of self edges in community l self_edges = np.array([np.sum(self.diags[comm_idx]) for comm_idx in self.comm_idxs]) self.edge_cts[np.diag_indices_from(self.edge_cts)] += self_edges self.degs += self.diags ''' remove current vertex from sufficient stats ''' self.remove_vertex(v) ''' sample the new index conditional on all other community assignments using P(z_v = k | A, z_-v) \propto P( A | z_v = k, z_-v) * P(z_v = k | z_-v) ''' # P( A | z_v = k, z_-v) contribution log_comm_prob = self.comm_llhd(v) # P(z_v = k | z_-v) contribution log_comm_prob += self.log_comm_prior() # exponentiate and sample the new label comm_prob = softmax(log_comm_prob) new_comm = np.random.multinomial(1, comm_prob).nonzero()[0][0] ''' add the vertex back into the sufficient stats ''' self.add_vertex(v,new_comm) # clean up the self edges self.edge_cts[np.diag_indices_from(self.edge_cts)] -= self_edges self.degs -= self.diags def update_zs(self): """ Update the community indicators in all of the models """ vertex_order = range(self.n_vert) np.random.shuffle(vertex_order) for v in vertex_order: self.update_z(v) """ Functions to update higher level parameters (kap,lam,gam) """ def update_NB_params(self): kap = 
np.copy(self.kap) lam = np.copy(self.lam) tot_trials = 1. * np.outer(self.n, self.n) # number of pairs of vertices between comm k and l tot_trials[np.diag_indices_from(tot_trials)] = self.n ** 2 / 2. # possible connections to comm [l,l] is smaller # empty communities carry no information (but do cause divide by 0 errors -_-) ttm = np.ma.masked_values(tot_trials,0) em = np.ma.masked_array(self.edge_cts, ttm.mask) # count each community pair only once unique_pairs = np.triu_indices(self.n_comm) # update kappa # key observation is that e_lm ~ NB(kap, 1/(1+lam/n_lm)) # so we can use augmented conjugate update of Zhou&Carin 2012 ps = ttm[unique_pairs] / (ttm[unique_pairs] + lam) kap = samp_shape_post_step(em[unique_pairs].compressed(), kap, ps.compressed(), 0.1, 0.1) # update lambda # simple independent MH sampler lam = samp_rate_post_step(em[unique_pairs].compressed(), ttm[unique_pairs].compressed(), kap, lam) self.kap = kap self.lam = lam def update_gam(self): # metropolis-hasting update of gamma gam = np.copy(self.gam) terms = self.degs + 2*self.diags gam = samp_gam_post_step(terms, self.comm_idxs, gam) self.gam = gam
0.00775
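A rough driver sketch for the DCSBM sample above, not taken from the original repository: sample a graph from the generative model, then run the collapsed Gibbs sampler on it with a random initialization. It assumes the helper modules (cgs_llhds, multi_sbm_helpers, dcsbm_helpers) are importable, assumes a Python 2 interpreter (update_zs shuffles a range in place), and the hyperparameter values are purely illustrative.

# Illustrative use of gen_data_hypers and cgs as defined above.
import numpy as np

n_vert, n_comm = 60, 3
alpha, kap, lam, gam = 1.0, 1.0, 1.0, 5.0

data = gen_data_hypers(n_vert, n_comm, alpha, kap, lam, gam)   # sample A, z
init_z = np.random.randint(n_comm, size=n_vert)                # random labels
sampler = cgs(data.A, init_z, n_comm, alpha, kap, lam, gam)

for sweep in range(100):
    sampler.update_zs()          # resample every community label
    sampler.update_NB_params()   # resample kappa and lambda
    sampler.update_gam()         # resample the degree-correction parameter

print(sampler.z[:10])            # inferred labels, up to label switching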
from .. import util from ..util import sqla_compat from . import schemaobj from sqlalchemy.types import NULLTYPE from .base import Operations, BatchOperations import re class MigrateOperation(object): """base class for migration command and organization objects. This system is part of the operation extensibility API. .. versionadded:: 0.8.0 .. seealso:: :ref:`operation_objects` :ref:`operation_plugins` :ref:`customizing_revision` """ @util.memoized_property def info(self): """A dictionary that may be used to store arbitrary information along with this :class:`.MigrateOperation` object. """ return {} class AddConstraintOp(MigrateOperation): """Represent an add constraint operation.""" @property def constraint_type(self): raise NotImplementedError() @classmethod def from_constraint(cls, constraint): funcs = { "unique_constraint": CreateUniqueConstraintOp.from_constraint, "foreign_key_constraint": CreateForeignKeyOp.from_constraint, "primary_key_constraint": CreatePrimaryKeyOp.from_constraint, "check_constraint": CreateCheckConstraintOp.from_constraint, "column_check_constraint": CreateCheckConstraintOp.from_constraint, } return funcs[constraint.__visit_name__](constraint) def reverse(self): return DropConstraintOp.from_constraint(self.to_constraint()) def to_diff_tuple(self): return ("add_constraint", self.to_constraint()) @Operations.register_operation("drop_constraint") @BatchOperations.register_operation("drop_constraint", "batch_drop_constraint") class DropConstraintOp(MigrateOperation): """Represent a drop constraint operation.""" def __init__( self, constraint_name, table_name, type_=None, schema=None, _orig_constraint=None): self.constraint_name = constraint_name self.table_name = table_name self.constraint_type = type_ self.schema = schema self._orig_constraint = _orig_constraint def reverse(self): if self._orig_constraint is None: raise ValueError( "operation is not reversible; " "original constraint is not present") return AddConstraintOp.from_constraint(self._orig_constraint) def to_diff_tuple(self): if self.constraint_type == "foreignkey": return ("remove_fk", self.to_constraint()) else: return ("remove_constraint", self.to_constraint()) @classmethod def from_constraint(cls, constraint): types = { "unique_constraint": "unique", "foreign_key_constraint": "foreignkey", "primary_key_constraint": "primary", "check_constraint": "check", "column_check_constraint": "check", } constraint_table = sqla_compat._table_for_constraint(constraint) return cls( constraint.name, constraint_table.name, schema=constraint_table.schema, type_=types[constraint.__visit_name__], _orig_constraint=constraint ) def to_constraint(self): if self._orig_constraint is not None: return self._orig_constraint else: raise ValueError( "constraint cannot be produced; " "original constraint is not present") @classmethod @util._with_legacy_names([ ("type", "type_"), ("name", "constraint_name"), ]) def drop_constraint( cls, operations, constraint_name, table_name, type_=None, schema=None): """Drop a constraint of the given name, typically via DROP CONSTRAINT. :param constraint_name: name of the constraint. :param table_name: table name. :param ``type_``: optional, required on MySQL. can be 'foreignkey', 'primary', 'unique', or 'check'. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. .. 
versionadded:: 0.7.0 'schema' can now accept a :class:`~sqlalchemy.sql.elements.quoted_name` construct. .. versionchanged:: 0.8.0 The following positional argument names have been changed: * name -> constraint_name """ op = cls(constraint_name, table_name, type_=type_, schema=schema) return operations.invoke(op) @classmethod def batch_drop_constraint(cls, operations, constraint_name, type_=None): """Issue a "drop constraint" instruction using the current batch migration context. The batch form of this call omits the ``table_name`` and ``schema`` arguments from the call. .. seealso:: :meth:`.Operations.drop_constraint` .. versionchanged:: 0.8.0 The following positional argument names have been changed: * name -> constraint_name """ op = cls( constraint_name, operations.impl.table_name, type_=type_, schema=operations.impl.schema ) return operations.invoke(op) @Operations.register_operation("create_primary_key") @BatchOperations.register_operation( "create_primary_key", "batch_create_primary_key") class CreatePrimaryKeyOp(AddConstraintOp): """Represent a create primary key operation.""" constraint_type = "primarykey" def __init__( self, constraint_name, table_name, columns, schema=None, _orig_constraint=None, **kw): self.constraint_name = constraint_name self.table_name = table_name self.columns = columns self.schema = schema self._orig_constraint = _orig_constraint self.kw = kw @classmethod def from_constraint(cls, constraint): constraint_table = sqla_compat._table_for_constraint(constraint) return cls( constraint.name, constraint_table.name, constraint.columns, schema=constraint_table.schema, _orig_constraint=constraint ) def to_constraint(self, migration_context=None): if self._orig_constraint is not None: return self._orig_constraint schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.primary_key_constraint( self.constraint_name, self.table_name, self.columns, schema=self.schema) @classmethod @util._with_legacy_names([ ('name', 'constraint_name'), ('cols', 'columns') ]) def create_primary_key( cls, operations, constraint_name, table_name, columns, schema=None): """Issue a "create primary key" instruction using the current migration context. e.g.:: from alembic import op op.create_primary_key( "pk_my_table", "my_table", ["id", "version"] ) This internally generates a :class:`~sqlalchemy.schema.Table` object containing the necessary columns, then generates a new :class:`~sqlalchemy.schema.PrimaryKeyConstraint` object which it then associates with the :class:`~sqlalchemy.schema.Table`. Any event listeners associated with this action will be fired off normally. The :class:`~sqlalchemy.schema.AddConstraint` construct is ultimately used to generate the ALTER statement. :param name: Name of the primary key constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions` ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param table_name: String name of the target table. :param columns: a list of string column names to be applied to the primary key constraint. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. .. versionadded:: 0.7.0 'schema' can now accept a :class:`~sqlalchemy.sql.elements.quoted_name` construct. .. 
versionchanged:: 0.8.0 The following positional argument names have been changed: * name -> constraint_name * cols -> columns """ op = cls(constraint_name, table_name, columns, schema) return operations.invoke(op) @classmethod def batch_create_primary_key(cls, operations, constraint_name, columns): """Issue a "create primary key" instruction using the current batch migration context. The batch form of this call omits the ``table_name`` and ``schema`` arguments from the call. .. seealso:: :meth:`.Operations.create_primary_key` """ op = cls( constraint_name, operations.impl.table_name, columns, schema=operations.impl.schema ) return operations.invoke(op) @Operations.register_operation("create_unique_constraint") @BatchOperations.register_operation( "create_unique_constraint", "batch_create_unique_constraint") class CreateUniqueConstraintOp(AddConstraintOp): """Represent a create unique constraint operation.""" constraint_type = "unique" def __init__( self, constraint_name, table_name, columns, schema=None, _orig_constraint=None, **kw): self.constraint_name = constraint_name self.table_name = table_name self.columns = columns self.schema = schema self._orig_constraint = _orig_constraint self.kw = kw @classmethod def from_constraint(cls, constraint): constraint_table = sqla_compat._table_for_constraint(constraint) kw = {} if constraint.deferrable: kw['deferrable'] = constraint.deferrable if constraint.initially: kw['initially'] = constraint.initially return cls( constraint.name, constraint_table.name, [c.name for c in constraint.columns], schema=constraint_table.schema, _orig_constraint=constraint, **kw ) def to_constraint(self, migration_context=None): if self._orig_constraint is not None: return self._orig_constraint schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.unique_constraint( self.constraint_name, self.table_name, self.columns, schema=self.schema, **self.kw) @classmethod @util._with_legacy_names([ ('name', 'constraint_name'), ('source', 'table_name'), ('local_cols', 'columns'), ]) def create_unique_constraint( cls, operations, constraint_name, table_name, columns, schema=None, **kw): """Issue a "create unique constraint" instruction using the current migration context. e.g.:: from alembic import op op.create_unique_constraint("uq_user_name", "user", ["name"]) This internally generates a :class:`~sqlalchemy.schema.Table` object containing the necessary columns, then generates a new :class:`~sqlalchemy.schema.UniqueConstraint` object which it then associates with the :class:`~sqlalchemy.schema.Table`. Any event listeners associated with this action will be fired off normally. The :class:`~sqlalchemy.schema.AddConstraint` construct is ultimately used to generate the ALTER statement. :param name: Name of the unique constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions`, ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param table_name: String name of the source table. :param columns: a list of string column names in the source table. :param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: optional string. If set, emit INITIALLY <value> when issuing DDL for this constraint. :param schema: Optional schema name to operate within. 
To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. .. versionadded:: 0.7.0 'schema' can now accept a :class:`~sqlalchemy.sql.elements.quoted_name` construct. .. versionchanged:: 0.8.0 The following positional argument names have been changed: * name -> constraint_name * source -> table_name * local_cols -> columns """ op = cls( constraint_name, table_name, columns, schema=schema, **kw ) return operations.invoke(op) @classmethod @util._with_legacy_names([('name', 'constraint_name')]) def batch_create_unique_constraint( cls, operations, constraint_name, columns, **kw): """Issue a "create unique constraint" instruction using the current batch migration context. The batch form of this call omits the ``source`` and ``schema`` arguments from the call. .. seealso:: :meth:`.Operations.create_unique_constraint` .. versionchanged:: 0.8.0 The following positional argument names have been changed: * name -> constraint_name """ kw['schema'] = operations.impl.schema op = cls( constraint_name, operations.impl.table_name, columns, **kw ) return operations.invoke(op) @Operations.register_operation("create_foreign_key") @BatchOperations.register_operation( "create_foreign_key", "batch_create_foreign_key") class CreateForeignKeyOp(AddConstraintOp): """Represent a create foreign key constraint operation.""" constraint_type = "foreignkey" def __init__( self, constraint_name, source_table, referent_table, local_cols, remote_cols, _orig_constraint=None, **kw): self.constraint_name = constraint_name self.source_table = source_table self.referent_table = referent_table self.local_cols = local_cols self.remote_cols = remote_cols self._orig_constraint = _orig_constraint self.kw = kw def to_diff_tuple(self): return ("add_fk", self.to_constraint()) @classmethod def from_constraint(cls, constraint): kw = {} if constraint.onupdate: kw['onupdate'] = constraint.onupdate if constraint.ondelete: kw['ondelete'] = constraint.ondelete if constraint.initially: kw['initially'] = constraint.initially if constraint.deferrable: kw['deferrable'] = constraint.deferrable if constraint.use_alter: kw['use_alter'] = constraint.use_alter source_schema, source_table, \ source_columns, target_schema, \ target_table, target_columns,\ onupdate, ondelete, deferrable, initially \ = sqla_compat._fk_spec(constraint) kw['source_schema'] = source_schema kw['referent_schema'] = target_schema return cls( constraint.name, source_table, target_table, source_columns, target_columns, _orig_constraint=constraint, **kw ) def to_constraint(self, migration_context=None): if self._orig_constraint is not None: return self._orig_constraint schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.foreign_key_constraint( self.constraint_name, self.source_table, self.referent_table, self.local_cols, self.remote_cols, **self.kw) @classmethod @util._with_legacy_names([ ('name', 'constraint_name'), ('source', 'source_table'), ('referent', 'referent_table'), ]) def create_foreign_key(cls, operations, constraint_name, source_table, referent_table, local_cols, remote_cols, onupdate=None, ondelete=None, deferrable=None, initially=None, match=None, source_schema=None, referent_schema=None, **dialect_kw): """Issue a "create foreign key" instruction using the current migration context. 
e.g.:: from alembic import op op.create_foreign_key( "fk_user_address", "address", "user", ["user_id"], ["id"]) This internally generates a :class:`~sqlalchemy.schema.Table` object containing the necessary columns, then generates a new :class:`~sqlalchemy.schema.ForeignKeyConstraint` object which it then associates with the :class:`~sqlalchemy.schema.Table`. Any event listeners associated with this action will be fired off normally. The :class:`~sqlalchemy.schema.AddConstraint` construct is ultimately used to generate the ALTER statement. :param name: Name of the foreign key constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions`, ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param source_table: String name of the source table. :param referent_table: String name of the destination table. :param local_cols: a list of string column names in the source table. :param remote_cols: a list of string column names in the remote table. :param onupdate: Optional string. If set, emit ON UPDATE <value> when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT. :param ondelete: Optional string. If set, emit ON DELETE <value> when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT. :param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param source_schema: Optional schema name of the source table. :param referent_schema: Optional schema name of the destination table. .. versionchanged:: 0.8.0 The following positional argument names have been changed: * name -> constraint_name * source -> source_table * referent -> referent_table """ op = cls( constraint_name, source_table, referent_table, local_cols, remote_cols, onupdate=onupdate, ondelete=ondelete, deferrable=deferrable, source_schema=source_schema, referent_schema=referent_schema, initially=initially, match=match, **dialect_kw ) return operations.invoke(op) @classmethod @util._with_legacy_names([ ('name', 'constraint_name'), ('referent', 'referent_table') ]) def batch_create_foreign_key( cls, operations, constraint_name, referent_table, local_cols, remote_cols, referent_schema=None, onupdate=None, ondelete=None, deferrable=None, initially=None, match=None, **dialect_kw): """Issue a "create foreign key" instruction using the current batch migration context. The batch form of this call omits the ``source`` and ``source_schema`` arguments from the call. e.g.:: with batch_alter_table("address") as batch_op: batch_op.create_foreign_key( "fk_user_address", "user", ["user_id"], ["id"]) .. seealso:: :meth:`.Operations.create_foreign_key` .. 
versionchanged:: 0.8.0 The following positional argument names have been changed: * name -> constraint_name * referent -> referent_table """ op = cls( constraint_name, operations.impl.table_name, referent_table, local_cols, remote_cols, onupdate=onupdate, ondelete=ondelete, deferrable=deferrable, source_schema=operations.impl.schema, referent_schema=referent_schema, initially=initially, match=match, **dialect_kw ) return operations.invoke(op) @Operations.register_operation("create_check_constraint") @BatchOperations.register_operation( "create_check_constraint", "batch_create_check_constraint") class CreateCheckConstraintOp(AddConstraintOp): """Represent a create check constraint operation.""" constraint_type = "check" def __init__( self, constraint_name, table_name, condition, schema=None, _orig_constraint=None, **kw): self.constraint_name = constraint_name self.table_name = table_name self.condition = condition self.schema = schema self._orig_constraint = _orig_constraint self.kw = kw @classmethod def from_constraint(cls, constraint): constraint_table = sqla_compat._table_for_constraint(constraint) return cls( constraint.name, constraint_table.name, constraint.sqltext, schema=constraint_table.schema, _orig_constraint=constraint ) def to_constraint(self, migration_context=None): if self._orig_constraint is not None: return self._orig_constraint schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.check_constraint( self.constraint_name, self.table_name, self.condition, schema=self.schema, **self.kw) @classmethod @util._with_legacy_names([ ('name', 'constraint_name'), ('source', 'table_name') ]) def create_check_constraint( cls, operations, constraint_name, table_name, condition, schema=None, **kw): """Issue a "create check constraint" instruction using the current migration context. e.g.:: from alembic import op from sqlalchemy.sql import column, func op.create_check_constraint( "ck_user_name_len", "user", func.len(column('name')) > 5 ) CHECK constraints are usually against a SQL expression, so ad-hoc table metadata is usually needed. The function will convert the given arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound to an anonymous table in order to emit the CREATE statement. :param name: Name of the check constraint. The name is necessary so that an ALTER statement can be emitted. For setups that use an automated naming scheme such as that described at :ref:`sqla:constraint_naming_conventions`, ``name`` here can be ``None``, as the event listener will apply the name to the constraint object when it is associated with the table. :param table_name: String name of the source table. :param condition: SQL expression that's the condition of the constraint. Can be a string or SQLAlchemy expression language structure. :param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: optional string. If set, emit INITIALLY <value> when issuing DDL for this constraint. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. .. versionadded:: 0.7.0 'schema' can now accept a :class:`~sqlalchemy.sql.elements.quoted_name` construct. .. 
versionchanged:: 0.8.0 The following positional argument names have been changed: * name -> constraint_name * source -> table_name """ op = cls(constraint_name, table_name, condition, schema=schema, **kw) return operations.invoke(op) @classmethod @util._with_legacy_names([('name', 'constraint_name')]) def batch_create_check_constraint( cls, operations, constraint_name, condition, **kw): """Issue a "create check constraint" instruction using the current batch migration context. The batch form of this call omits the ``source`` and ``schema`` arguments from the call. .. seealso:: :meth:`.Operations.create_check_constraint` .. versionchanged:: 0.8.0 The following positional argument names have been changed: * name -> constraint_name """ op = cls( constraint_name, operations.impl.table_name, condition, schema=operations.impl.schema, **kw) return operations.invoke(op) @Operations.register_operation("create_index") @BatchOperations.register_operation("create_index", "batch_create_index") class CreateIndexOp(MigrateOperation): """Represent a create index operation.""" def __init__( self, index_name, table_name, columns, schema=None, unique=False, _orig_index=None, **kw): self.index_name = index_name self.table_name = table_name self.columns = columns self.schema = schema self.unique = unique self.kw = kw self._orig_index = _orig_index def reverse(self): return DropIndexOp.from_index(self.to_index()) def to_diff_tuple(self): return ("add_index", self.to_index()) @classmethod def from_index(cls, index): return cls( index.name, index.table.name, sqla_compat._get_index_expressions(index), schema=index.table.schema, unique=index.unique, _orig_index=index, **index.kwargs ) def to_index(self, migration_context=None): if self._orig_index: return self._orig_index schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.index( self.index_name, self.table_name, self.columns, schema=self.schema, unique=self.unique, **self.kw) @classmethod @util._with_legacy_names([('name', 'index_name')]) def create_index( cls, operations, index_name, table_name, columns, schema=None, unique=False, **kw): """Issue a "create index" instruction using the current migration context. e.g.:: from alembic import op op.create_index('ik_test', 't1', ['foo', 'bar']) Functional indexes can be produced by using the :func:`sqlalchemy.sql.expression.text` construct:: from alembic import op from sqlalchemy import text op.create_index('ik_test', 't1', [text('lower(foo)')]) .. versionadded:: 0.6.7 support for making use of the :func:`~sqlalchemy.sql.expression.text` construct in conjunction with :meth:`.Operations.create_index` in order to produce functional expressions within CREATE INDEX. :param index_name: name of the index. :param table_name: name of the owning table. :param columns: a list consisting of string column names and/or :func:`~sqlalchemy.sql.expression.text` constructs. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. .. versionadded:: 0.7.0 'schema' can now accept a :class:`~sqlalchemy.sql.elements.quoted_name` construct. :param unique: If True, create a unique index. :param quote: Force quoting of this column's name on or off, corresponding to ``True`` or ``False``. 
When left at its default of ``None``, the column identifier will be quoted according to whether the name is case sensitive (identifiers with at least one upper case character are treated as case sensitive), or if it's a reserved word. This flag is only needed to force quoting of a reserved word which is not known by the SQLAlchemy dialect. :param \**kw: Additional keyword arguments not mentioned above are dialect specific, and passed in the form ``<dialectname>_<argname>``. See the documentation regarding an individual dialect at :ref:`dialect_toplevel` for detail on documented arguments. .. versionchanged:: 0.8.0 The following positional argument names have been changed: * name -> index_name """ op = cls( index_name, table_name, columns, schema=schema, unique=unique, **kw ) return operations.invoke(op) @classmethod def batch_create_index(cls, operations, index_name, columns, **kw): """Issue a "create index" instruction using the current batch migration context. .. seealso:: :meth:`.Operations.create_index` """ op = cls( index_name, operations.impl.table_name, columns, schema=operations.impl.schema, **kw ) return operations.invoke(op) @Operations.register_operation("drop_index") @BatchOperations.register_operation("drop_index", "batch_drop_index") class DropIndexOp(MigrateOperation): """Represent a drop index operation.""" def __init__( self, index_name, table_name=None, schema=None, _orig_index=None): self.index_name = index_name self.table_name = table_name self.schema = schema self._orig_index = _orig_index def to_diff_tuple(self): return ("remove_index", self.to_index()) def reverse(self): if self._orig_index is None: raise ValueError( "operation is not reversible; " "original index is not present") return CreateIndexOp.from_index(self._orig_index) @classmethod def from_index(cls, index): return cls( index.name, index.table.name, schema=index.table.schema, _orig_index=index ) def to_index(self, migration_context=None): if self._orig_index is not None: return self._orig_index schema_obj = schemaobj.SchemaObjects(migration_context) # need a dummy column name here since SQLAlchemy # 0.7.6 and further raises on Index with no columns return schema_obj.index( self.index_name, self.table_name, ['x'], schema=self.schema) @classmethod @util._with_legacy_names([ ('name', 'index_name'), ('tablename', 'table_name') ]) def drop_index(cls, operations, index_name, table_name=None, schema=None): """Issue a "drop index" instruction using the current migration context. e.g.:: drop_index("accounts") :param index_name: name of the index. :param table_name: name of the owning table. Some backends such as Microsoft SQL Server require this. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. .. versionadded:: 0.7.0 'schema' can now accept a :class:`~sqlalchemy.sql.elements.quoted_name` construct. .. versionchanged:: 0.8.0 The following positional argument names have been changed: * name -> index_name """ op = cls(index_name, table_name=table_name, schema=schema) return operations.invoke(op) @classmethod @util._with_legacy_names([('name', 'index_name')]) def batch_drop_index(cls, operations, index_name, **kw): """Issue a "drop index" instruction using the current batch migration context. .. seealso:: :meth:`.Operations.drop_index` .. 
versionchanged:: 0.8.0 The following positional argument names have been changed: * name -> index_name """ op = cls( index_name, table_name=operations.impl.table_name, schema=operations.impl.schema ) return operations.invoke(op) @Operations.register_operation("create_table") class CreateTableOp(MigrateOperation): """Represent a create table operation.""" def __init__( self, table_name, columns, schema=None, _orig_table=None, **kw): self.table_name = table_name self.columns = columns self.schema = schema self.kw = kw self._orig_table = _orig_table def reverse(self): return DropTableOp.from_table(self.to_table()) def to_diff_tuple(self): return ("add_table", self.to_table()) @classmethod def from_table(cls, table): return cls( table.name, list(table.c) + list(table.constraints), schema=table.schema, _orig_table=table, **table.kwargs ) def to_table(self, migration_context=None): if self._orig_table is not None: return self._orig_table schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.table( self.table_name, *self.columns, schema=self.schema, **self.kw ) @classmethod @util._with_legacy_names([('name', 'table_name')]) def create_table(cls, operations, table_name, *columns, **kw): """Issue a "create table" instruction using the current migration context. This directive receives an argument list similar to that of the traditional :class:`sqlalchemy.schema.Table` construct, but without the metadata:: from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column from alembic import op op.create_table( 'account', Column('id', INTEGER, primary_key=True), Column('name', VARCHAR(50), nullable=False), Column('description', NVARCHAR(200)), Column('timestamp', TIMESTAMP, server_default=func.now()) ) Note that :meth:`.create_table` accepts :class:`~sqlalchemy.schema.Column` constructs directly from the SQLAlchemy library. In particular, default values to be created on the database side are specified using the ``server_default`` parameter, and not ``default`` which only specifies Python-side defaults:: from alembic import op from sqlalchemy import Column, TIMESTAMP, func # specify "DEFAULT NOW" along with the "timestamp" column op.create_table('account', Column('id', INTEGER, primary_key=True), Column('timestamp', TIMESTAMP, server_default=func.now()) ) The function also returns a newly created :class:`~sqlalchemy.schema.Table` object, corresponding to the table specification given, which is suitable for immediate SQL operations, in particular :meth:`.Operations.bulk_insert`:: from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column from alembic import op account_table = op.create_table( 'account', Column('id', INTEGER, primary_key=True), Column('name', VARCHAR(50), nullable=False), Column('description', NVARCHAR(200)), Column('timestamp', TIMESTAMP, server_default=func.now()) ) op.bulk_insert( account_table, [ {"name": "A1", "description": "account 1"}, {"name": "A2", "description": "account 2"}, ] ) .. versionadded:: 0.7.0 :param table_name: Name of the table :param \*columns: collection of :class:`~sqlalchemy.schema.Column` objects within the table, as well as optional :class:`~sqlalchemy.schema.Constraint` objects and :class:`~.sqlalchemy.schema.Index` objects. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. .. versionadded:: 0.7.0 'schema' can now accept a :class:`~sqlalchemy.sql.elements.quoted_name` construct. 
:param \**kw: Other keyword arguments are passed to the underlying :class:`sqlalchemy.schema.Table` object created for the command. :return: the :class:`~sqlalchemy.schema.Table` object corresponding to the parameters given. .. versionadded:: 0.7.0 - the :class:`~sqlalchemy.schema.Table` object is returned. .. versionchanged:: 0.8.0 The following positional argument names have been changed: * name -> table_name """ op = cls(table_name, columns, **kw) return operations.invoke(op) @Operations.register_operation("drop_table") class DropTableOp(MigrateOperation): """Represent a drop table operation.""" def __init__( self, table_name, schema=None, table_kw=None, _orig_table=None): self.table_name = table_name self.schema = schema self.table_kw = table_kw or {} self._orig_table = _orig_table def to_diff_tuple(self): return ("remove_table", self.to_table()) def reverse(self): if self._orig_table is None: raise ValueError( "operation is not reversible; " "original table is not present") return CreateTableOp.from_table(self._orig_table) @classmethod def from_table(cls, table): return cls(table.name, schema=table.schema, _orig_table=table) def to_table(self, migration_context=None): if self._orig_table is not None: return self._orig_table schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.table( self.table_name, schema=self.schema, **self.table_kw) @classmethod @util._with_legacy_names([('name', 'table_name')]) def drop_table(cls, operations, table_name, schema=None, **kw): """Issue a "drop table" instruction using the current migration context. e.g.:: drop_table("accounts") :param table_name: Name of the table :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. .. versionadded:: 0.7.0 'schema' can now accept a :class:`~sqlalchemy.sql.elements.quoted_name` construct. :param \**kw: Other keyword arguments are passed to the underlying :class:`sqlalchemy.schema.Table` object created for the command. .. versionchanged:: 0.8.0 The following positional argument names have been changed: * name -> table_name """ op = cls(table_name, schema=schema, table_kw=kw) operations.invoke(op) class AlterTableOp(MigrateOperation): """Represent an alter table operation.""" def __init__(self, table_name, schema=None): self.table_name = table_name self.schema = schema @Operations.register_operation("rename_table") class RenameTableOp(AlterTableOp): """Represent a rename table operation.""" def __init__(self, old_table_name, new_table_name, schema=None): super(RenameTableOp, self).__init__(old_table_name, schema=schema) self.new_table_name = new_table_name @classmethod def rename_table( cls, operations, old_table_name, new_table_name, schema=None): """Emit an ALTER TABLE to rename a table. :param old_table_name: old name. :param new_table_name: new name. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. .. versionadded:: 0.7.0 'schema' can now accept a :class:`~sqlalchemy.sql.elements.quoted_name` construct. 
""" op = cls(old_table_name, new_table_name, schema=schema) return operations.invoke(op) @Operations.register_operation("alter_column") @BatchOperations.register_operation("alter_column", "batch_alter_column") class AlterColumnOp(AlterTableOp): """Represent an alter column operation.""" def __init__( self, table_name, column_name, schema=None, existing_type=None, existing_server_default=False, existing_nullable=None, modify_nullable=None, modify_server_default=False, modify_name=None, modify_type=None, **kw ): super(AlterColumnOp, self).__init__(table_name, schema=schema) self.column_name = column_name self.existing_type = existing_type self.existing_server_default = existing_server_default self.existing_nullable = existing_nullable self.modify_nullable = modify_nullable self.modify_server_default = modify_server_default self.modify_name = modify_name self.modify_type = modify_type self.kw = kw def to_diff_tuple(self): col_diff = [] schema, tname, cname = self.schema, self.table_name, self.column_name if self.modify_type is not None: col_diff.append( ("modify_type", schema, tname, cname, { "existing_nullable": self.existing_nullable, "existing_server_default": self.existing_server_default, }, self.existing_type, self.modify_type) ) if self.modify_nullable is not None: col_diff.append( ("modify_nullable", schema, tname, cname, { "existing_type": self.existing_type, "existing_server_default": self.existing_server_default }, self.existing_nullable, self.modify_nullable) ) if self.modify_server_default is not False: col_diff.append( ("modify_default", schema, tname, cname, { "existing_nullable": self.existing_nullable, "existing_type": self.existing_type }, self.existing_server_default, self.modify_server_default) ) return col_diff def has_changes(self): hc1 = self.modify_nullable is not None or \ self.modify_server_default is not False or \ self.modify_type is not None if hc1: return True for kw in self.kw: if kw.startswith('modify_'): return True else: return False def reverse(self): kw = self.kw.copy() kw['existing_type'] = self.existing_type kw['existing_nullable'] = self.existing_nullable kw['existing_server_default'] = self.existing_server_default if self.modify_type is not None: kw['modify_type'] = self.modify_type if self.modify_nullable is not None: kw['modify_nullable'] = self.modify_nullable if self.modify_server_default is not False: kw['modify_server_default'] = self.modify_server_default # TODO: make this a little simpler all_keys = set(m.group(1) for m in [ re.match(r'^(?:existing_|modify_)(.+)$', k) for k in kw ] if m) for k in all_keys: if 'modify_%s' % k in kw: swap = kw['existing_%s' % k] kw['existing_%s' % k] = kw['modify_%s' % k] kw['modify_%s' % k] = swap return self.__class__( self.table_name, self.column_name, schema=self.schema, **kw ) @classmethod @util._with_legacy_names([('name', 'new_column_name')]) def alter_column( cls, operations, table_name, column_name, nullable=None, server_default=False, new_column_name=None, type_=None, existing_type=None, existing_server_default=False, existing_nullable=None, schema=None, **kw ): """Issue an "alter column" instruction using the current migration context. Generally, only that aspect of the column which is being changed, i.e. name, type, nullability, default, needs to be specified. Multiple changes can also be specified at once and the backend should "do the right thing", emitting each change either separately or together as the backend allows. 
MySQL has special requirements here, since MySQL cannot ALTER a column without a full specification. When producing MySQL-compatible migration files, it is recommended that the ``existing_type``, ``existing_server_default``, and ``existing_nullable`` parameters be present, if not being altered. Type changes which are against the SQLAlchemy "schema" types :class:`~sqlalchemy.types.Boolean` and :class:`~sqlalchemy.types.Enum` may also add or drop constraints which accompany those types on backends that don't support them natively. The ``existing_server_default`` argument is used in this case as well to remove a previous constraint. :param table_name: string name of the target table. :param column_name: string name of the target column, as it exists before the operation begins. :param nullable: Optional; specify ``True`` or ``False`` to alter the column's nullability. :param server_default: Optional; specify a string SQL expression, :func:`~sqlalchemy.sql.expression.text`, or :class:`~sqlalchemy.schema.DefaultClause` to indicate an alteration to the column's default value. Set to ``None`` to have the default removed. :param new_column_name: Optional; specify a string name here to indicate the new name within a column rename operation. :param ``type_``: Optional; a :class:`~sqlalchemy.types.TypeEngine` type object to specify a change to the column's type. For SQLAlchemy types that also indicate a constraint (i.e. :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`), the constraint is also generated. :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column; currently understood by the MySQL dialect. :param existing_type: Optional; a :class:`~sqlalchemy.types.TypeEngine` type object to specify the previous type. This is required for all MySQL column alter operations that don't otherwise specify a new type, as well as for when nullability is being changed on a SQL Server column. It is also used if the type is a so-called SQLlchemy "schema" type which may define a constraint (i.e. :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`), so that the constraint can be dropped. :param existing_server_default: Optional; The existing default value of the column. Required on MySQL if an existing default is not being changed; else MySQL removes the default. :param existing_nullable: Optional; the existing nullability of the column. Required on MySQL if the existing nullability is not being changed; else MySQL sets this to NULL. :param existing_autoincrement: Optional; the existing autoincrement of the column. Used for MySQL's system of altering a column that specifies ``AUTO_INCREMENT``. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. .. versionadded:: 0.7.0 'schema' can now accept a :class:`~sqlalchemy.sql.elements.quoted_name` construct. 
""" alt = cls( table_name, column_name, schema=schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, modify_name=new_column_name, modify_type=type_, modify_server_default=server_default, modify_nullable=nullable, **kw ) return operations.invoke(alt) @classmethod def batch_alter_column( cls, operations, column_name, nullable=None, server_default=False, new_column_name=None, type_=None, existing_type=None, existing_server_default=False, existing_nullable=None, **kw ): """Issue an "alter column" instruction using the current batch migration context. .. seealso:: :meth:`.Operations.add_column` """ alt = cls( operations.impl.table_name, column_name, schema=operations.impl.schema, existing_type=existing_type, existing_server_default=existing_server_default, existing_nullable=existing_nullable, modify_name=new_column_name, modify_type=type_, modify_server_default=server_default, modify_nullable=nullable, **kw ) return operations.invoke(alt) @Operations.register_operation("add_column") @BatchOperations.register_operation("add_column", "batch_add_column") class AddColumnOp(AlterTableOp): """Represent an add column operation.""" def __init__(self, table_name, column, schema=None): super(AddColumnOp, self).__init__(table_name, schema=schema) self.column = column def reverse(self): return DropColumnOp.from_column_and_tablename( self.schema, self.table_name, self.column) def to_diff_tuple(self): return ("add_column", self.schema, self.table_name, self.column) def to_column(self): return self.column @classmethod def from_column(cls, col): return cls(col.table.name, col, schema=col.table.schema) @classmethod def from_column_and_tablename(cls, schema, tname, col): return cls(tname, col, schema=schema) @classmethod def add_column(cls, operations, table_name, column, schema=None): """Issue an "add column" instruction using the current migration context. e.g.:: from alembic import op from sqlalchemy import Column, String op.add_column('organization', Column('name', String()) ) The provided :class:`~sqlalchemy.schema.Column` object can also specify a :class:`~sqlalchemy.schema.ForeignKey`, referencing a remote table name. Alembic will automatically generate a stub "referenced" table and emit a second ALTER statement in order to add the constraint separately:: from alembic import op from sqlalchemy import Column, INTEGER, ForeignKey op.add_column('organization', Column('account_id', INTEGER, ForeignKey('accounts.id')) ) Note that this statement uses the :class:`~sqlalchemy.schema.Column` construct as is from the SQLAlchemy library. In particular, default values to be created on the database side are specified using the ``server_default`` parameter, and not ``default`` which only specifies Python-side defaults:: from alembic import op from sqlalchemy import Column, TIMESTAMP, func # specify "DEFAULT NOW" along with the column add op.add_column('account', Column('timestamp', TIMESTAMP, server_default=func.now()) ) :param table_name: String name of the parent table. :param column: a :class:`sqlalchemy.schema.Column` object representing the new column. :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. .. versionadded:: 0.7.0 'schema' can now accept a :class:`~sqlalchemy.sql.elements.quoted_name` construct. 
""" op = cls(table_name, column, schema=schema) return operations.invoke(op) @classmethod def batch_add_column(cls, operations, column): """Issue an "add column" instruction using the current batch migration context. .. seealso:: :meth:`.Operations.add_column` """ op = cls( operations.impl.table_name, column, schema=operations.impl.schema ) return operations.invoke(op) @Operations.register_operation("drop_column") @BatchOperations.register_operation("drop_column", "batch_drop_column") class DropColumnOp(AlterTableOp): """Represent a drop column operation.""" def __init__( self, table_name, column_name, schema=None, _orig_column=None, **kw): super(DropColumnOp, self).__init__(table_name, schema=schema) self.column_name = column_name self.kw = kw self._orig_column = _orig_column def to_diff_tuple(self): return ( "remove_column", self.schema, self.table_name, self.to_column()) def reverse(self): if self._orig_column is None: raise ValueError( "operation is not reversible; " "original column is not present") return AddColumnOp.from_column_and_tablename( self.schema, self.table_name, self._orig_column) @classmethod def from_column_and_tablename(cls, schema, tname, col): return cls(tname, col.name, schema=schema, _orig_column=col) def to_column(self, migration_context=None): if self._orig_column is not None: return self._orig_column schema_obj = schemaobj.SchemaObjects(migration_context) return schema_obj.column(self.column_name, NULLTYPE) @classmethod def drop_column( cls, operations, table_name, column_name, schema=None, **kw): """Issue a "drop column" instruction using the current migration context. e.g.:: drop_column('organization', 'account_id') :param table_name: name of table :param column_name: name of column :param schema: Optional schema name to operate within. To control quoting of the schema outside of the default behavior, use the SQLAlchemy construct :class:`~sqlalchemy.sql.elements.quoted_name`. .. versionadded:: 0.7.0 'schema' can now accept a :class:`~sqlalchemy.sql.elements.quoted_name` construct. :param mssql_drop_check: Optional boolean. When ``True``, on Microsoft SQL Server only, first drop the CHECK constraint on the column using a SQL-script-compatible block that selects into a @variable from sys.check_constraints, then exec's a separate DROP CONSTRAINT for that constraint. :param mssql_drop_default: Optional boolean. When ``True``, on Microsoft SQL Server only, first drop the DEFAULT constraint on the column using a SQL-script-compatible block that selects into a @variable from sys.default_constraints, then exec's a separate DROP CONSTRAINT for that default. :param mssql_drop_foreign_key: Optional boolean. When ``True``, on Microsoft SQL Server only, first drop a single FOREIGN KEY constraint on the column using a SQL-script-compatible block that selects into a @variable from sys.foreign_keys/sys.foreign_key_columns, then exec's a separate DROP CONSTRAINT for that default. Only works if the column has exactly one FK constraint which refers to it, at the moment. .. versionadded:: 0.6.2 """ op = cls(table_name, column_name, schema=schema, **kw) return operations.invoke(op) @classmethod def batch_drop_column(cls, operations, column_name): """Issue a "drop column" instruction using the current batch migration context. .. 
seealso:: :meth:`.Operations.drop_column` """ op = cls( operations.impl.table_name, column_name, schema=operations.impl.schema) return operations.invoke(op) @Operations.register_operation("bulk_insert") class BulkInsertOp(MigrateOperation): """Represent a bulk insert operation.""" def __init__(self, table, rows, multiinsert=True): self.table = table self.rows = rows self.multiinsert = multiinsert @classmethod def bulk_insert(cls, operations, table, rows, multiinsert=True): """Issue a "bulk insert" operation using the current migration context. This provides a means of representing an INSERT of multiple rows which works equally well in the context of executing on a live connection as well as that of generating a SQL script. In the case of a SQL script, the values are rendered inline into the statement. e.g.:: from alembic import op from datetime import date from sqlalchemy.sql import table, column from sqlalchemy import String, Integer, Date # Create an ad-hoc table to use for the insert statement. accounts_table = table('account', column('id', Integer), column('name', String), column('create_date', Date) ) op.bulk_insert(accounts_table, [ {'id':1, 'name':'John Smith', 'create_date':date(2010, 10, 5)}, {'id':2, 'name':'Ed Williams', 'create_date':date(2007, 5, 27)}, {'id':3, 'name':'Wendy Jones', 'create_date':date(2008, 8, 15)}, ] ) When using --sql mode, some datatypes may not render inline automatically, such as dates and other special types. When this issue is present, :meth:`.Operations.inline_literal` may be used:: op.bulk_insert(accounts_table, [ {'id':1, 'name':'John Smith', 'create_date':op.inline_literal("2010-10-05")}, {'id':2, 'name':'Ed Williams', 'create_date':op.inline_literal("2007-05-27")}, {'id':3, 'name':'Wendy Jones', 'create_date':op.inline_literal("2008-08-15")}, ], multiinsert=False ) When using :meth:`.Operations.inline_literal` in conjunction with :meth:`.Operations.bulk_insert`, in order for the statement to work in "online" (e.g. non --sql) mode, the :paramref:`~.Operations.bulk_insert.multiinsert` flag should be set to ``False``, which will have the effect of individual INSERT statements being emitted to the database, each with a distinct VALUES clause, so that the "inline" values can still be rendered, rather than attempting to pass the values as bound parameters. .. versionadded:: 0.6.4 :meth:`.Operations.inline_literal` can now be used with :meth:`.Operations.bulk_insert`, and the :paramref:`~.Operations.bulk_insert.multiinsert` flag has been added to assist in this usage when running in "online" mode. :param table: a table object which represents the target of the INSERT. :param rows: a list of dictionaries indicating rows. :param multiinsert: when at its default of True and --sql mode is not enabled, the INSERT statement will be executed using "executemany()" style, where all elements in the list of dictionaries are passed as bound parameters in a single list. Setting this to False results in individual INSERT statements being emitted per parameter set, and is needed in those cases where non-literal values are present in the parameter sets. .. 
versionadded:: 0.6.4 """ op = cls(table, rows, multiinsert=multiinsert) operations.invoke(op) @Operations.register_operation("execute") class ExecuteSQLOp(MigrateOperation): """Represent an execute SQL operation.""" def __init__(self, sqltext, execution_options=None): self.sqltext = sqltext self.execution_options = execution_options @classmethod def execute(cls, operations, sqltext, execution_options=None): """Execute the given SQL using the current migration context. In a SQL script context, the statement is emitted directly to the output stream. There is *no* return result, however, as this function is oriented towards generating a change script that can run in "offline" mode. For full interaction with a connected database, use the "bind" available from the context:: from alembic import op connection = op.get_bind() Also note that any parameterized statement here *will not work* in offline mode - INSERT, UPDATE and DELETE statements which refer to literal values would need to render inline expressions. For simple use cases, the :meth:`.inline_literal` function can be used for **rudimentary** quoting of string values. For "bulk" inserts, consider using :meth:`.bulk_insert`. For example, to emit an UPDATE statement which is equally compatible with both online and offline mode:: from sqlalchemy.sql import table, column from sqlalchemy import String from alembic import op account = table('account', column('name', String) ) op.execute( account.update().\\ where(account.c.name==op.inline_literal('account 1')).\\ values({'name':op.inline_literal('account 2')}) ) Note above we also used the SQLAlchemy :func:`sqlalchemy.sql.expression.table` and :func:`sqlalchemy.sql.expression.column` constructs to make a brief, ad-hoc table construct just for our UPDATE statement. A full :class:`~sqlalchemy.schema.Table` construct of course works perfectly fine as well, though note it's a recommended practice to at least ensure the definition of a table is self-contained within the migration script, rather than imported from a module that may break compatibility with older migrations. :param sql: Any legal SQLAlchemy expression, including: * a string * a :func:`sqlalchemy.sql.expression.text` construct. * a :func:`sqlalchemy.sql.expression.insert` construct. * a :func:`sqlalchemy.sql.expression.update`, :func:`sqlalchemy.sql.expression.insert`, or :func:`sqlalchemy.sql.expression.delete` construct. * Pretty much anything that's "executable" as described in :ref:`sqlexpression_toplevel`. :param execution_options: Optional dictionary of execution options, will be passed to :meth:`sqlalchemy.engine.Connection.execution_options`. 
""" op = cls(sqltext, execution_options=execution_options) return operations.invoke(op) class OpContainer(MigrateOperation): """Represent a sequence of operations operation.""" def __init__(self, ops=()): self.ops = ops def is_empty(self): return not self.ops def as_diffs(self): return list(OpContainer._ops_as_diffs(self)) @classmethod def _ops_as_diffs(cls, migrations): for op in migrations.ops: if hasattr(op, 'ops'): for sub_op in cls._ops_as_diffs(op): yield sub_op else: yield op.to_diff_tuple() class ModifyTableOps(OpContainer): """Contains a sequence of operations that all apply to a single Table.""" def __init__(self, table_name, ops, schema=None): super(ModifyTableOps, self).__init__(ops) self.table_name = table_name self.schema = schema def reverse(self): return ModifyTableOps( self.table_name, ops=list(reversed( [op.reverse() for op in self.ops] )), schema=self.schema ) class UpgradeOps(OpContainer): """contains a sequence of operations that would apply to the 'upgrade' stream of a script. .. seealso:: :ref:`customizing_revision` """ def __init__(self, ops=(), upgrade_token="upgrades"): super(UpgradeOps, self).__init__(ops=ops) self.upgrade_token = upgrade_token def reverse_into(self, downgrade_ops): downgrade_ops.ops[:] = list(reversed( [op.reverse() for op in self.ops] )) return downgrade_ops def reverse(self): return self.reverse_into(DowngradeOps(ops=[])) class DowngradeOps(OpContainer): """contains a sequence of operations that would apply to the 'downgrade' stream of a script. .. seealso:: :ref:`customizing_revision` """ def __init__(self, ops=(), downgrade_token="downgrades"): super(DowngradeOps, self).__init__(ops=ops) self.downgrade_token = downgrade_token def reverse(self): return UpgradeOps( ops=list(reversed( [op.reverse() for op in self.ops] )) ) class MigrationScript(MigrateOperation): """represents a migration script. E.g. when autogenerate encounters this object, this corresponds to the production of an actual script file. A normal :class:`.MigrationScript` object would contain a single :class:`.UpgradeOps` and a single :class:`.DowngradeOps` directive. These are accessible via the ``.upgrade_ops`` and ``.downgrade_ops`` attributes. In the case of an autogenerate operation that runs multiple times, such as the multiple database example in the "multidb" template, the ``.upgrade_ops`` and ``.downgrade_ops`` attributes are disabled, and instead these objects should be accessed via the ``.upgrade_ops_list`` and ``.downgrade_ops_list`` list-based attributes. These latter attributes are always available at the very least as single-element lists. .. versionchanged:: 0.8.1 the ``.upgrade_ops`` and ``.downgrade_ops`` attributes should be accessed via the ``.upgrade_ops_list`` and ``.downgrade_ops_list`` attributes if multiple autogenerate passes proceed on the same :class:`.MigrationScript` object. .. seealso:: :ref:`customizing_revision` """ def __init__( self, rev_id, upgrade_ops, downgrade_ops, message=None, imports=set(), head=None, splice=None, branch_label=None, version_path=None, depends_on=None): self.rev_id = rev_id self.message = message self.imports = imports self.head = head self.splice = splice self.branch_label = branch_label self.version_path = version_path self.depends_on = depends_on self.upgrade_ops = upgrade_ops self.downgrade_ops = downgrade_ops @property def upgrade_ops(self): """An instance of :class:`.UpgradeOps`. .. 
seealso:: :attr:`.MigrationScript.upgrade_ops_list` """ if len(self._upgrade_ops) > 1: raise ValueError( "This MigrationScript instance has a multiple-entry " "list for UpgradeOps; please use the " "upgrade_ops_list attribute.") elif not self._upgrade_ops: return None else: return self._upgrade_ops[0] @upgrade_ops.setter def upgrade_ops(self, upgrade_ops): self._upgrade_ops = util.to_list(upgrade_ops) for elem in self._upgrade_ops: assert isinstance(elem, UpgradeOps) @property def downgrade_ops(self): """An instance of :class:`.DowngradeOps`. .. seealso:: :attr:`.MigrationScript.downgrade_ops_list` """ if len(self._downgrade_ops) > 1: raise ValueError( "This MigrationScript instance has a multiple-entry " "list for DowngradeOps; please use the " "downgrade_ops_list attribute.") elif not self._downgrade_ops: return None else: return self._downgrade_ops[0] @downgrade_ops.setter def downgrade_ops(self, downgrade_ops): self._downgrade_ops = util.to_list(downgrade_ops) for elem in self._downgrade_ops: assert isinstance(elem, DowngradeOps) @property def upgrade_ops_list(self): """A list of :class:`.UpgradeOps` instances. This is used in place of the :attr:`.MigrationScript.upgrade_ops` attribute when dealing with a revision operation that does multiple autogenerate passes. .. versionadded:: 0.8.1 """ return self._upgrade_ops @property def downgrade_ops_list(self): """A list of :class:`.DowngradeOps` instances. This is used in place of the :attr:`.MigrationScript.downgrade_ops` attribute when dealing with a revision operation that does multiple autogenerate passes. .. versionadded:: 0.8.1 """ return self._downgrade_ops
0.000069
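# A minimal sketch (not part of the module above) of how the alter_column
# parameters documented there might be used from an Alembic migration script.
# The table and column names ("account", "status"), the type, and the default
# are illustrative assumptions; on MySQL the existing_* arguments are restated
# even though only nullability changes, as the docstring recommends.

from alembic import op
import sqlalchemy as sa


def upgrade():
    # Change NULL -> NOT NULL while restating the unchanged type and default
    # so the MySQL dialect can emit a full column specification.
    op.alter_column(
        'account', 'status',
        existing_type=sa.String(length=20),
        existing_server_default=sa.text("'active'"),
        nullable=False,
    )


def downgrade():
    # Reverse of the above: relax the column back to NULLable.
    op.alter_column(
        'account', 'status',
        existing_type=sa.String(length=20),
        existing_server_default=sa.text("'active'"),
        nullable=True,
    )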
#----------------------------------------------------------------------------- # Copyright (c) 2013-2019, PyInstaller Development Team. # # Distributed under the terms of the GNU General Public License with exception # for distributing bootloader. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import os import sys try: FileNotFoundError except NameError: # FileNotFoundError is new in Python 3.3 # NB: Aliasing IOError is not a full emulation of FileNotFoundError, # but far enough for this use case, where the whole frozen program # terminates when this exception occurs. FileNotFoundError = IOError tcldir = os.path.join(sys._MEIPASS, 'tcl') tkdir = os.path.join(sys._MEIPASS, 'tk') if not os.path.isdir(tcldir): raise FileNotFoundError('Tcl data directory "%s" not found.' % (tcldir)) if not os.path.isdir(tkdir): raise FileNotFoundError('Tk data directory "%s" not found.' % (tkdir)) # Notify "tkinter" of such directories. os.environ["TCL_LIBRARY"] = tcldir os.environ["TK_LIBRARY"] = tkdir
0.001741
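# A minimal sketch (separate from the runtime hook above) of the same
# sys._MEIPASS pattern for locating bundled data files in a PyInstaller
# one-file build, with a fallback for running from source. The "data"
# directory and file name are illustrative assumptions, not anything the
# hook above defines.

import os
import sys


def resource_path(relative):
    # When frozen by PyInstaller, files added via --add-data are unpacked
    # under the temporary directory exposed as sys._MEIPASS; otherwise fall
    # back to the directory containing this script.
    base = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
    return os.path.join(base, relative)


if __name__ == '__main__':
    print(resource_path(os.path.join('data', 'config.ini')))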
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .contractual_rules_attribution import ContractualRulesAttribution class ContractualRulesLinkAttribution(ContractualRulesAttribution): """Defines a contractual rule for link attribution. Variables are only populated by the server, and will be ignored when sending a request. :ivar target_property_name: The name of the field that the rule applies to. :vartype target_property_name: str :param _type: Constant filled by server. :type _type: str :ivar must_be_close_to_content: A Boolean value that determines whether the contents of the rule must be placed in close proximity to the field that the rule applies to. If true, the contents must be placed in close proximity. If false, or this field does not exist, the contents may be placed at the caller's discretion. :vartype must_be_close_to_content: bool :param text: The attribution text. :type text: str :param url: The URL to the provider's website. Use text and URL to create the hyperlink. :type url: str :ivar optional_for_list_display: Indicates whether this provider's attribution is optional. :vartype optional_for_list_display: bool """ _validation = { 'target_property_name': {'readonly': True}, '_type': {'required': True}, 'must_be_close_to_content': {'readonly': True}, 'text': {'required': True}, 'url': {'required': True}, 'optional_for_list_display': {'readonly': True}, } _attribute_map = { 'target_property_name': {'key': 'targetPropertyName', 'type': 'str'}, '_type': {'key': '_type', 'type': 'str'}, 'must_be_close_to_content': {'key': 'mustBeCloseToContent', 'type': 'bool'}, 'text': {'key': 'text', 'type': 'str'}, 'url': {'key': 'url', 'type': 'str'}, 'optional_for_list_display': {'key': 'optionalForListDisplay', 'type': 'bool'}, } def __init__(self, text, url): super(ContractualRulesLinkAttribution, self).__init__() self.text = text self.url = url self.optional_for_list_display = None self._type = 'ContractualRules/LinkAttribution'
0.000754
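# A standalone sketch of how an _attribute_map like the one in the generated
# model above maps Python attribute names to wire (JSON) keys. This is an
# illustrative re-implementation for clarity only; the real model is
# serialized by msrest, not by this helper.

_attribute_map = {
    'target_property_name': {'key': 'targetPropertyName', 'type': 'str'},
    '_type': {'key': '_type', 'type': 'str'},
    'text': {'key': 'text', 'type': 'str'},
    'url': {'key': 'url', 'type': 'str'},
}


def to_wire(obj, attribute_map):
    # Rename attributes to their JSON keys, skipping values that are unset.
    body = {}
    for attr, meta in attribute_map.items():
        value = getattr(obj, attr, None)
        if value is not None:
            body[meta['key']] = value
    return body


class _ExampleLinkAttribution(object):
    def __init__(self, text, url):
        self.text = text
        self.url = url
        self._type = 'ContractualRules/LinkAttribution'
        self.target_property_name = None  # readonly; filled in by the server


print(to_wire(_ExampleLinkAttribution('Bing', 'https://www.bing.com'), _attribute_map))
# -> {'_type': 'ContractualRules/LinkAttribution', 'text': 'Bing', 'url': 'https://www.bing.com'}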
import hashlib from django.db.backends.creation import BaseDatabaseCreation from django.db.backends.utils import truncate_name from django.db.models.fields.related import ManyToManyField from django.db.transaction import atomic from django.utils.encoding import force_bytes from django.utils.log import getLogger from django.utils import six logger = getLogger('django.db.backends.schema') class BaseDatabaseSchemaEditor(object): """ This class (and its subclasses) are responsible for emitting schema-changing statements to the databases - model creation/removal/alteration, field renaming, index fiddling, and so on. It is intended to eventually completely replace DatabaseCreation. This class should be used by creating an instance for each set of schema changes (e.g. a syncdb run, a migration file), and by first calling start(), then the relevant actions, and then commit(). This is necessary to allow things like circular foreign key references - FKs will only be created once commit() is called. """ # Overrideable SQL templates sql_create_table = "CREATE TABLE %(table)s (%(definition)s)" sql_create_table_unique = "UNIQUE (%(columns)s)" sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s" sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s" sql_delete_table = "DROP TABLE %(table)s CASCADE" sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s" sql_alter_column = "ALTER TABLE %(table)s %(changes)s" sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s" sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL" sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL" sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s" sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT" sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE" sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s" sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL" sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)" sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)" sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_create_fk = ( "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) " "REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED" ) sql_create_inline_fk = None sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s" sql_delete_index = "DROP INDEX %(name)s" sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)" sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" def __init__(self, connection, collect_sql=False): self.connection = connection self.collect_sql = collect_sql if self.collect_sql: self.collected_sql = [] # State-managing methods def __enter__(self): self.deferred_sql = [] if self.connection.features.can_rollback_ddl: self.atomic = atomic(self.connection.alias) self.atomic.__enter__() return self def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: for sql in self.deferred_sql: self.execute(sql) if self.connection.features.can_rollback_ddl: self.atomic.__exit__(exc_type, exc_value, traceback) # Core utility functions def execute(self, 
sql, params=[]): """ Executes the given SQL statement, with optional parameters. """ # Log the command we're running, then run it logger.debug("%s; (params %r)" % (sql, params)) if self.collect_sql: self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ";") else: with self.connection.cursor() as cursor: cursor.execute(sql, params) def quote_name(self, name): return self.connection.ops.quote_name(name) # Field <-> database mapping functions def column_sql(self, model, field, include_default=False): """ Takes a field and returns its column definition. The field must already have had set_attributes_from_name called. """ # Get the column's type and use that as the basis of the SQL db_params = field.db_parameters(connection=self.connection) sql = db_params['type'] params = [] # Check for fields that aren't actually columns (e.g. M2M) if sql is None: return None, None # Work out nullability null = field.null # If we were told to include a default value, do so default_value = self.effective_default(field) include_default = include_default and not self.skip_default(field) if include_default and default_value is not None: if self.connection.features.requires_literal_defaults: # Some databases can't take defaults as a parameter (oracle) # If this is the case, the individual schema backend should # implement prepare_default sql += " DEFAULT %s" % self.prepare_default(default_value) else: sql += " DEFAULT %s" params += [default_value] # Oracle treats the empty string ('') as null, so coerce the null # option whenever '' is a possible value. if (field.empty_strings_allowed and not field.primary_key and self.connection.features.interprets_empty_strings_as_nulls): null = True if null and not self.connection.features.implied_column_null: sql += " NULL" elif not null: sql += " NOT NULL" # Primary key/unique outputs if field.primary_key: sql += " PRIMARY KEY" elif field.unique: sql += " UNIQUE" # Optionally add the tablespace if it's an implicitly indexed column tablespace = field.db_tablespace or model._meta.db_tablespace if tablespace and self.connection.features.supports_tablespaces and field.unique: sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True) # Return the sql return sql, params def skip_default(self, field): """ Some backends don't accept default values for certain columns types (i.e. MySQL longtext and longblob). """ return False def prepare_default(self, value): """ Only used for backends which have requires_literal_defaults feature """ raise NotImplementedError( 'subclasses of BaseDatabaseSchemaEditor for backends which have ' 'requires_literal_defaults must provide a prepare_default() method' ) def effective_default(self, field): """ Returns a field's effective database default value """ if field.has_default(): default = field.get_default() elif not field.null and field.blank and field.empty_strings_allowed: if field.get_internal_type() == "BinaryField": default = six.binary_type() else: default = six.text_type() else: default = None # If it's a callable, call it if six.callable(default): default = default() # Run it through the field's get_db_prep_save method so we can send it # to the database. default = field.get_db_prep_save(default, self.connection) return default def quote_value(self, value): """ Returns a quoted version of the value so it's safe to use in an SQL string. 
This is not safe against injection from user code; it is intended only for use in making SQL scripts or preparing default values for particularly tricky backends (defaults are not user-defined, though, so this is safe). """ raise NotImplementedError() # Actions def create_model(self, model): """ Takes a model and creates a table for it in the database. Will also create any accompanying indexes or unique constraints. """ # Create column SQL, add FK deferreds if needed column_sqls = [] params = [] for field in model._meta.local_fields: # SQL definition, extra_params = self.column_sql(model, field) if definition is None: continue # Check constraints can go on the column SQL here db_params = field.db_parameters(connection=self.connection) if db_params['check']: definition += " CHECK (%s)" % db_params['check'] # Autoincrement SQL (for backends with inline variant) col_type_suffix = field.db_type_suffix(connection=self.connection) if col_type_suffix: definition += " %s" % col_type_suffix params.extend(extra_params) # Indexes if field.db_index and not field.unique: self.deferred_sql.append(self._create_index_sql(model, [field], suffix="")) # FK if field.rel and field.db_constraint: to_table = field.rel.to._meta.db_table to_column = field.rel.to._meta.get_field(field.rel.field_name).column if self.connection.features.supports_foreign_keys: self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s")) elif self.sql_create_inline_fk: definition += " " + self.sql_create_inline_fk % { "to_table": self.quote_name(to_table), "to_column": self.quote_name(to_column), } # Add the SQL to our big list column_sqls.append("%s %s" % ( self.quote_name(field.column), definition, )) # Autoincrement SQL (for backends with post table definition variant) if field.get_internal_type() == "AutoField": autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column) if autoinc_sql: self.deferred_sql.extend(autoinc_sql) # Add any unique_togethers for fields in model._meta.unique_together: columns = [model._meta.get_field_by_name(field)[0].column for field in fields] column_sqls.append(self.sql_create_table_unique % { "columns": ", ".join(self.quote_name(column) for column in columns), }) # Make the table sql = self.sql_create_table % { "table": self.quote_name(model._meta.db_table), "definition": ", ".join(column_sqls) } self.execute(sql, params) # Add any index_togethers for field_names in model._meta.index_together: fields = [model._meta.get_field_by_name(field)[0] for field in field_names] self.execute(self._create_index_sql(model, fields, suffix="_idx")) # Make M2M tables for field in model._meta.local_many_to_many: if field.rel.through._meta.auto_created: self.create_model(field.rel.through) def delete_model(self, model): """ Deletes a model from the database. """ # Handle auto-created intermediary models for field in model._meta.local_many_to_many: if field.rel.through._meta.auto_created: self.delete_model(field.rel.through) # Delete the table self.execute(self.sql_delete_table % { "table": self.quote_name(model._meta.db_table), }) def alter_unique_together(self, model, old_unique_together, new_unique_together): """ Deals with a model changing its unique_together. Note: The input unique_togethers must be doubly-nested, not the single- nested ["foo", "bar"] format. 
""" olds = set(tuple(fields) for fields in old_unique_together) news = set(tuple(fields) for fields in new_unique_together) # Deleted uniques for fields in olds.difference(news): columns = [model._meta.get_field_by_name(field)[0].column for field in fields] constraint_names = self._constraint_names(model, columns, unique=True) if len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % ( len(constraint_names), model._meta.db_table, ", ".join(columns), )) self.execute(self._delete_constraint_sql(self.sql_delete_unique, model, constraint_names[0])) # Created uniques for fields in news.difference(olds): columns = [model._meta.get_field_by_name(field)[0].column for field in fields] self.execute(self._create_unique_sql(model, columns)) def alter_index_together(self, model, old_index_together, new_index_together): """ Deals with a model changing its index_together. Note: The input index_togethers must be doubly-nested, not the single- nested ["foo", "bar"] format. """ olds = set(tuple(fields) for fields in old_index_together) news = set(tuple(fields) for fields in new_index_together) # Deleted indexes for fields in olds.difference(news): columns = [model._meta.get_field_by_name(field)[0].column for field in fields] constraint_names = self._constraint_names(model, list(columns), index=True) if len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % ( len(constraint_names), model._meta.db_table, ", ".join(columns), )) self.execute(self._delete_constraint_sql(self.sql_delete_index, model, constraint_names[0])) # Created indexes for field_names in news.difference(olds): fields = [model._meta.get_field_by_name(field)[0] for field in field_names] self.execute(self._create_index_sql(model, fields, suffix="_idx")) def alter_db_table(self, model, old_db_table, new_db_table): """ Renames the table a model points to. """ if old_db_table == new_db_table: return self.execute(self.sql_rename_table % { "old_table": self.quote_name(old_db_table), "new_table": self.quote_name(new_db_table), }) def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace): """ Moves a model's table between tablespaces """ self.execute(self.sql_retablespace_table % { "table": self.quote_name(model._meta.db_table), "old_tablespace": self.quote_name(old_db_tablespace), "new_tablespace": self.quote_name(new_db_tablespace), }) def add_field(self, model, field): """ Creates a field on a model. 
Usually involves adding a column, but may involve adding a table instead (for M2M fields) """ # Special-case implicit M2M tables if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created: return self.create_model(field.rel.through) # Get the column's definition definition, params = self.column_sql(model, field, include_default=True) # It might not actually have a column behind it if definition is None: return # Check constraints can go on the column SQL here db_params = field.db_parameters(connection=self.connection) if db_params['check']: definition += " CHECK (%s)" % db_params['check'] # Build the SQL and run it sql = self.sql_create_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), "definition": definition, } self.execute(sql, params) # Drop the default if we need to # (Django usually does not use in-database defaults) if not self.skip_default(field) and field.default is not None: sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": self.sql_alter_column_no_default % { "column": self.quote_name(field.column), } } self.execute(sql) # Add an index, if required if field.db_index and not field.unique: self.deferred_sql.append(self._create_index_sql(model, [field])) # Add any FK constraints later if field.rel and self.connection.features.supports_foreign_keys and field.db_constraint: self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s")) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def remove_field(self, model, field): """ Removes a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table. """ # Special-case implicit M2M tables if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created: return self.delete_model(field.rel.through) # It might not actually have a column behind it if field.db_parameters(connection=self.connection)['type'] is None: return # Drop any FK constraints, MySQL requires explicit deletion if field.rel: fk_names = self._constraint_names(model, [field.column], foreign_key=True) for fk_name in fk_names: self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name)) # Delete the column sql = self.sql_delete_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), } self.execute(sql) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def alter_field(self, model, old_field, new_field, strict=False): """ Allows a field's type, uniqueness, nullability, default, column, constraints etc. to be modified. Requires a copy of the old field as well so we can only perform changes that are required. If strict is true, raises errors if the old column does not match old_field precisely. 
""" # Ensure this field is even column-based old_db_params = old_field.db_parameters(connection=self.connection) old_type = old_db_params['type'] new_db_params = new_field.db_parameters(connection=self.connection) new_type = new_db_params['type'] if (old_type is None and old_field.rel is None) or (new_type is None and new_field.rel is None): raise ValueError( "Cannot alter field %s into %s - they do not properly define " "db_type (are you using PostGIS 1.5 or badly-written custom " "fields?)" % (old_field, new_field), ) elif old_type is None and new_type is None and ( old_field.rel.through and new_field.rel.through and old_field.rel.through._meta.auto_created and new_field.rel.through._meta.auto_created): return self._alter_many_to_many(model, old_field, new_field, strict) elif old_type is None and new_type is None and ( old_field.rel.through and new_field.rel.through and not old_field.rel.through._meta.auto_created and not new_field.rel.through._meta.auto_created): # Both sides have through models; this is a no-op. return elif old_type is None or new_type is None: raise ValueError( "Cannot alter field %s into %s - they are not compatible types " "(you cannot alter to or from M2M fields, or add or remove " "through= on M2M fields)" % (old_field, new_field) ) self._alter_field(model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict) def _alter_field(self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False): """Actually perform a "physical" (non-ManyToMany) field update.""" # Has unique been removed? if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)): # Find the unique constraint for this field constraint_names = self._constraint_names(model, [old_field.column], unique=True) if strict and len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, )) for constraint_name in constraint_names: self.execute(self._delete_constraint_sql(self.sql_delete_unique, model, constraint_name)) # Drop any FK constraints, we'll remake them later fks_dropped = set() if old_field.rel and old_field.db_constraint: fk_names = self._constraint_names(model, [old_field.column], foreign_key=True) if strict and len(fk_names) != 1: raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % ( len(fk_names), model._meta.db_table, old_field.column, )) for fk_name in fk_names: fks_dropped.add((old_field.column,)) self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name)) # Drop incoming FK constraints if we're a primary key and things are going # to change. if old_field.primary_key and new_field.primary_key and old_type != new_type: for rel in new_field.model._meta.get_all_related_objects(): rel_fk_names = self._constraint_names(rel.model, [rel.field.column], foreign_key=True) for fk_name in rel_fk_names: self.execute(self._delete_constraint_sql(self.sql_delete_fk, rel.model, fk_name)) # Removed an index? 
if (old_field.db_index and not new_field.db_index and not old_field.unique and not (not new_field.unique and old_field.unique)): # Find the index for this field index_names = self._constraint_names(model, [old_field.column], index=True) if strict and len(index_names) != 1: raise ValueError("Found wrong number (%s) of indexes for %s.%s" % ( len(index_names), model._meta.db_table, old_field.column, )) for index_name in index_names: self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name)) # Change check constraints? if old_db_params['check'] != new_db_params['check'] and old_db_params['check']: constraint_names = self._constraint_names(model, [old_field.column], check=True) if strict and len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, )) for constraint_name in constraint_names: self.execute(self._delete_constraint_sql(self.sql_delete_check, model, constraint_name)) # Have they renamed the column? if old_field.column != new_field.column: self.execute(self.sql_rename_column % { "table": self.quote_name(model._meta.db_table), "old_column": self.quote_name(old_field.column), "new_column": self.quote_name(new_field.column), "type": new_type, }) # Next, start accumulating actions to do actions = [] null_actions = [] post_actions = [] # Type change? if old_type != new_type: fragment, other_actions = self._alter_column_type_sql(model._meta.db_table, new_field.column, new_type) actions.append(fragment) post_actions.extend(other_actions) # When changing a column NULL constraint to NOT NULL with a given # default value, we need to perform 4 steps: # 1. Add a default for new incoming writes # 2. Update existing NULL rows with new default # 3. Replace NULL constraint with NOT NULL # 4. Drop the default again. # Default change? old_default = self.effective_default(old_field) new_default = self.effective_default(new_field) if old_default != new_default: if new_default is None: actions.append(( self.sql_alter_column_no_default % { "column": self.quote_name(new_field.column), }, [], )) else: if self.connection.features.requires_literal_defaults: # Some databases can't take defaults as a parameter (oracle) # If this is the case, the individual schema backend should # implement prepare_default actions.append(( self.sql_alter_column_default % { "column": self.quote_name(new_field.column), "default": self.prepare_default(new_default), }, [], )) else: actions.append(( self.sql_alter_column_default % { "column": self.quote_name(new_field.column), "default": "%s", }, [new_default], )) # Nullability change? if old_field.null != new_field.null: if new_field.null: null_actions.append(( self.sql_alter_column_null % { "column": self.quote_name(new_field.column), "type": new_type, }, [], )) else: null_actions.append(( self.sql_alter_column_not_null % { "column": self.quote_name(new_field.column), "type": new_type, }, [], )) # Only if we have a default and there is a change from NULL to NOT NULL four_way_default_alteration = ( new_field.has_default() and (old_field.null and not new_field.null) ) if actions or null_actions: if not four_way_default_alteration: # If we don't have to do a 4-way default alteration we can # directly run a (NOT) NULL alteration actions = actions + null_actions # Combine actions together if we can (e.g. 
postgres) if self.connection.features.supports_combined_alters and actions: sql, params = tuple(zip(*actions)) actions = [(", ".join(sql), sum(params, []))] # Apply those actions for sql, params in actions: self.execute( self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": sql, }, params, ) if four_way_default_alteration: # Update existing rows with default value self.execute( self.sql_update_with_default % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(new_field.column), "default": "%s", }, [new_default], ) # Since we didn't run a NOT NULL change before we need to do it # now for sql, params in null_actions: self.execute( self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": sql, }, params, ) if post_actions: for sql, params in post_actions: self.execute(sql, params) # Added a unique? if not old_field.unique and new_field.unique: self.execute(self._create_unique_sql(model, [new_field.column])) # Added an index? if (not old_field.db_index and new_field.db_index and not new_field.unique and not (not old_field.unique and new_field.unique)): self.execute(self._create_index_sql(model, [new_field], suffix="_uniq")) # Type alteration on primary key? Then we need to alter the column # referring to us. rels_to_update = [] if old_field.primary_key and new_field.primary_key and old_type != new_type: rels_to_update.extend(new_field.model._meta.get_all_related_objects()) # Changed to become primary key? # Note that we don't detect unsetting of a PK, as we assume another field # will always come along and replace it. if not old_field.primary_key and new_field.primary_key: # First, drop the old PK constraint_names = self._constraint_names(model, primary_key=True) if strict and len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of PK constraints for %s" % ( len(constraint_names), model._meta.db_table, )) for constraint_name in constraint_names: self.execute(self._delete_constraint_sql(self.sql_delete_pk, model, constraint_name)) # Make the new one self.execute( self.sql_create_pk % { "table": self.quote_name(model._meta.db_table), "name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_pk")), "columns": self.quote_name(new_field.column), } ) # Update all referencing columns rels_to_update.extend(new_field.model._meta.get_all_related_objects()) # Handle our type alters on the other end of rels from the PK stuff above for rel in rels_to_update: rel_db_params = rel.field.db_parameters(connection=self.connection) rel_type = rel_db_params['type'] self.execute( self.sql_alter_column % { "table": self.quote_name(rel.model._meta.db_table), "changes": self.sql_alter_column_type % { "column": self.quote_name(rel.field.column), "type": rel_type, } } ) # Does it have a foreign key? if new_field.rel and \ (fks_dropped or (old_field.rel and not old_field.db_constraint)) and \ new_field.db_constraint: self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s")) # Rebuild FKs that pointed to us if we previously had to drop them if old_field.primary_key and new_field.primary_key and old_type != new_type: for rel in new_field.model._meta.get_all_related_objects(): self.execute(self._create_fk_sql(rel.model, rel.field, "_fk")) # Does it have check constraints we need to add? 
if old_db_params['check'] != new_db_params['check'] and new_db_params['check']: self.execute( self.sql_create_check % { "table": self.quote_name(model._meta.db_table), "name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_check")), "column": self.quote_name(new_field.column), "check": new_db_params['check'], } ) # Drop the default if we need to # (Django usually does not use in-database defaults) if not self.skip_default(new_field) and new_field.default is not None: sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": self.sql_alter_column_no_default % { "column": self.quote_name(new_field.column), } } self.execute(sql) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def _alter_column_type_sql(self, table, column, type): """ Hook to specialize column type alteration for different backends, for cases when a creation type is different to an alteration type (e.g. SERIAL in PostgreSQL, PostGIS fields). Should return two things; an SQL fragment of (sql, params) to insert into an ALTER TABLE statement, and a list of extra (sql, params) tuples to run once the field is altered. """ return ( ( self.sql_alter_column_type % { "column": self.quote_name(column), "type": type, }, [], ), [], ) def _alter_many_to_many(self, model, old_field, new_field, strict): """ Alters M2Ms to repoint their to= endpoints. """ # Rename the through table if old_field.rel.through._meta.db_table != new_field.rel.through._meta.db_table: self.alter_db_table(old_field.rel.through, old_field.rel.through._meta.db_table, new_field.rel.through._meta.db_table) # Repoint the FK to the other side self.alter_field( new_field.rel.through, # We need the field that points to the target model, so we can tell alter_field to change it - # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model) old_field.rel.through._meta.get_field_by_name(old_field.m2m_reverse_field_name())[0], new_field.rel.through._meta.get_field_by_name(new_field.m2m_reverse_field_name())[0], ) self.alter_field( new_field.rel.through, # for self-referential models we need to alter field from the other end too old_field.rel.through._meta.get_field_by_name(old_field.m2m_field_name())[0], new_field.rel.through._meta.get_field_by_name(new_field.m2m_field_name())[0], ) def _create_index_name(self, model, column_names, suffix=""): """ Generates a unique name for an index/unique constraint. 
""" # If there is just one column in the index, use a default algorithm from Django if len(column_names) == 1 and not suffix: return truncate_name( '%s_%s' % (model._meta.db_table, BaseDatabaseCreation._digest(column_names[0])), self.connection.ops.max_name_length() ) # Else generate the name for the index using a different algorithm table_name = model._meta.db_table.replace('"', '').replace('.', '_') index_unique_name = '_%x' % abs(hash((table_name, ','.join(column_names)))) max_length = self.connection.ops.max_name_length() or 200 # If the index name is too long, truncate it index_name = ('%s_%s%s%s' % ( table_name, column_names[0], index_unique_name, suffix, )).replace('"', '').replace('.', '_') if len(index_name) > max_length: part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix)) index_name = '%s%s' % (table_name[:(max_length - len(part))], part) # It shouldn't start with an underscore (Oracle hates this) if index_name[0] == "_": index_name = index_name[1:] # If it's STILL too long, just hash it down if len(index_name) > max_length: index_name = hashlib.md5(force_bytes(index_name)).hexdigest()[:max_length] # It can't start with a number on Oracle, so prepend D if we need to if index_name[0].isdigit(): index_name = "D%s" % index_name[:-1] return index_name def _create_index_sql(self, model, fields, suffix=""): columns = [field.column for field in fields] return self.sql_create_index % { "table": self.quote_name(model._meta.db_table), "name": self.quote_name(self._create_index_name(model, columns, suffix=suffix)), "columns": ", ".join(self.quote_name(column) for column in columns), "extra": "", } def _create_fk_sql(self, model, field, suffix): from_table = model._meta.db_table from_column = field.column to_table = field.related_field.model._meta.db_table to_column = field.related_field.column suffix = suffix % { "to_table": to_table, "to_column": to_column, } return self.sql_create_fk % { "table": self.quote_name(from_table), "name": self.quote_name(self._create_index_name(model, [from_column], suffix=suffix)), "column": self.quote_name(from_column), "to_table": self.quote_name(to_table), "to_column": self.quote_name(to_column), } def _create_unique_sql(self, model, columns): return self.sql_create_unique % { "table": self.quote_name(model._meta.db_table), "name": self.quote_name(self._create_index_name(model, columns, suffix="_uniq")), "columns": ", ".join(self.quote_name(column) for column in columns), } def _delete_constraint_sql(self, template, model, name): return template % { "table": self.quote_name(model._meta.db_table), "name": self.quote_name(name), } def _constraint_names(self, model, column_names=None, unique=None, primary_key=None, index=None, foreign_key=None, check=None): """ Returns all constraint names matching the columns and conditions """ column_names = list(column_names) if column_names else None with self.connection.cursor() as cursor: constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table) result = [] for name, infodict in constraints.items(): if column_names is None or column_names == infodict['columns']: if unique is not None and infodict['unique'] != unique: continue if primary_key is not None and infodict['primary_key'] != primary_key: continue if index is not None and infodict['index'] != index: continue if check is not None and infodict['check'] != check: continue if foreign_key is not None and not infodict['foreign_key']: continue result.append(name) return result
0.002471
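# A standalone sketch of the template-substitution approach used by the
# schema editor above, applied to the four-step NULL -> NOT NULL change that
# its comments describe. The table name, column name, and default value are
# illustrative assumptions; real usage goes through SchemaEditor.execute(),
# not print().

sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"

table, column, default = '"app_account"', '"status"', "'active'"

statements = [
    # 1. Add a default for new incoming writes.
    sql_alter_column % {"table": table, "changes": sql_alter_column_default % {"column": column, "default": default}},
    # 2. Backfill existing NULL rows with that default.
    sql_update_with_default % {"table": table, "column": column, "default": default},
    # 3. Replace the NULL constraint with NOT NULL.
    sql_alter_column % {"table": table, "changes": sql_alter_column_not_null % {"column": column}},
    # 4. Drop the default again (Django keeps defaults in Python, not in the database).
    sql_alter_column % {"table": table, "changes": sql_alter_column_no_default % {"column": column}},
]

for stmt in statements:
    print(stmt + ";")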
from cms import __version__ as cms_version from cms.plugin_base import CMSPluginBase from cms.plugin_pool import plugin_pool from django.conf import settings from django.forms.fields import CharField from django.utils.translation import ugettext_lazy as _ from djangocms_text_tinymce.forms import TextForm from djangocms_text_tinymce.models import Text from djangocms_text_tinymce.utils import plugin_tags_to_user_html from djangocms_text_tinymce.widgets import TextEditorWidget class TextPlugin(CMSPluginBase): model = Text name = _("Text") form = TextForm render_template = "cms/plugins/text.html" change_form_template = "cms/plugins/text_plugin_change_form.html" def get_editor_widget(self, request): """ Returns the Django form Widget to be used for the text area """ return TextEditorWidget(profile=settings.TINYMCE_ADMIN_CONFIG) def get_form_class(self, request): """ Returns a subclass of Form to be used by this plugin """ # We avoid mutating the Form declared above by subclassing class TextPluginForm(self.form): pass widget = self.get_editor_widget(request) TextPluginForm.declared_fields["body"] = CharField( widget=widget, required=False, label='' ) return TextPluginForm def get_form(self, request, obj=None, **kwargs): plugins = plugin_pool.get_text_enabled_plugins( self.placeholder, self.page ) pk = self.cms_plugin_instance.pk form = self.get_form_class(request) kwargs['form'] = form # override standard form return super(TextPlugin, self).get_form(request, obj, **kwargs) # def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None): """ We override the change form template path to provide backwards compatibility with CMS 2.x """ if cms_version.startswith('2'): context['change_form_template'] = "admin/cms/page/plugin_change_form.html" return super(TextPlugin, self).render_change_form(request, context, add, change, form_url, obj) def render(self, context, instance, placeholder): context.update({ 'body': plugin_tags_to_user_html( instance.body, context, placeholder ), 'placeholder': placeholder, 'object': instance }) return context def save_model(self, request, obj, form, change): obj.clean_plugins() super(TextPlugin, self).save_model(request, obj, form, change) plugin_pool.register_plugin(TextPlugin)
0.002202
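# A framework-free sketch of the pattern used in get_form_class() above:
# build a throwaway subclass per request so that injecting an editor-backed
# field never mutates the shared base form class. BaseForm and
# with_editor_field are illustrative names, not djangocms or Django API;
# the real plugin works with django.forms classes and declared_fields.

class BaseForm(object):
    declared_fields = {}


def with_editor_field(base_form, editor_widget):
    # Subclass first, then give the subclass its own copy of the class-level
    # mapping before injecting the "body" entry; the original
    # base_form.declared_fields stays untouched for other requests.
    form_class = type('PerRequestForm', (base_form,), {})
    form_class.declared_fields = dict(base_form.declared_fields)
    form_class.declared_fields['body'] = editor_widget
    return form_class


first = with_editor_field(BaseForm, '<editor widget A>')
second = with_editor_field(BaseForm, '<editor widget B>')

assert BaseForm.declared_fields == {}                      # shared class unchanged
assert first.declared_fields['body'] == '<editor widget A>'
assert second.declared_fields['body'] == '<editor widget B>'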
#!/usr/bin/env python3 # -*- coding: utf8 -*- # # This file is part of kanjitest # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from collections import OrderedDict default_settings = OrderedDict() default_settings['default'] = { 'choice' : None, 'choice_example' : ['genki1=1:20', 'genki2=2:10'], 'prompt_list' : 'meaning', 'p_min' : 4, 'p_max' : 100, 'exp' : 4, 'permutation' : False, 'prio_proceed' : False, 'db_relative' : 'db/kanji.db', 'lang' : 'en', 'keymap' : 'urwid', 'ui_class' : 'urwid_ui', 'print_selected' : False, 'low_mem' : False, 'verbosity' : 1, 'quiet' : False, 'exit' : False, 'keydebug' : False, 'no_scheck' : False, } default_maps = OrderedDict() default_maps['default'] = { 'exit' : ['esc', 'q'], 'skip' : ['s'], 'proceed' : ['up', 'mouse_click_left', 'mouse_click'], 'hide' : ['down'], 'prioritylist' : ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], 'inc_priority' : ['+'], 'dec_priority' : ['-'], } default_maps['urwid'] = { 'exit' : ['esc', 'q'], 'skip' : ['s'], 'proceed' : ['up', 'mouse_click_left', 'mouse_click'], 'hide' : ['down'], 'prioritylist' : ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], 'inc_priority' : ['+'], 'dec_priority' : ['-'], } default_captions = OrderedDict() default_captions['en'] = { 'header_text' : 'Prompt for: ', 'flipcount' : '(flips)', 'cb_on' : 'on reading', 'cb_kun' : 'kun reading', 'cb_sign' : 'sign', 'cb_meaning' : 'meaning', 'cb_misc' : 'miscellaneous', 'label_sign' : '', 'label_on' : '▶ ', 'label_kun' : '▷ ', 'label_meaning': '', 'label_misc' : '', 'startup_sign' : '漢字', 'startup_msg' : 'Start with any key', 'footer_prio' : 'Priority: {priority}', 'footer_desc' : 'set priority: `{prioritymin}`-`{prioritymax}` next: `{proceed}` skip: `{skip}`', 'footer_ssize' : '{set_size} signs in total', 'label_sign_suffix' : '', 'label_on_suffix' : '', 'label_kun_suffix' : '', 'label_meaning_suffix': '', 'label_misc_suffix' : '', } default_captions['de'] = { 'header_text' : 'Zeige: ', 'flipcount' : '(flips)', 'cb_on' : 'On Lesung', 'cb_kun' : 'Kun Lesung', 'cb_sign' : 'Zeichen', 'cb_meaning' : 'Bedeutung', 'cb_misc' : 'Weiteres', 'label_sign' : '', 'label_on' : '▶ ', 'label_kun' : '▷ ', 'label_meaning': '', 'label_misc' : '', 'startup_sign' : '漢字', 'startup_msg' : 'Beginne mit beliebiger Taste', 'footer_prio' : 'Priorität: {priority}', 'footer_desc' : 'Priorität setzen: `{prioritymin}`-`{prioritymax}` Weiter: `{proceed}` Überspringen: `{skip}`', 'footer_ssize' : '{set_size} Zeichen in Auswahl', 'label_sign_suffix' : '', 'label_on_suffix' : '', 'label_kun_suffix' : '', 'label_meaning_suffix': '', 'label_misc_suffix' : '', }
0.014557
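# A small sketch (not part of the module above) of how per-profile defaults
# like these might be resolved: start from the 'default' profile and layer
# user-supplied overrides on top. resolve_settings() and the trimmed-down
# defaults dict are illustrative assumptions, not kanjitest's own API.

from collections import OrderedDict

default_settings = OrderedDict()
default_settings['default'] = {
    'prompt_list': 'meaning',
    'lang': 'en',
    'verbosity': 1,
}


def resolve_settings(overrides=None, profile='default'):
    # Copy the profile first so callers never mutate the shared defaults.
    resolved = dict(default_settings[profile])
    resolved.update(overrides or {})
    return resolved


print(resolve_settings({'lang': 'de', 'verbosity': 2}))
# -> {'prompt_list': 'meaning', 'lang': 'de', 'verbosity': 2}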
# orm/interfaces.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ Contains various base classes used throughout the ORM. Defines some key base classes prominent within the internals, as well as the now-deprecated ORM extension classes. Other than the deprecated extensions, this module and the classes within are mostly private, though some attributes are exposed when inspecting mappings. """ from __future__ import absolute_import from .. import util from ..sql import operators from .base import (ONETOMANY, MANYTOONE, MANYTOMANY, EXT_CONTINUE, EXT_STOP, NOT_EXTENSION) from .base import (InspectionAttr, InspectionAttr, InspectionAttrInfo, _MappedAttribute) import collections from .. import inspect # imported later MapperExtension = SessionExtension = AttributeExtension = None __all__ = ( 'AttributeExtension', 'EXT_CONTINUE', 'EXT_STOP', 'ONETOMANY', 'MANYTOMANY', 'MANYTOONE', 'NOT_EXTENSION', 'LoaderStrategy', 'MapperExtension', 'MapperOption', 'MapperProperty', 'PropComparator', 'SessionExtension', 'StrategizedProperty', ) class MapperProperty(_MappedAttribute, InspectionAttr, util.MemoizedSlots): """Represent a particular class attribute mapped by :class:`.Mapper`. The most common occurrences of :class:`.MapperProperty` are the mapped :class:`.Column`, which is represented in a mapping as an instance of :class:`.ColumnProperty`, and a reference to another class produced by :func:`.relationship`, represented in the mapping as an instance of :class:`.RelationshipProperty`. """ __slots__ = ( '_configure_started', '_configure_finished', 'parent', 'key', 'info' ) cascade = frozenset() """The set of 'cascade' attribute names. This collection is checked before the 'cascade_iterator' method is called. The collection typically only applies to a RelationshipProperty. """ is_property = True """Part of the InspectionAttr interface; states this object is a mapper property. """ def _memoized_attr_info(self): """Info dictionary associated with the object, allowing user-defined data to be associated with this :class:`.InspectionAttr`. The dictionary is generated when first accessed. Alternatively, it can be specified as a constructor argument to the :func:`.column_property`, :func:`.relationship`, or :func:`.composite` functions. .. versionadded:: 0.8 Added support for .info to all :class:`.MapperProperty` subclasses. .. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also available on extension types via the :attr:`.InspectionAttrInfo.info` attribute, so that it can apply to a wider variety of ORM and extension constructs. .. seealso:: :attr:`.QueryableAttribute.info` :attr:`.SchemaItem.info` """ return {} def setup(self, context, entity, path, adapter, **kwargs): """Called by Query for the purposes of constructing a SQL statement. Each MapperProperty associated with the target mapper processes the statement referenced by the query context, adding columns and/or criterion as appropriate. """ def create_row_processor(self, context, path, mapper, result, adapter, populators): """Produce row processing functions and append to the given set of populators lists. """ def cascade_iterator(self, type_, state, visited_instances=None, halt_on=None): """Iterate through instances related to the given instance for a particular 'cascade', starting with this MapperProperty. Return an iterator3-tuples (instance, mapper, state). 
Note that the 'cascade' collection on this MapperProperty is checked first for the given type before cascade_iterator is called. This method typically only applies to RelationshipProperty. """ return iter(()) def set_parent(self, parent, init): """Set the parent mapper that references this MapperProperty. This method is overridden by some subclasses to perform extra setup when the mapper is first known. """ self.parent = parent def instrument_class(self, mapper): """Hook called by the Mapper to the property to initiate instrumentation of the class attribute managed by this MapperProperty. The MapperProperty here will typically call out to the attributes module to set up an InstrumentedAttribute. This step is the first of two steps to set up an InstrumentedAttribute, and is called early in the mapper setup process. The second step is typically the init_class_attribute step, called from StrategizedProperty via the post_instrument_class() hook. This step assigns additional state to the InstrumentedAttribute (specifically the "impl") which has been determined after the MapperProperty has determined what kind of persistence management it needs to do (e.g. scalar, object, collection, etc). """ def __init__(self): self._configure_started = False self._configure_finished = False def init(self): """Called after all mappers are created to assemble relationships between mappers and perform other post-mapper-creation initialization steps. """ self._configure_started = True self.do_init() self._configure_finished = True @property def class_attribute(self): """Return the class-bound descriptor corresponding to this :class:`.MapperProperty`. This is basically a ``getattr()`` call:: return getattr(self.parent.class_, self.key) I.e. if this :class:`.MapperProperty` were named ``addresses``, and the class to which it is mapped is ``User``, this sequence is possible:: >>> from sqlalchemy import inspect >>> mapper = inspect(User) >>> addresses_property = mapper.attrs.addresses >>> addresses_property.class_attribute is User.addresses True >>> User.addresses.property is addresses_property True """ return getattr(self.parent.class_, self.key) def do_init(self): """Perform subclass-specific initialization post-mapper-creation steps. This is a template method called by the ``MapperProperty`` object's init() method. """ def post_instrument_class(self, mapper): """Perform instrumentation adjustments that need to occur after init() has completed. The given Mapper is the Mapper invoking the operation, which may not be the same Mapper as self.parent in an inheritance scenario; however, Mapper will always at least be a sub-mapper of self.parent. This method is typically used by StrategizedProperty, which delegates it to LoaderStrategy.init_class_attribute() to perform final setup on the class-bound InstrumentedAttribute. """ def merge(self, session, source_state, source_dict, dest_state, dest_dict, load, _recursive): """Merge the attribute represented by this ``MapperProperty`` from source to destination object. """ def __repr__(self): return '<%s at 0x%x; %s>' % ( self.__class__.__name__, id(self), getattr(self, 'key', 'no key')) class PropComparator(operators.ColumnOperators): """Defines SQL operators for :class:`.MapperProperty` objects. SQLAlchemy allows for operators to be redefined at both the Core and ORM level. :class:`.PropComparator` is the base class of operator redefinition for ORM-level operations, including those of :class:`.ColumnProperty`, :class:`.RelationshipProperty`, and :class:`.CompositeProperty`. 
.. note:: With the advent of Hybrid properties introduced in SQLAlchemy 0.7, as well as Core-level operator redefinition in SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator` instances is extremely rare. See :ref:`hybrids_toplevel` as well as :ref:`types_operators`. User-defined subclasses of :class:`.PropComparator` may be created. The built-in Python comparison and math operator methods, such as :meth:`.operators.ColumnOperators.__eq__`, :meth:`.operators.ColumnOperators.__lt__`, and :meth:`.operators.ColumnOperators.__add__`, can be overridden to provide new operator behavior. The custom :class:`.PropComparator` is passed to the :class:`.MapperProperty` instance via the ``comparator_factory`` argument. In each case, the appropriate subclass of :class:`.PropComparator` should be used:: # definition of custom PropComparator subclasses from sqlalchemy.orm.properties import \\ ColumnProperty,\\ CompositeProperty,\\ RelationshipProperty class MyColumnComparator(ColumnProperty.Comparator): def __eq__(self, other): return self.__clause_element__() == other class MyRelationshipComparator(RelationshipProperty.Comparator): def any(self, expression): "define the 'any' operation" # ... class MyCompositeComparator(CompositeProperty.Comparator): def __gt__(self, other): "redefine the 'greater than' operation" return sql.and_(*[a>b for a, b in zip(self.__clause_element__().clauses, other.__composite_values__())]) # application of custom PropComparator subclasses from sqlalchemy.orm import column_property, relationship, composite from sqlalchemy import Column, String class SomeMappedClass(Base): some_column = column_property(Column("some_column", String), comparator_factory=MyColumnComparator) some_relationship = relationship(SomeOtherClass, comparator_factory=MyRelationshipComparator) some_composite = composite( Column("a", String), Column("b", String), comparator_factory=MyCompositeComparator ) Note that for column-level operator redefinition, it's usually simpler to define the operators at the Core level, using the :attr:`.TypeEngine.comparator_factory` attribute. See :ref:`types_operators` for more detail. See also: :class:`.ColumnProperty.Comparator` :class:`.RelationshipProperty.Comparator` :class:`.CompositeProperty.Comparator` :class:`.ColumnOperators` :ref:`types_operators` :attr:`.TypeEngine.comparator_factory` """ __slots__ = 'prop', 'property', '_parententity', '_adapt_to_entity' def __init__(self, prop, parentmapper, adapt_to_entity=None): self.prop = self.property = prop self._parententity = adapt_to_entity or parentmapper self._adapt_to_entity = adapt_to_entity def __clause_element__(self): raise NotImplementedError("%r" % self) def _query_clause_element(self): return self.__clause_element__() def adapt_to_entity(self, adapt_to_entity): """Return a copy of this PropComparator which will use the given :class:`.AliasedInsp` to produce corresponding expressions. """ return self.__class__(self.prop, self._parententity, adapt_to_entity) @property def _parentmapper(self): """legacy; this is renamed to _parententity to be compatible with QueryableAttribute.""" return inspect(self._parententity).mapper @property def adapter(self): """Produce a callable that adapts column expressions to suit an aliased version of this comparator. 
""" if self._adapt_to_entity is None: return None else: return self._adapt_to_entity._adapt_element @property def info(self): return self.property.info @staticmethod def any_op(a, b, **kwargs): return a.any(b, **kwargs) @staticmethod def has_op(a, b, **kwargs): return a.has(b, **kwargs) @staticmethod def of_type_op(a, class_): return a.of_type(class_) def of_type(self, class_): """Redefine this object in terms of a polymorphic subclass. Returns a new PropComparator from which further criterion can be evaluated. e.g.:: query.join(Company.employees.of_type(Engineer)).\\ filter(Engineer.name=='foo') :param \class_: a class or mapper indicating that criterion will be against this specific subclass. """ return self.operate(PropComparator.of_type_op, class_) def any(self, criterion=None, **kwargs): """Return true if this collection contains any member that meets the given criterion. The usual implementation of ``any()`` is :meth:`.RelationshipProperty.Comparator.any`. :param criterion: an optional ClauseElement formulated against the member class' table or attributes. :param \**kwargs: key/value pairs corresponding to member class attribute names which will be compared via equality to the corresponding values. """ return self.operate(PropComparator.any_op, criterion, **kwargs) def has(self, criterion=None, **kwargs): """Return true if this element references a member which meets the given criterion. The usual implementation of ``has()`` is :meth:`.RelationshipProperty.Comparator.has`. :param criterion: an optional ClauseElement formulated against the member class' table or attributes. :param \**kwargs: key/value pairs corresponding to member class attribute names which will be compared via equality to the corresponding values. """ return self.operate(PropComparator.has_op, criterion, **kwargs) class StrategizedProperty(MapperProperty): """A MapperProperty which uses selectable strategies to affect loading behavior. There is a single strategy selected by default. Alternate strategies can be selected at Query time through the usage of ``StrategizedOption`` objects via the Query.options() method. The mechanics of StrategizedProperty are used for every Query invocation for every mapped attribute participating in that Query, to determine first how the attribute will be rendered in SQL and secondly how the attribute will retrieve a value from a result row and apply it to a mapped object. The routines here are very performance-critical. """ __slots__ = '_strategies', 'strategy' strategy_wildcard_key = None def _get_context_loader(self, context, path): load = None # use EntityRegistry.__getitem__()->PropRegistry here so # that the path is stated in terms of our base search_path = dict.__getitem__(path, self) # search among: exact match, "attr.*", "default" strategy # if any. 
for path_key in ( search_path._loader_key, search_path._wildcard_path_loader_key, search_path._default_path_loader_key ): if path_key in context.attributes: load = context.attributes[path_key] break return load def _get_strategy(self, key): try: return self._strategies[key] except KeyError: cls = self._strategy_lookup(*key) self._strategies[key] = self._strategies[ cls] = strategy = cls(self) return strategy def _get_strategy_by_cls(self, cls): return self._get_strategy(cls._strategy_keys[0]) def setup( self, context, entity, path, adapter, **kwargs): loader = self._get_context_loader(context, path) if loader and loader.strategy: strat = self._get_strategy(loader.strategy) else: strat = self.strategy strat.setup_query(context, entity, path, loader, adapter, **kwargs) def create_row_processor( self, context, path, mapper, result, adapter, populators): loader = self._get_context_loader(context, path) if loader and loader.strategy: strat = self._get_strategy(loader.strategy) else: strat = self.strategy strat.create_row_processor( context, path, loader, mapper, result, adapter, populators) def do_init(self): self._strategies = {} self.strategy = self._get_strategy_by_cls(self.strategy_class) def post_instrument_class(self, mapper): if not self.parent.non_primary and \ not mapper.class_manager._attr_has_impl(self.key): self.strategy.init_class_attribute(mapper) _all_strategies = collections.defaultdict(dict) @classmethod def strategy_for(cls, **kw): def decorate(dec_cls): # ensure each subclass of the strategy has its # own _strategy_keys collection if '_strategy_keys' not in dec_cls.__dict__: dec_cls._strategy_keys = [] key = tuple(sorted(kw.items())) cls._all_strategies[cls][key] = dec_cls dec_cls._strategy_keys.append(key) return dec_cls return decorate @classmethod def _strategy_lookup(cls, *key): for prop_cls in cls.__mro__: if prop_cls in cls._all_strategies: strategies = cls._all_strategies[prop_cls] try: return strategies[key] except KeyError: pass raise Exception("can't locate strategy for %s %s" % (cls, key)) class MapperOption(object): """Describe a modification to a Query.""" propagate_to_loaders = False """if True, indicate this option should be carried along to "secondary" Query objects produced during lazy loads or refresh operations. """ def process_query(self, query): """Apply a modification to the given :class:`.Query`.""" def process_query_conditionally(self, query): """same as process_query(), except that this option may not apply to the given query. This is typically used during a lazy load or scalar refresh operation to propagate options stated in the original Query to the new Query being used for the load. It occurs for those options that specify propagate_to_loaders=True. """ self.process_query(query) class LoaderStrategy(object): """Describe the loading behavior of a StrategizedProperty object. The ``LoaderStrategy`` interacts with the querying process in three ways: * it controls the configuration of the ``InstrumentedAttribute`` placed on a class to handle the behavior of the attribute. this may involve setting up class-level callable functions to fire off a select operation when the attribute is first accessed (i.e. a lazy load) * it processes the ``QueryContext`` at statement construction time, where it can modify the SQL statement that is being produced. For example, simple column attributes will add their represented column to the list of selected columns, a joined eager loader may establish join clauses to add to the statement. 
* It produces "row processor" functions at result fetching time. These "row processor" functions populate a particular attribute on a particular mapped instance. """ __slots__ = 'parent_property', 'is_class_level', 'parent', 'key' def __init__(self, parent): self.parent_property = parent self.is_class_level = False self.parent = self.parent_property.parent self.key = self.parent_property.key def init_class_attribute(self, mapper): pass def setup_query(self, context, entity, path, loadopt, adapter, **kwargs): """Establish column and other state for a given QueryContext. This method fulfills the contract specified by MapperProperty.setup(). StrategizedProperty delegates its setup() method directly to this method. """ def create_row_processor(self, context, path, loadopt, mapper, result, adapter, populators): """Establish row processing functions for a given QueryContext. This method fulfills the contract specified by MapperProperty.create_row_processor(). StrategizedProperty delegates its create_row_processor() method directly to this method. """ def __str__(self): return str(self.parent_property)
0.000186
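Of the classes defined above, `MapperOption` has the smallest surface: a subclass implements `process_query()` and optionally sets `propagate_to_loaders`. A hedged sketch of a trivial option, purely illustrative and written against the interfaces module shown above; the `LoggedOption` name and the `User`/`session` objects in the usage note are assumptions.

```python
from sqlalchemy.orm.interfaces import MapperOption

class LoggedOption(MapperOption):
    """Illustrative option: reports when the Query applies it."""
    propagate_to_loaders = False   # do not carry this option into lazy loads

    def process_query(self, query):
        # process_query() receives the Query being configured; real options
        # typically record loader-strategy state here instead of printing.
        print("LoggedOption applied to", type(query).__name__)

# usage (assuming a configured Session and a mapped class User):
# session.query(User).options(LoggedOption()).all()
```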
'''
Created on Sep 9, 2013

@author: elif
'''
from django import forms
from common.forms import ReadOnlyField
from django.forms.models import ModelForm
from members.models import HsUser


class SignupForm(forms.Form):

    def __init__(self, *args, **kwargs):
        super(SignupForm, self).__init__(*args, **kwargs)
        if not self.initial:
            # No initial data: swap the read-only field for an editable, required
            # email field (assign into self.fields, not self.email, so the change
            # actually takes effect on the form).
            self.fields['email'] = forms.CharField(max_length=256, required=True)
        else:
            self.fields['email'].widget.attrs['readonly'] = True

    full_name = forms.CharField(max_length=64)
    email = ReadOnlyField()
    cell_phone_number = forms.CharField(max_length=16)
    is_student = forms.BooleanField(required=False)

    def save(self, user):
        user.full_name = self.cleaned_data['full_name']
        user.email = self.cleaned_data['email']
        user.cell_phone_number = self.cleaned_data['cell_phone_number']
        user.is_student = self.cleaned_data['is_student']
        user.save()


class ExampleForm(forms.Form):
    username = forms.CharField(max_length=30, label="Username")
    email = forms.EmailField(label="Email")


class HsUserForm(ModelForm):

    def __init__(self, *args, **kwargs):
        super(HsUserForm, self).__init__(*args, **kwargs)
        self.fields['email'].widget.attrs['readonly'] = True

    class Meta:
        model = HsUser
        fields = ('full_name', 'email_visible', 'nickname', 'cell_phone_number',
                  'cell_phone_number_visible', 'is_student', 'summary', 'reason', 'id', 'email')
        # exclude = ('email',)
0.007571
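`SignupForm` above binds to an existing `HsUser` through its `save(user)` method rather than being a ModelForm. A hedged sketch of how a view might drive it, assuming `SignupForm` is importable from the forms module above; the view name, template path, and `'profile'` URL name are assumptions.

```python
# Hypothetical view wiring for SignupForm; everything except SignupForm and
# its save(user) contract is illustrative.
from django.shortcuts import redirect, render

def signup(request):
    initial = {'email': request.user.email}
    if request.method == 'POST':
        form = SignupForm(request.POST, initial=initial)
        if form.is_valid():
            form.save(request.user)      # copies cleaned_data onto the user and saves it
            return redirect('profile')   # assumed URL name
    else:
        form = SignupForm(initial=initial)
    return render(request, 'members/signup.html', {'form': form})
```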
""" Add last_modified_at column for games. Revision ID: e8f30efa9a81 Revises: b7e6bb2a9bb3 Create Date: 2017-01-23 15:26:22.294104 """ from alembic import context, op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'e8f30efa9a81' down_revision = 'b7e6bb2a9bb3' branch_labels = None depends_on = None def upgrade(): """Upgrade database.""" # The following is a ridiculous hack to force table recreation for SQLite to # enable the use of a default timestamp. recreate = 'auto' migrate_context = context.get_context() sqlite_dialect_class = None if getattr(sa.dialects, 'sqlite', False): sqlite_dialect_class = (sa.dialects.sqlite.pysqlite .SQLiteDialect_pysqlite) if migrate_context.dialect.__class__ == sqlite_dialect_class: recreate = 'always' with op.batch_alter_table('games', recreate=recreate) as batch_op: batch_op.add_column(sa.Column('last_modified_at', sa.DateTime(), nullable=False, server_default=sa.func.now())) def downgrade(): """Downgrade database.""" with op.batch_alter_table('games') as batch_op: batch_op.drop_column('last_modified_at')
0.000816
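The revision above can be applied or reverted through Alembic's Python command API as well as the CLI. A minimal sketch, assuming an `alembic.ini` in the working directory.

```python
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")              # assumes the project's Alembic config file
command.upgrade(cfg, "e8f30efa9a81")     # adds games.last_modified_at
command.downgrade(cfg, "b7e6bb2a9bb3")   # drops it again
```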
#!/usr/bin/env python import io import os import re from setuptools import setup def _read_text_file(file_name): file_path = os.path.join(os.path.dirname(__file__), file_name) with io.open(file_path, encoding='utf-8') as f_stream: return f_stream.read() def _get_version(): return re.search("__version__\s*=\s*'([^']+)'\s*", _read_text_file('pytest_catchlog.py')).group(1) setup(name='pytest-catchlog', version=_get_version(), description=('py.test plugin to catch log messages.' ' This is a fork of pytest-capturelog.'), long_description='\n'.join([_read_text_file('README.rst'), _read_text_file('CHANGES.rst'), ]), author='Arthur Skowronek (Fork Author)', # original author: Meme Dough author_email='[email protected]', url='https://github.com/eisensheng/pytest-catchlog', py_modules=['pytest_catchlog', ], install_requires=['py>=1.1.1', 'pytest>=2.6'], entry_points={'pytest11': ['pytest_catchlog = pytest_catchlog']}, license='MIT License', zip_safe=False, keywords='py.test pytest logging', classifiers=['Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Software Development :: Testing'])
0.001908
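The `pytest11` entry point registered above is what makes pytest load the plugin automatically once installed. A hedged usage sketch: the `caplog` fixture name and its `text` attribute are assumptions based on the plugin's documented API and may differ between releases.

```python
import logging

def test_warning_is_captured(caplog):
    # caplog and caplog.text are assumptions about the plugin's fixture API.
    logging.getLogger("demo").warning("disk almost full")
    assert "disk almost full" in caplog.text
```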
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('jenkins', '0001_initial'), ('projects', '0001_initial'), ('credentials', '0001_initial'), ] operations = [ migrations.CreateModel( name='Archive', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=64)), ('host', models.CharField(max_length=64, null=True, blank=True)), ('policy', models.CharField(default=b'default', max_length=64, choices=[(b'default', b'default'), (b'cdimage', b'cdimage')])), ('basedir', models.CharField(max_length=128)), ('username', models.CharField(max_length=64, null=True, blank=True)), ('transport', models.CharField(max_length=64, choices=[(b'local', b'local'), (b'ssh', b'ssh')])), ('default', models.BooleanField(default=False)), ('base_url', models.CharField(default=b'', max_length=200, blank=True)), ('ssh_credentials', models.ForeignKey(blank=True, to='credentials.SshKeyPair', null=True)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='ArchiveArtifact', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('archived_at', models.DateTimeField(null=True, blank=True)), ('archived_path', models.CharField(max_length=255, null=True, blank=True)), ('archived_size', models.IntegerField(default=0)), ('archive', models.ForeignKey(related_name=b'items', to='archives.Archive')), ('artifact', models.ForeignKey(to='jenkins.Artifact')), ('build', models.ForeignKey(blank=True, to='jenkins.Build', null=True)), ('dependency', models.ForeignKey(blank=True, to='projects.Dependency', null=True)), ('projectbuild_dependency', models.ForeignKey(blank=True, to='projects.ProjectBuildDependency', null=True)), ], options={ 'ordering': ['archived_path'], }, bases=(models.Model,), ), ]
0.005229
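A migration like the one above is normally applied with the `migrate` management command; a minimal sketch using Django's Python API. The app label `"archives"` is inferred from the ForeignKey targets above and is an assumption.

```python
# Hypothetical invocation; requires DJANGO_SETTINGS_MODULE to be set.
import django
from django.core.management import call_command

django.setup()
call_command("migrate", "archives")
```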
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: oneandone_load_balancer short_description: Configure 1&1 load balancer. description: - Create, remove, update load balancers. This module has a dependency on 1and1 >= 1.0 version_added: "2.5" options: state: description: - Define a load balancer state to create, remove, or update. required: false default: 'present' choices: [ "present", "absent", "update" ] auth_token: description: - Authenticating API token provided by 1&1. required: true load_balancer: description: - The identifier (id or name) of the load balancer used with update state. required: true api_url: description: - Custom API URL. Overrides the ONEANDONE_API_URL environement variable. required: false name: description: - Load balancer name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128 required: true health_check_test: description: - Type of the health check. At the moment, HTTP is not allowed. choices: [ "NONE", "TCP", "HTTP", "ICMP" ] required: true health_check_interval: description: - Health check period in seconds. minimum=5, maximum=300, multipleOf=1 required: true health_check_path: description: - Url to call for cheking. Required for HTTP health check. maxLength=1000 required: false health_check_parse: description: - Regular expression to check. Required for HTTP health check. maxLength=64 required: false persistence: description: - Persistence. required: true persistence_time: description: - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1 required: true method: description: - Balancing procedure. choices: [ "ROUND_ROBIN", "LEAST_CONNECTIONS" ] required: true datacenter: description: - ID or country code of the datacenter where the load balancer will be created. default: US choices: [ "US", "ES", "DE", "GB" ] required: false rules: description: - A list of rule objects that will be set for the load balancer. Each rule must contain protocol, port_balancer, and port_server parameters, in addition to source parameter, which is optional. required: true description: description: - Description of the load balancer. maxLength=256 required: false add_server_ips: description: - A list of server identifiers (id or name) to be assigned to a load balancer. Used in combination with update state. required: false remove_server_ips: description: - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state. required: false add_rules: description: - A list of rules that will be added to an existing load balancer. It is syntax is the same as the one used for rules parameter. Used in combination with update state. 
required: false remove_rules: description: - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state. required: false wait: description: - wait for the instance to be in state 'running' before returning required: false default: "yes" type: bool wait_timeout: description: - how long before wait gives up, in seconds default: 600 wait_interval: description: - Defines the number of seconds to wait when using the _wait_for methods default: 5 requirements: - "1and1" - "python >= 2.6" author: - Amel Ajdinovic (@aajdinov) - Ethan Devenport (@edevenport) ''' EXAMPLES = ''' # Provisioning example. Create and destroy a load balancer. - oneandone_load_balancer: auth_token: oneandone_private_api_key name: ansible load balancer description: Testing creation of load balancer with ansible health_check_test: TCP health_check_interval: 40 persistence: true persistence_time: 1200 method: ROUND_ROBIN datacenter: US rules: - protocol: TCP port_balancer: 80 port_server: 80 source: 0.0.0.0 wait: true wait_timeout: 500 - oneandone_load_balancer: auth_token: oneandone_private_api_key name: ansible load balancer wait: true wait_timeout: 500 state: absent # Update a load balancer. - oneandone_load_balancer: auth_token: oneandone_private_api_key load_balancer: ansible load balancer name: ansible load balancer updated description: Testing the update of a load balancer with ansible wait: true wait_timeout: 500 state: update # Add server to a load balancer. - oneandone_load_balancer: auth_token: oneandone_private_api_key load_balancer: ansible load balancer updated description: Adding server to a load balancer with ansible add_server_ips: - server identifier (id or name) wait: true wait_timeout: 500 state: update # Remove server from a load balancer. - oneandone_load_balancer: auth_token: oneandone_private_api_key load_balancer: ansible load balancer updated description: Removing server from a load balancer with ansible remove_server_ips: - B2504878540DBC5F7634EB00A07C1EBD (server's ip id) wait: true wait_timeout: 500 state: update # Add rules to a load balancer. - oneandone_load_balancer: auth_token: oneandone_private_api_key load_balancer: ansible load balancer updated description: Adding rules to a load balancer with ansible add_rules: - protocol: TCP port_balancer: 70 port_server: 70 source: 0.0.0.0 - protocol: TCP port_balancer: 60 port_server: 60 source: 0.0.0.0 wait: true wait_timeout: 500 state: update # Remove rules from a load balancer. - oneandone_load_balancer: auth_token: oneandone_private_api_key load_balancer: ansible load balancer updated description: Adding rules to a load balancer with ansible remove_rules: - rule_id #1 - rule_id #2 - ... 
wait: true wait_timeout: 500 state: update ''' RETURN = ''' load_balancer: description: Information about the load balancer that was processed type: dict sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}' returned: always ''' import os from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.oneandone import ( get_load_balancer, get_server, get_datacenter, OneAndOneResources, wait_for_resource_creation_completion ) HAS_ONEANDONE_SDK = True try: import oneandone.client except ImportError: HAS_ONEANDONE_SDK = False DATACENTERS = ['US', 'ES', 'DE', 'GB'] HEALTH_CHECK_TESTS = ['NONE', 'TCP', 'HTTP', 'ICMP'] METHODS = ['ROUND_ROBIN', 'LEAST_CONNECTIONS'] def _check_mode(module, result): if module.check_mode: module.exit_json( changed=result ) def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids): """ Assigns servers to a load balancer. """ try: attach_servers = [] for server_id in server_ids: server = get_server(oneandone_conn, server_id, True) attach_server = oneandone.client.AttachServer( server_id=server['id'], server_ip_id=next(iter(server['ips'] or []), None)['id'] ) attach_servers.append(attach_server) if module.check_mode: if attach_servers: return True return False load_balancer = oneandone_conn.attach_load_balancer_server( load_balancer_id=load_balancer_id, server_ips=attach_servers) return load_balancer except Exception as ex: module.fail_json(msg=str(ex)) def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id): """ Unassigns a server/IP from a load balancer. """ try: if module.check_mode: lb_server = oneandone_conn.get_load_balancer_server( load_balancer_id=load_balancer_id, server_ip_id=server_ip_id) if lb_server: return True return False load_balancer = oneandone_conn.remove_load_balancer_server( load_balancer_id=load_balancer_id, server_ip_id=server_ip_id) return load_balancer except Exception as ex: module.fail_json(msg=str(ex)) def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules): """ Adds new rules to a load_balancer. """ try: load_balancer_rules = [] for rule in rules: load_balancer_rule = oneandone.client.LoadBalancerRule( protocol=rule['protocol'], port_balancer=rule['port_balancer'], port_server=rule['port_server'], source=rule['source']) load_balancer_rules.append(load_balancer_rule) if module.check_mode: lb_id = get_load_balancer(oneandone_conn, load_balancer_id) if (load_balancer_rules and lb_id): return True return False load_balancer = oneandone_conn.add_load_balancer_rule( load_balancer_id=load_balancer_id, load_balancer_rules=load_balancer_rules ) return load_balancer except Exception as ex: module.fail_json(msg=str(ex)) def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id): """ Removes a rule from a load_balancer. """ try: if module.check_mode: rule = oneandone_conn.get_load_balancer_rule( load_balancer_id=load_balancer_id, rule_id=rule_id) if rule: return True return False load_balancer = oneandone_conn.remove_load_balancer_rule( load_balancer_id=load_balancer_id, rule_id=rule_id ) return load_balancer except Exception as ex: module.fail_json(msg=str(ex)) def update_load_balancer(module, oneandone_conn): """ Updates a load_balancer based on input arguments. Load balancer rules and server ips can be added/removed to/from load balancer. Load balancer name, description, health_check_test, health_check_interval, persistence, persistence_time, and method can be updated as well. 
module : AnsibleModule object oneandone_conn: authenticated oneandone object """ load_balancer_id = module.params.get('load_balancer') name = module.params.get('name') description = module.params.get('description') health_check_test = module.params.get('health_check_test') health_check_interval = module.params.get('health_check_interval') health_check_path = module.params.get('health_check_path') health_check_parse = module.params.get('health_check_parse') persistence = module.params.get('persistence') persistence_time = module.params.get('persistence_time') method = module.params.get('method') add_server_ips = module.params.get('add_server_ips') remove_server_ips = module.params.get('remove_server_ips') add_rules = module.params.get('add_rules') remove_rules = module.params.get('remove_rules') changed = False load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True) if load_balancer is None: _check_mode(module, False) if (name or description or health_check_test or health_check_interval or health_check_path or health_check_parse or persistence or persistence_time or method): _check_mode(module, True) load_balancer = oneandone_conn.modify_load_balancer( load_balancer_id=load_balancer['id'], name=name, description=description, health_check_test=health_check_test, health_check_interval=health_check_interval, health_check_path=health_check_path, health_check_parse=health_check_parse, persistence=persistence, persistence_time=persistence_time, method=method) changed = True if add_server_ips: if module.check_mode: _check_mode(module, _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips)) load_balancer = _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips) changed = True if remove_server_ips: chk_changed = False for server_ip_id in remove_server_ips: if module.check_mode: chk_changed |= _remove_load_balancer_server(module, oneandone_conn, load_balancer['id'], server_ip_id) _remove_load_balancer_server(module, oneandone_conn, load_balancer['id'], server_ip_id) _check_mode(module, chk_changed) load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) changed = True if add_rules: load_balancer = _add_load_balancer_rules(module, oneandone_conn, load_balancer['id'], add_rules) _check_mode(module, load_balancer) changed = True if remove_rules: chk_changed = False for rule_id in remove_rules: if module.check_mode: chk_changed |= _remove_load_balancer_rule(module, oneandone_conn, load_balancer['id'], rule_id) _remove_load_balancer_rule(module, oneandone_conn, load_balancer['id'], rule_id) _check_mode(module, chk_changed) load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) changed = True try: return (changed, load_balancer) except Exception as ex: module.fail_json(msg=str(ex)) def create_load_balancer(module, oneandone_conn): """ Create a new load_balancer. 
module : AnsibleModule object oneandone_conn: authenticated oneandone object """ try: name = module.params.get('name') description = module.params.get('description') health_check_test = module.params.get('health_check_test') health_check_interval = module.params.get('health_check_interval') health_check_path = module.params.get('health_check_path') health_check_parse = module.params.get('health_check_parse') persistence = module.params.get('persistence') persistence_time = module.params.get('persistence_time') method = module.params.get('method') datacenter = module.params.get('datacenter') rules = module.params.get('rules') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') wait_interval = module.params.get('wait_interval') load_balancer_rules = [] datacenter_id = None if datacenter is not None: datacenter_id = get_datacenter(oneandone_conn, datacenter) if datacenter_id is None: module.fail_json( msg='datacenter %s not found.' % datacenter) for rule in rules: load_balancer_rule = oneandone.client.LoadBalancerRule( protocol=rule['protocol'], port_balancer=rule['port_balancer'], port_server=rule['port_server'], source=rule['source']) load_balancer_rules.append(load_balancer_rule) _check_mode(module, True) load_balancer_obj = oneandone.client.LoadBalancer( health_check_path=health_check_path, health_check_parse=health_check_parse, name=name, description=description, health_check_test=health_check_test, health_check_interval=health_check_interval, persistence=persistence, persistence_time=persistence_time, method=method, datacenter_id=datacenter_id ) load_balancer = oneandone_conn.create_load_balancer( load_balancer=load_balancer_obj, load_balancer_rules=load_balancer_rules ) if wait: wait_for_resource_creation_completion(oneandone_conn, OneAndOneResources.load_balancer, load_balancer['id'], wait_timeout, wait_interval) load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) # refresh changed = True if load_balancer else False _check_mode(module, False) return (changed, load_balancer) except Exception as ex: module.fail_json(msg=str(ex)) def remove_load_balancer(module, oneandone_conn): """ Removes a load_balancer. 
module : AnsibleModule object oneandone_conn: authenticated oneandone object """ try: lb_id = module.params.get('name') load_balancer_id = get_load_balancer(oneandone_conn, lb_id) if module.check_mode: if load_balancer_id is None: _check_mode(module, False) _check_mode(module, True) load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id) changed = True if load_balancer else False return (changed, { 'id': load_balancer['id'], 'name': load_balancer['name'] }) except Exception as ex: module.fail_json(msg=str(ex)) def main(): module = AnsibleModule( argument_spec=dict( auth_token=dict( type='str', default=os.environ.get('ONEANDONE_AUTH_TOKEN')), api_url=dict( type='str', default=os.environ.get('ONEANDONE_API_URL')), load_balancer=dict(type='str'), name=dict(type='str'), description=dict(type='str'), health_check_test=dict( choices=HEALTH_CHECK_TESTS), health_check_interval=dict(type='str'), health_check_path=dict(type='str'), health_check_parse=dict(type='str'), persistence=dict(type='bool'), persistence_time=dict(type='str'), method=dict( choices=METHODS), datacenter=dict( choices=DATACENTERS), rules=dict(type='list', default=[]), add_server_ips=dict(type='list', default=[]), remove_server_ips=dict(type='list', default=[]), add_rules=dict(type='list', default=[]), remove_rules=dict(type='list', default=[]), wait=dict(type='bool', default=True), wait_timeout=dict(type='int', default=600), wait_interval=dict(type='int', default=5), state=dict(type='str', default='present', choices=['present', 'absent', 'update']), ), supports_check_mode=True ) if not HAS_ONEANDONE_SDK: module.fail_json(msg='1and1 required for this module') if not module.params.get('auth_token'): module.fail_json( msg='auth_token parameter is required.') if not module.params.get('api_url'): oneandone_conn = oneandone.client.OneAndOneService( api_token=module.params.get('auth_token')) else: oneandone_conn = oneandone.client.OneAndOneService( api_token=module.params.get('auth_token'), api_url=module.params.get('api_url')) state = module.params.get('state') if state == 'absent': if not module.params.get('name'): module.fail_json( msg="'name' parameter is required for deleting a load balancer.") try: (changed, load_balancer) = remove_load_balancer(module, oneandone_conn) except Exception as ex: module.fail_json(msg=str(ex)) elif state == 'update': if not module.params.get('load_balancer'): module.fail_json( msg="'load_balancer' parameter is required for updating a load balancer.") try: (changed, load_balancer) = update_load_balancer(module, oneandone_conn) except Exception as ex: module.fail_json(msg=str(ex)) elif state == 'present': for param in ('name', 'health_check_test', 'health_check_interval', 'persistence', 'persistence_time', 'method', 'rules'): if not module.params.get(param): module.fail_json( msg="%s parameter is required for new load balancers." % param) try: (changed, load_balancer) = create_load_balancer(module, oneandone_conn) except Exception as ex: module.fail_json(msg=str(ex)) module.exit_json(changed=changed, load_balancer=load_balancer) if __name__ == '__main__': main()
0.001441
import requests

from cloudbot import hook


# CONSTANTS

exchanges = {
    "blockchain": {
        "api_url": "https://blockchain.info/ticker",
        "func": lambda data: "Blockchain // Buy: \x0307${:,.2f}\x0f -"
                             " Sell: \x0307${:,.2f}\x0f".format(data["USD"]["buy"], data["USD"]["sell"])
    },
    "coinbase": {
        "api_url": "https://coinbase.com/api/v1/prices/spot_rate",
        "func": lambda data: "Coinbase // Current: \x0307${:,.2f}\x0f".format(float(data['amount']))
    },
    "bitpay": {
        "api_url": "https://bitpay.com/api/rates",
        "func": lambda data: "Bitpay // Current: \x0307${:,.2f}\x0f".format(data[0]['rate'])
    },
    "bitstamp": {
        "api_url": "https://www.bitstamp.net/api/ticker/",
        "func": lambda data: "BitStamp // Current: \x0307${:,.2f}\x0f - High: \x0307${:,.2f}\x0f -"
                             " Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} BTC".format(float(data['last']),
                                                                                     float(data['high']),
                                                                                     float(data['low']),
                                                                                     float(data['volume']))
    }
}


# HOOK FUNCTIONS

@hook.command("btc", "bitcoin", autohelp=False)
def bitcoin(text, notice):
    """[blockchain|coinbase|bitpay|bitstamp] - gets the bitcoin exchange rate from <exchange>, defaulting to bitstamp

    :type text: str
    """
    text = text.lower()

    if text:
        if text in exchanges:
            exchange = exchanges[text]
        else:
            valid_exchanges = list(exchanges.keys())
            notice("Invalid exchange '{}', valid exchanges are {} and {}".format(text,
                                                                                 ", ".join(valid_exchanges[:-1]),
                                                                                 valid_exchanges[-1]))
            return
    else:
        exchange = exchanges["bitstamp"]

    response = requests.get(exchange["api_url"])

    if response.status_code != requests.codes.ok:
        return "Error reaching {}: {}".format(text or "bitstamp", response.status_code)

    func = exchange["func"]
    return func(response.json())


@hook.command("ltc", "litecoin", autohelp=False)
def litecoin(message):
    """- gets litecoin exchange rate from BTC-E"""
    response = requests.get("https://btc-e.com/api/2/ltc_usd/ticker")

    if response.status_code != requests.codes.ok:
        return "Error reaching btc-e.com: {}".format(response.status_code)

    data = response.json()
    ticker = data['ticker']
    message("Current: \x0307${:,.2f}\x0f - High: \x0307${:,.2f}\x0f"
            " - Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} LTC".format(ticker['buy'], ticker['high'], ticker['low'],
                                                                      ticker['vol_cur']))
0.004836
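The per-exchange `func` lambdas above only format already-fetched JSON, so they can be exercised without any network access. A small sketch, assuming the `exchanges` dict from the plugin above is in scope; the figures are placeholders, not real market data.

```python
# Feed a fabricated Blockchain-style payload straight to the formatter.
sample = {"USD": {"buy": 64250.10, "sell": 64198.55}}
print(exchanges["blockchain"]["func"](sample))
# -> Blockchain // Buy: $64,250.10 - Sell: $64,198.55   (plus IRC colour codes)
```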
# -*- coding: utf-8 -*- """ /*************************************************************************** Name : DB Manager Description : Database manager plugin for QGIS (Oracle) Date : Aug 27, 2014 copyright : (C) 2014 by Médéric RIBREUX email : [email protected] The content of this file is based on - PG_Manager by Martin Dobias <[email protected]> (GPLv2 license) - DB Manager by Giuseppe Sucameli <[email protected]> (GPLv2 license) ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ from qgis.PyQt.QtWidgets import QApplication from qgis.core import QgsWkbTypes from ..info_model import TableInfo, VectorTableInfo, DatabaseInfo from ..html_elems import HtmlContent, HtmlSection, HtmlParagraph, \ HtmlTable, HtmlTableHeader, HtmlTableCol # Syntax Highlight for VIEWS/MVIEWS from pygments import highlight from pygments.lexers import get_lexer_by_name from pygments.formatters import HtmlFormatter class ORDatabaseInfo(DatabaseInfo): def __init__(self, db): self.db = db def connectionDetails(self): tbl = [] if self.db.connector.host != u"": tbl.append((QApplication.translate("DBManagerPlugin", "Host:"), self.db.connector.host)) tbl.append((QApplication.translate("DBManagerPlugin", "Database:"), self.db.connector.dbname)) tbl.append((QApplication.translate("DBManagerPlugin", "User:"), self.db.connector.user)) tbl.append((QApplication.translate("DBManagerPlugin", "SQLite list tables cache:"), "Enabled" if self.db.connector.hasCache else "Unavailable")) return HtmlTable(tbl) def spatialInfo(self): ret = [] info = self.db.connector.getSpatialInfo() if not info: return tbl = [ (QApplication.translate("DBManagerPlugin", "Oracle\ Spatial:"), info[0]) ] ret.append(HtmlTable(tbl)) if not self.db.connector.has_geometry_columns: ret.append( HtmlParagraph( QApplication.translate( "DBManagerPlugin", (u"<warning> ALL_SDO_GEOM_METADATA" u" view doesn't exist!\n" u"This view is essential for many" u"GIS applications for enumeration of tables.")))) return ret def privilegesDetails(self): """ find if user can create schemas (CREATE ANY TABLE or something)""" # TODO return None class ORTableInfo(TableInfo): def __init__(self, table): self.table = table if not self.table.objectType: self.table.getType() if not self.table.comment: self.table.getComment() if not self.table.estimatedRowCount and not self.table.isView: self.table.refreshRowEstimation() if not self.table.creationDate: self.table.getDates() def generalInfo(self): ret = [] # if the estimation is less than 100 rows, try to count them - it # shouldn't take long time if (not self.table.isView and not self.table.rowCount and self.table.estimatedRowCount < 100): # row count information is not displayed yet, so just block # table signals to avoid double refreshing # (infoViewer->refreshRowCount->tableChanged->infoViewer) self.table.blockSignals(True) self.table.refreshRowCount() self.table.blockSignals(False) relation_type = QApplication.translate( "DBManagerPlugin", self.table.objectType) tbl = [ (QApplication.translate("DBManagerPlugin", "Object type:"), relation_type), (QApplication.translate("DBManagerPlugin", "Owner:"), 
self.table.owner) ] if self.table.comment: tbl.append( (QApplication.translate( "DBManagerPlugin", "Comment:"), self.table.comment)) # Estimated rows if not self.table.isView: tbl.append( (QApplication.translate( "DBManagerPlugin", "Rows (estimation):"), self.table.estimatedRowCount) ) if self.table.rowCount is not None and self.table.rowCount >= 0: # Add a real count of rows tbl.append( (QApplication.translate("DBManagerPlugin", "Rows (counted):"), self.table.rowCount) ) else: tbl.append( (QApplication.translate("DBManagerPlugin", "Rows (counted):"), 'Unknown (<a href="action:rows/recount">find out</a>)') ) # Add creation and modification dates if self.table.creationDate: tbl.append( (QApplication.translate("DBManagerPlugin", "Creation Date:"), self.table.creationDate)) if self.table.modificationDate: tbl.append( (QApplication.translate( "DBManagerPlugin", "Last Modification Date:"), self.table.modificationDate)) # privileges # has the user access to this schema? schema_priv = self.table.database().connector.getSchemaPrivileges( self.table.schemaName()) if self.table.schema() else None if not schema_priv: pass elif schema_priv[1] is False: # no usage privileges on the schema tbl.append((QApplication.translate( "DBManagerPlugin", "Privileges:"), QApplication.translate( "DBManagerPlugin", (u"<warning> This user doesn't have usage privileges" u"for this schema!")))) else: table_priv = self.table.database().connector.getTablePrivileges( (self.table.schemaName(), self.table.name)) privileges = [] if table_priv[0]: privileges.append("select") if table_priv[1]: privileges.append("insert") if table_priv[2]: privileges.append("update") if table_priv[3]: privileges.append("delete") if len(privileges) > 0: priv_string = u", ".join(privileges) else: priv_string = QApplication.translate( "DBManagerPlugin", '<warning> This user has no privileges!') tbl.append( (QApplication.translate( "DBManagerPlugin", "Privileges:"), priv_string)) ret.append(HtmlTable(tbl)) if schema_priv and schema_priv[1]: if (table_priv[0] and not table_priv[1] and not table_priv[2] and not table_priv[3]): ret.append( HtmlParagraph(QApplication.translate( "DBManagerPlugin", "<warning> This user has read-only privileges."))) # primary key defined? 
if (not self.table.isView and self.table.objectType != u"MATERIALIZED VIEW"): pk = [fld for fld in self.table.fields() if fld.primaryKey] if len(pk) <= 0: ret.append( HtmlParagraph(QApplication.translate( "DBManagerPlugin", "<warning> No primary key defined for this table!"))) return ret def getSpatialInfo(self): ret = [] info = self.db.connector.getSpatialInfo() if not info: return tbl = [ (QApplication.translate( "DBManagerPlugin", "Library:"), info[0]) # , ] ret.append(HtmlTable(tbl)) if not self.db.connector.has_geometry_columns: ret.append(HtmlParagraph( QApplication.translate( "DBManagerPlugin", (u"<warning> ALL_SDO_GEOM_METADATA table doesn't exist!\n" u"This table is essential for many GIS" u"applications for enumeration of tables.")))) return ret def fieldsDetails(self): tbl = [] # define the table header header = ( "#", QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Type"), QApplication.translate("DBManagerPlugin", "Length"), QApplication.translate("DBManagerPlugin", "Null"), QApplication.translate("DBManagerPlugin", "Default"), QApplication.translate("DBManagerPlugin", "Comment")) tbl.append(HtmlTableHeader(header)) # add table contents for fld in self.table.fields(): char_max_len = fld.charMaxLen if fld.charMaxLen else "" if fld.modifier: char_max_len = u"{},{}".format(char_max_len, fld.modifier) is_null_txt = "N" if fld.notNull else "Y" # make primary key field underlined attrs = {"class": "underline"} if fld.primaryKey else None name = HtmlTableCol(fld.name, attrs) tbl.append( (fld.num, name, fld.type2String(), char_max_len, is_null_txt, fld.default2String(), fld.comment)) return HtmlTable(tbl, {"class": "header"}) def constraintsDetails(self): if not self.table.constraints(): return None tbl = [] # define the table header header = (QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Type"), QApplication.translate("DBManagerPlugin", "Column"), QApplication.translate("DBManagerPlugin", "Status"), QApplication.translate("DBManagerPlugin", "Validated"), QApplication.translate("DBManagerPlugin", "Generated"), QApplication.translate("DBManagerPlugin", "Check condition"), QApplication.translate("DBManagerPlugin", "Foreign Table"), QApplication.translate("DBManagerPlugin", "Foreign column"), QApplication.translate("DBManagerPlugin", "On Delete")) tbl.append(HtmlTableHeader(header)) # add table contents for con in self.table.constraints(): tbl.append((con.name, con.type2String(), con.column, con.status, con.validated, con.generated, con.checkSource, con.foreignTable, con.foreignKey, con.foreignOnDelete)) return HtmlTable(tbl, {"class": "header"}) def indexesDetails(self): if not self.table.indexes(): return None tbl = [] # define the table header header = (QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Column(s)"), QApplication.translate("DBManagerPlugin", "Index Type"), QApplication.translate("DBManagerPlugin", "Status"), QApplication.translate("DBManagerPlugin", "Last analyzed"), QApplication.translate("DBManagerPlugin", "Compression"), QApplication.translate("DBManagerPlugin", "Uniqueness"), QApplication.translate("DBManagerPlugin", "Action")) tbl.append(HtmlTableHeader(header)) # add table contents for idx in self.table.indexes(): # get the fields the index is defined on tbl.append((idx.name, idx.column, idx.indexType, idx.status, idx.analyzed, idx.compression, idx.isUnique, (u'<a href="action:index/{}/rebuild">Rebuild' 
u"""</a>""".format(idx.name)))) return HtmlTable(tbl, {"class": "header"}) def triggersDetails(self): if not self.table.triggers(): return None ret = [] tbl = [] # define the table header header = ( QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Event"), QApplication.translate("DBManagerPlugin", "Type"), QApplication.translate("DBManagerPlugin", "Enabled")) tbl.append(HtmlTableHeader(header)) # add table contents for trig in self.table.triggers(): name = (u"""{0} (<a href="action:trigger/""" u"""{0}/{1}">{1}</a>)""".format(trig.name, "delete")) if trig.enabled == u"ENABLED": enabled, action = ( QApplication.translate("DBManagerPlugin", "Yes"), u"disable") else: enabled, action = ( QApplication.translate("DBManagerPlugin", "No"), "enable") txt_enabled = (u"""{0} (<a href="action:trigger/""" u"""{1}/{2}">{2}</a>)""".format( enabled, trig.name, action)) tbl.append((name, trig.event, trig.type, txt_enabled)) ret.append(HtmlTable(tbl, {"class": "header"})) ret.append( HtmlParagraph( QApplication.translate( "DBManagerPlugin", (u'<a href="action:triggers/enable">' u'Enable all triggers</a> / ' u'<a href="action:triggers/disable">' u'Disable all triggers</a>')))) return ret def getTableInfo(self): ret = [] general_info = self.generalInfo() if not general_info: pass else: ret.append( HtmlSection( QApplication.translate( "DBManagerPlugin", 'General info'), general_info)) # spatial info spatial_info = self.spatialInfo() if not spatial_info: pass else: spatial_info = HtmlContent(spatial_info) if not spatial_info.hasContents(): spatial_info = QApplication.translate( "DBManagerPlugin", '<warning> This is not a spatial table.') ret.append( HtmlSection( self.table.database().connection().typeNameString(), spatial_info)) # fields fields_details = self.fieldsDetails() if not fields_details: pass else: ret.append( HtmlSection( QApplication.translate( "DBManagerPlugin", 'Fields'), fields_details)) # constraints constraints_details = self.constraintsDetails() if not constraints_details: pass else: ret.append( HtmlSection( QApplication.translate( "DBManagerPlugin", 'Constraints'), constraints_details)) # indexes indexes_details = self.indexesDetails() if not indexes_details: pass else: ret.append( HtmlSection( QApplication.translate( "DBManagerPlugin", 'Indexes'), indexes_details)) # triggers triggers_details = self.triggersDetails() if not triggers_details: pass else: ret.append( HtmlSection( QApplication.translate( "DBManagerPlugin", 'Triggers'), triggers_details)) if self.table.objectType == u"MATERIALIZED VIEW": mview_info = self.getMViewInfo() ret.append( HtmlSection( QApplication.translate( "DBManagerPlugin", 'Materialized View information'), mview_info)) return ret def getMViewInfo(self): """If the table is a materialized view, grab more information... 
""" ret = [] tbl = [] values = self.table.getMViewInfo() tbl.append((QApplication.translate("DBManagerPlugin", "Refresh Mode:"), values[0])) tbl.append((QApplication.translate("DBManagerPlugin", "Refresh Method:"), values[1])) tbl.append((QApplication.translate("DBManagerPlugin", "Build Mode:"), values[2])) tbl.append((QApplication.translate("DBManagerPlugin", "Last Refresh Date:"), values[5])) tbl.append((QApplication.translate("DBManagerPlugin", "Last Refresh Type:"), values[4])) tbl.append((QApplication.translate("DBManagerPlugin", "Fast Refreshable:"), values[3])) tbl.append((QApplication.translate("DBManagerPlugin", "Staleness:"), values[6])) tbl.append((QApplication.translate("DBManagerPlugin", "Stale since:"), values[7])) tbl.append((QApplication.translate("DBManagerPlugin", "Compile State:"), values[8])) tbl.append((QApplication.translate("DBManagerPlugin", "Use no index:"), values[9])) tbl.append((QApplication.translate( "DBManagerPlugin", (u'<a href="action:mview/refresh">Refresh the materializ' u'ed view</a>')), u"")) ret.append(HtmlTable(tbl)) return ret def getViewInfo(self): """If the table is a view or a materialized view, add the definition of the view. """ if self.table.objectType not in [u"VIEW", u"MATERIALIZED VIEW"]: return [] ret = self.getTableInfo() # view definition view_def = self.table.getDefinition() # Syntax highlight lexer = get_lexer_by_name("sql") formatter = HtmlFormatter( linenos=True, cssclass="source", noclasses=True) result = highlight(view_def, lexer, formatter) if view_def: if self.table.objectType == u"VIEW": title = u"View Definition" else: title = u"Materialized View Definition" ret.append( HtmlSection( QApplication.translate("DBManagerPlugin", title), result)) return ret def toHtml(self): if self.table.objectType in [u"VIEW", u"MATERIALIZED VIEW"]: ret = self.getViewInfo() else: ret = self.getTableInfo() return HtmlContent(ret).toHtml() class ORVectorTableInfo(ORTableInfo, VectorTableInfo): def __init__(self, table): VectorTableInfo.__init__(self, table) ORTableInfo.__init__(self, table) def spatialInfo(self): ret = [] if not self.table.geomType: return ret tbl = [ (QApplication.translate("DBManagerPlugin", "Column:"), self.table.geomColumn), (QApplication.translate("DBManagerPlugin", "Geometry:"), self.table.geomType), (QApplication.translate("DBManagerPlugin", "QGIS Geometry type:"), QgsWkbTypes.displayString(self.table.wkbType)) ] # only if we have info from geometry_columns if self.table.geomDim: tbl.append( (QApplication.translate( "DBManagerPlugin", "Dimension:"), self.table.geomDim)) srid = self.table.srid if self.table.srid else -1 if srid != -1: sr_info = ( self.table.database().connector.getSpatialRefInfo(srid)) else: sr_info = QApplication.translate("DBManagerPlugin", "Undefined") if sr_info: tbl.append( (QApplication.translate( "DBManagerPlugin", "Spatial ref:"), u"{0} ({1})".format(sr_info, srid))) # estimated extent if not self.table.estimatedExtent: # estimated extent information is not displayed yet, so just block # table signals to avoid double refreshing # (infoViewer->refreshEstimatedExtent->tableChanged->infoViewer) self.table.blockSignals(True) self.table.refreshTableEstimatedExtent() self.table.blockSignals(False) if self.table.estimatedExtent: estimated_extent_str = (u"{:.9f}, {:.9f} - {:.9f}, " u"{:.9f}".format( *self.table.estimatedExtent)) tbl.append( (QApplication.translate( "DBManagerPlugin", "Estimated extent:"), estimated_extent_str)) # extent extent_str = None if self.table.extent and len(self.table.extent) == 4: 
extent_str = (u"{:.9f}, {:.9f} - {:.9f}, " u"{:.9f}".format(*self.table.extent)) elif (self.table.rowCount is not None and self.table.rowCount > 0) or (self.table.estimatedRowCount is not None and self.table.estimatedRowCount > 0): # Can't calculate an extent on empty layer extent_str = QApplication.translate( "DBManagerPlugin", '(unknown) (<a href="action:extent/get">find out</a>)') if extent_str: tbl.append( (QApplication.translate( "DBManagerPlugin", "Extent:"), extent_str)) ret.append(HtmlTable(tbl)) # Handle extent update metadata if (self.table.extent and self.table.extent != self.table.estimatedExtent and self.table.canUpdateMetadata()): ret.append( HtmlParagraph( QApplication.translate( "DBManagerPlugin", (u'<warning> Metadata extent is different from' u'real extent. You should <a href="action:extent' u'/update">update it</a>!')))) # is there an entry in geometry_columns? if self.table.geomType.lower() == 'geometry': ret.append( HtmlParagraph( QApplication.translate( "DBManagerPlugin", "<warning> There is no entry in geometry_columns!"))) # find out whether the geometry column has spatial index on it if not self.table.isView: if not self.table.hasSpatialIndex(): ret.append( HtmlParagraph( QApplication.translate( "DBManagerPlugin", (u'<warning> No spatial index defined (<a href=' u'"action:spatialindex/create">' u'create it</a>).')))) return ret
0.00004
#!/usr/bin/env python
# coding=utf-8
# Python 2 plotting script: execution time versus clock frequency.
import pylab as pl
import numpy as np
from matplotlib.legend_handler import HandlerLine2D

# Read the measurement table, skipping the two header lines.
f = file("table3")
next(f)
next(f)
a = [map(eval, l.split()[::2]) for l in f]
a = [x for x in a if x[0] > 0 and x[3] == 25]

pl.figure(figsize=(10, 5), dpi=80)
pl.subplots_adjust(bottom=0.2, left=0.1, top=0.9, right=0.95)

hm = {}
# One curve per (cores, threads) configuration.
for i, q in enumerate(sorted(set((x[0], x[1]) for x in a))):
    X = [x[2] for x in a if tuple(x[:2]) == q]
    Y = [x[4] for x in a if tuple(x[:2]) == q]
    l, = pl.plot(X, Y, "pos*hd"[i],
                 label="%d Kern%s, %d Thread%s" % (q[0], "e" * (q[0] != 1),
                                                   q[1] + 1, "s" * (q[1] > 0)))
    hm[l] = HandlerLine2D(numpoints=1)
    xticks = X

pl.xlabel(u"Taktfrequenz in MHz")   # clock frequency in MHz
pl.ylabel(u"Ausführungszeit in s")  # execution time in s
pl.legend(loc='upper right', prop={"size": 12}, handler_map=hm)
pl.grid(True, which='major')
pl.xticks(xticks, [240, '', '', '', 360, '', '', 480, '', 600, '', '', '', 720,
                   '', '', 816, '', 912, '', 1008])
#pl.xlim(200, 1008 + 40)
pl.ylim(0, 100)
pl.savefig("cubie-time.pdf")
pl.show()
0.01069
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License

"""
Custom middleware
"""

# Standard Library
import logging

# Django
from django.conf import settings
from django.contrib import auth
from django.contrib.auth import login as django_login
from django.utils.deprecation import MiddlewareMixin
from django.utils.functional import SimpleLazyObject

# wger
from wger.core.demo import create_temporary_user

logger = logging.getLogger(__name__)

SPECIAL_PATHS = ('dashboard',)


def check_current_request(request):
    """
    Simple helper function that checks whether the current request hit one
    of the 'special' paths (paths that need a logged in user).
    """

    # Don't create guest users for requests that are accessing the site
    # through the REST API
    if 'api' in request.path:
        return False

    # Other paths
    match = False
    for path in SPECIAL_PATHS:
        if path in request.path:
            match = True
    return match


def get_user(request):
    if not hasattr(request, '_cached_user'):
        create_user = check_current_request(request)
        user = auth.get_user(request)

        # Set the flag in the session
        if not request.session.get('has_demo_data'):
            request.session['has_demo_data'] = False

        # Django didn't find a user, so create one now
        if settings.WGER_SETTINGS['ALLOW_GUEST_USERS'] and \
                request.method == 'GET' and \
                create_user and not user.is_authenticated:
            logger.debug('creating a new guest user now')
            user = create_temporary_user()
            django_login(request, user)

        request._cached_user = user
    return request._cached_user


class WgerAuthenticationMiddleware(MiddlewareMixin):
    """
    Small wrapper around django's own AuthenticationMiddleware. Simply creates
    a new user with a temporary flag if the user hits certain URLs that need
    a logged in user.
    """

    def process_request(self, request):
        assert hasattr(request, 'session'), (
            "The Django authentication middleware requires session middleware "
            "to be installed. Edit your MIDDLEWARE_CLASSES setting to insert "
            "'django.contrib.sessions.middleware.SessionMiddleware'.")
        request.user = SimpleLazyObject(lambda: get_user(request))


class RobotsExclusionMiddleware(MiddlewareMixin):
    """
    Simple middleware that sends the "X-Robots-Tag" header for the URLs used
    in our WgerAuthenticationMiddleware so that those pages are not indexed.
    """

    def process_response(self, request, response):
        # Don't set it if it's already in the response
        if check_current_request(request) and response.get('X-Robots-Tag', None) is None:
            response['X-Robots-Tag'] = 'noindex, nofollow'
        return response


class JavascriptAJAXRedirectionMiddleware(MiddlewareMixin):
    """
    Middleware that sends helper headers when working with AJAX. This is used
    for AJAX forms due to limitations of javascript.

    The way it was done before was to load the whole redirected page, then
    read from a DIV in the page and redirect to that URL. This now just sends
    a header when the form was called via the JS function wgerFormModalDialog()
    and no errors are present.
    """

    def process_response(self, request, response):
        if request.META.get('HTTP_X_WGER_NO_MESSAGES') and b'has-error' not in response.content:
            logger.debug('Sending X-wger-redirect')
            response['X-wger-redirect'] = request.path
            response.content = request.path

        return response
0.000953
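The middleware classes in the entry above are ordinary Django `MiddlewareMixin` subclasses, so they are activated by listing their dotted paths in the project settings. A hedged sketch of such a settings excerpt follows; the `wger.utils.middleware` module path is an assumption about where this file lives and is not stated in the snippet itself.

```python
# Hypothetical excerpt from a Django settings module wiring up the classes
# above. The dotted module path is assumed for illustration only.
MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    # Must come after SessionMiddleware: get_user() reads request.session.
    'wger.utils.middleware.WgerAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'wger.utils.middleware.RobotsExclusionMiddleware',
    'wger.utils.middleware.JavascriptAJAXRedirectionMiddleware',
]

WGER_SETTINGS = {
    # Read in get_user() before a temporary guest account is created.
    'ALLOW_GUEST_USERS': True,
}
```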
#Written by Timothy Seabrook #[email protected] #This whole script takes a bit too long to run and didn't end up being too effective. #The basic idea is: #1. Detect edges using a canny filter (This in itself isn't reliable enough) #2. Group edges into 'shapes' permitting that some gaps may exist #3. For each shape, use a line-of-fit split-and-merge strategy to form straight lines from pixels #4. Convert shapes into graphs - lines to nodes and edges #5. Find cycles in graphs to identify convex shapes #6. Threshold convex shapes to identify craters import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as patches import math import os from skimage import data, color from skimage.transform import hough_circle, hough_circle_peaks from skimage.feature import canny from skimage.draw import circle_perimeter from skimage.util import img_as_ubyte from scipy.sparse import csr_matrix from graphCycles import Graph import split_and_merge as sm from PIL import Image import glymur import gdal def edgeCluster(edges, max_step): #edgeCluster algorithm #Perform a walk from each edge pixel #max_step determines how far a pixel can be for it # to be considered part of the same edge w, h = edges.shape[1], edges.shape[0] #size of search area labels = np.zeros((h, w), dtype=np.uint32) #uint32 covers 0 to 4,294,967,295 data = np.where(edges) nextLabel = 0 #Region ID (0 means unlabelled) checkList = [] #Initialise checklist, contains pixels for neighbourhood traversability checks num_total = len(data[0]) #Count number of valid unlabelled pixels num_complete = 0 #Initialise counter ind = 0 #BEGIN CONNECTED COMPONENTS ALGORITHM while(num_complete < num_total): nextLabel += 1 #Increment label class ID y, x = data[0][ind], data[1][ind] while(labels[y,x] != 0): ind += 1 y, x = data[0][ind], data[1][ind] labels[y,x] = nextLabel #Add next pixel to the new label class if checkList.__len__() == 0: #Create a list of pixels for FloodFill neighbour checking checkList = [[y, x]] else: checkList = checkList.append([y, x]) #BEGIN FLOODFILL ALGORITHM while checkList.__len__() > 0: #Whilst there are qualifying pixels in this iteration of FloodFill y, x = checkList.pop() #Take pixel from checklist, to find qualifying neighbours num_complete += 1 #update count for timer #BEGIN LOCATION SPECIFIC NEIGHBOUR INDEXING if x > (max_step-1): xmin = -max_step if x < (w - max_step): #middle column xmax = 1+max_step else: #rightmost column xmax = 1+(w-x-1) else: #leftmost column xmax = 1+max_step xmin = -x if y > (max_step-1): ymin = -max_step if y < (h - max_step): #middle row ymax = 1+max_step else: #bottom row ymax = 1+(h-y-1) else: #top row ymax = 1+max_step ymin = -y #END LOCATION SPECIFIC NEIGHBOUR INDEXING #BEGIN NEIGHBOUR TRAVERSABILITY CHECK for i in range(xmin, xmax): for j in range(ymin, ymax): #for all neighbouring pixels if (((j == 0) & (i == 0))!=True): #not including current pixel if(labels[y + j, x + i] == 0): if edges[y+j,x+i] == True: #and only considering unlabeled pixels labels[y+j,x+i] = nextLabel checkList.append([y+j,x+i]) #END NEIGHBOUR TRAVERSABILITY CHECK #END FLOODFILL ALGORITHM #seeds = np.where(labels == 0) #Reset candidate seeds #END CONNECTED COMPONENTS ALGORITHM cols = np.arange(labels.size) M = csr_matrix((cols, (labels.ravel(), cols)), shape=(labels.max() + 1, labels.size)) indices = [np.unravel_index(row.data, labels.shape) for row in M] counts = np.zeros((np.max(labels)+1)) for i in range(np.max(labels)+1): counts[i] = indices[i][0].size return indices, counts #return labels #return 
labels and count #base_folder = "/Volumes/DATA DISK/PDS_FILES/LROC_NAC/m108898482_cdr_w_jp2/" #base_filename ="m108898482_cdr_jp2" base_folder = "/Users/seabrook/Documents/FDL/FDL-LunarResources/PDS_FILES/LROC_NAC/" base_filename = "M1106504662RE" filename = base_folder+"P26_0-18000.txt" d = [] with open(filename,'rb') as source: for line in source: fields = line.split('\t') d.append(fields) hypothesis = 4 num_nodes = 0 for n in range(len(d)-1): #base_filename = d[n+1][0] num_lil_craters = 0 num_craters = 0 num_bigcraters = 0 #curr_filename = filename+str(n+1)+'.jp2' curr_filename = base_folder+base_filename+'.tif' ds = gdal.Open(curr_filename) image = np.array(ds.GetRasterBand(1).ReadAsArray()) #curr_filename = base_folder+base_filename+'_p'+str(n+1)+'.tif' #if not os.path.isdir(base_folder + 'p' + str(n + 1) + "/"): # os.mkdir(base_folder + 'p' + str(n + 1) + "/") # Load picture and detect edges #image = glymur.Jp2k(curr_filename)[:] # Low threshold and High threshold represent number of pixels that may be skipped to make a line [4, 60 seems good] # Sigma represents the width of the guassian smoothing kernel [3 seems good] edges = canny(image, sigma=3, low_threshold=4, high_threshold=50) #fig, axarr = plt.subplots(ncols=2, nrows=1, figsize=(10, 4)) #axarr[1].imshow(image, cmap=plt.cm.gray) #plt.show() lines, counts = edgeCluster(edges,3) #segments = np.zeros(len(lines)) segmentParent = np.zeros(len(lines), dtype=int) #data = np.where(edges) for i in range(1,len(lines)): if i == 1: segments = sm.split_and_merge(lines[i], 1) segmentParent[i] = len(segments) else: segments = np.hstack((segments, sm.split_and_merge(lines[i], 0.5))) segmentParent[i] = segments.size #cm = plt.get_cmap('gist_rainbow') #fig1, axarr = plt.subplots(ncols=2, nrows=1) #axarr[0].imshow(edges, cmap=plt.cm.gray) #axarr[1].imshow(image, cmap=plt.cm.gray) #axarr[1].set_color_cycle([cm(1. * i / 20) for i in range(20)]) #for i in range(1,len(lines)): # y, x = lines[i] # axarr[1].scatter(x, y, alpha=0.8, edgecolors='none', s=1) #fig2, axarr = plt.subplots(ncols=2, nrows=1) #axarr[0].imshow(image, cmap=plt.cm.gray) #axarr[1].imshow(image, cmap=plt.cm.gray) #For every grouped line nodes = [] for i in range(1,len(lines)): first = segmentParent[i-1] last = segmentParent[i] #For every segment of line #plt.axes(axarr[0]) for j in range(first,last): sm.generate_line_ends(segments[j]) # plt.plot([segments[j].start[1], segments[j].end[1]], [segments[j].start[0], segments[j].end[0]], 'r-') #Hypothesis 1 # proposal: extend all lines by a scalar value to encourage intersection # result: poor, some lines that already intersect do not need additional reach # some lines require larger reach still to make important intersections # conclusion: We require a dynamic value per line, based on context? # #Hypothesis 2 # proposal: where two lines can intersect if extended by max((end-mean/2),max_extend) # they should be # result: decent, large lines extend too far, most 'easy' craters get captured. # conclusion: distance between ends of lines is probably better than distance to intersection # #If a line can be extended to intersect another, within the bounds of the others data points #Then it should do so. #Max extension (in x) permissible for each of two lines to intersect ############################################################################## if(hypothesis == 2): max_extend = 5 for j in range(first, last): for k in range(first,last): if(j < k): #Do these lines intersect? 
if(segments[j].slope[0] == segments[k].slope[0]): #They never intersect intersect = False else: #They intersect at [x_cross, y_cross] #a1x + b1 = a2x + b2 #(a1 - a2)x = (b2 - b1) #x = (b2-b1)/(a1-a2) x_cross = np.divide((segments[k].intercept - segments[j].intercept),\ (segments[j].slope[0] - segments[k].slope[0])) #y = ax + b y_cross = np.multiply(segments[j].slope[0], x_cross) + segments[j].intercept #Check that intersection point lies within bounds of map if((x_cross > 0) & (x_cross < edges.shape[0]) & (y_cross > 0) & (y_cross < edges.shape[1])): #If x_cross is outside of segment j's maximal bounds if (x_cross > segments[j].max[0]): #check that x_cross is close enough to j to warrant intersection if ((x_cross - segments[j].max[0]) < np.maximum(np.multiply(0.5,( np.max(segments[j].data[0]) - segments[j].mean[0])),max_extend)): #If x_cross is outside of segment k's maximals bounds if (x_cross > segments[k].max[0]): # check that x_cross is close enough to k to warrant intersection if ((x_cross - segments[k].max[0]) < np.maximum(np.multiply(0.5, ( np.max(segments[k].data[0]) - segments[k].mean[0])), max_extend)): #If it is, update k(max) segments[k].max[0] = x_cross if (segments[k].slope[0] >= 0): segments[k].max[1] = y_cross else: segments[k].min[1] = y_cross #update j(max) segments[j].max[0] = x_cross if segments[j].slope[0] >= 0: segments[j].max[1] = y_cross else: segments[j].min[1] = y_cross else: # If x_cross is outside of segment k's minimal bounds if (x_cross < segments[k].min[0]): # check that x_cross is close enough to k to warrant intersection if ((segments[k].min[0] - x_cross) < np.maximum(np.multiply(0.5, ( segments[k].mean[0] - np.min(segments[k].data[0]))),max_extend)): # If it is, update k(min) segments[k].min[0] = x_cross if (segments[k].slope[0] >= 0): segments[k].min[1] = y_cross else: segments[k].max[1] = y_cross #update j(max) segments[j].max[0] = x_cross if segments[j].slope[0] >= 0: segments[j].max[1] = y_cross else: segments[j].min[1] = y_cross else: #x_cross is within bounds of k # update j(max) segments[j].max[0] = x_cross if segments[j].slope[0] >= 0: segments[j].max[1] = y_cross else: segments[j].min[1] = y_cross else: # If x_cross is outside of segment j's minimal bounds if (x_cross < segments[j].min[0]): # check that x_cross is close enough to j to warrant intersection if((segments[j].min[0] - x_cross) < np.maximum(np.multiply(0.5,( segments[j].mean[0] - np.min(segments[j].data[0]))),max_extend)): # If x_cross is outside of segment k's maximal bounds if (x_cross > segments[k].max[0]): # check that x_cross is close enough to k to warrant intersection if ((x_cross - segments[k].max[0]) < np.maximum(np.multiply(0.5,( np.max(segments[k].data[0]) - segments[k].mean[0])),max_extend)): # If it is, update k(max) segments[k].max[0] = x_cross if (segments[k].slope[0] >= 0): segments[k].max[1] = y_cross else: segments[k].min[1] = y_cross # update j(min) segments[j].min[0] = x_cross if segments[j].slope[0] >= 0: segments[j].min[1] = y_cross else: segments[j].max[1] = y_cross else: # If x_cross is outside of segment k's minimal bounds if (x_cross < segments[k].min[0]): # check that x_cross is close enough to k to warrant intersection if ((segments[k].min[0] - x_cross) < np.maximum(np.multiply(0.5, ( segments[k].mean[0] - np.min(segments[k].data[0]))), max_extend)): # If it is, update k(min) segments[k].min[0] = x_cross if (segments[k].slope[0] >= 0): segments[k].min[1] = y_cross else: segments[k].max[1] = y_cross # update j(min) segments[j].min[0] = x_cross if 
segments[j].slope[0] >= 0: segments[j].min[1] = y_cross else: segments[j].max[1] = y_cross else: #x_cross is within bounds of k # update j(max) segments[j].min[0] = x_cross if segments[j].slope[0] >= 0: segments[j].min[1] = y_cross else: segments[j].max[1] = y_cross else: #x_cross is within bounds of j # If x_cross is outside of segment k's maximals bounds if (x_cross > segments[k].max[0]): # check that x_cross is close enough to k to warrant intersection if ((x_cross - segments[k].max[0]) < np.maximum(np.multiply(0.5, (np.max(segments[k].data[0]) - segments[k].mean[0])), max_extend)): # If it is, update k(max) segments[k].max[0] = x_cross if (segments[k].slope[0] >= 0): segments[k].max[1] = y_cross else: segments[k].min[1] = y_cross else: # If x_cross is outside of segment k's minimal bounds if (x_cross < segments[k].min[0]): # check that x_cross is close enough to k to warrant intersection if ((segments[k].min[0] - x_cross) < np.maximum(np.multiply(0.5, ( segments[k].mean[0] - np.min(segments[k].data[0]))), max_extend)): # If it is, update k(min) segments[k].min[0] = x_cross if (segments[k].slope[0] >= 0): segments[k].min[1] = y_cross else: segments[k].max[1] = y_cross #else: # x_cross is within bounds of k ############################################################################## # Hypothesis 3 # proposal: Connecting the ends of lines will provide more sensible connections # than connecting intersections # result: Compact groups, lots of unnecessary crossing lines. # conclusion: Most lines only need to connect once at each end if(hypothesis == 3): max_extend = 6 changeFlag = True connected = np.zeros((last - first, last - first), dtype=bool) while(changeFlag): changeFlag = False for j in range(first, last): for k in range(first,last): if(j < k): if(connected[j-first,k-first] == False): #First, do these lines already intersect? if (segments[j].slope[0] == segments[k].slope[0]): # They never intersect intersect = False else: x_cross = np.divide((segments[k].intercept[0] - segments[j].intercept[0]), (segments[j].slope[0] - segments[k].slope[0])) # y = ax + b y_cross = np.multiply(segments[j].slope[0], x_cross) + segments[j].intercept[0] intersect = False #if((x_cross > segments[k].min[0]) & (x_cross > segments[j].min[0]) # & (x_cross < segments[k].max[0]) & (x_cross < segments[j].max[0])): # intersect = True # connected[j-first,k-first] = True # connected[k-first,j-first] = True if(intersect == False): #Are the ends of these lines close together? 
distance = np.zeros(4) #min -> min distance[0] = np.sqrt(np.sum((np.power(segments[j].start[0] - segments[k].start[0],2), np.power((segments[j].start[1] - segments[k].start[1]), 2)))) #min -> max distance[1] = np.sqrt(np.sum((np.power((segments[j].start[0] - segments[k].end[0]),2), np.power((segments[j].start[1] - segments[k].end[1]), 2)))) #max -> min distance[2] = np.sqrt(np.sum((np.power((segments[j].end[0] - segments[k].start[0]),2), np.power((segments[j].end[1] - segments[k].start[1]), 2)))) #max -> max distance[3] = np.sqrt(np.sum((np.power((segments[j].end[0] - segments[k].end[0]),2), np.power((segments[j].end[1] - segments[k].end[1]), 2)))) ind = np.argmin(distance) if distance[ind] < max_extend: if(distance[ind] == 0): connected[j - first, k - first] = True connected[k - first, j - first] = True else: changeFlag = True switcher = { 0: [[segments[j].start[0], segments[j].start[1]], [segments[k].start[0], segments[k].start[1]]], 1: [[segments[j].start[0], segments[j].start[1]], [segments[k].end[0], segments[k].end[1]]], 2: [[segments[j].end[0], segments[j].end[1]], [segments[k].start[0], segments[k].start[1]]], 3: [[segments[j].end[0], segments[j].end[1]], [segments[k].end[0], segments[k].end[1]]], } data = switcher.get(ind) connected[j - first, k - first] = True connected[k - first, j - first] = True segments = np.insert(segments, last, sm.line_of_best_fit(data)) segments[last].start = [data[0][0], data[0][1]] segments[last].end = [data[1][0], data[1][1]] segmentParent[i:] = segmentParent[i:]+1 ############################################################################## # Hypothesis 4 # proposal: A greedy search for new end-of-line connections up to a maximum of 1 connection at each end # Followed by a greedy search for loose end-of-line connections # result: Much tidier groups, though lines appear jittery. # conclusion: It might be better to move nodes rather than draw new edges. if (hypothesis == 4): big_number = 9999999999999 max_extend = 6 connected_lines = np.zeros(last - first,dtype=bool) connected = np.zeros((last-first, last-first),dtype=bool) #for j in range(first, last): # for k in range(first, last): # if (j < k): # First, do these lines already intersect? 
#if (segments[j].slope[0] == segments[k].slope[0]): # They never intersect, but could connect # if(segments[j].intercept[0] == segments[k].intercept[0]): #They are on the same line #Only need to check x value equality, since lines are parallel # if(((segments[j].start[0] >= segments[k].start[0]) # & (segments[j].start[0] <= segments[k].end[0])) # ^ ((segments[j].start[0] >= segments[k].end[0]) # & (segments[j].start[0] <= segments[k].start[0]))): ## segments[j].start_connect = k # connected[j-first, k-first] = True ## connected[k-first, j-first] = True # if (((segments[j].end[0] >= segments[k].start[0]) # & (segments[j].end[0] <= segments[k].end[0])) # ^ ((segments[j].end[0] >= segments[k].end[0]) # & (segments[j].end[0] <= segments[k].start[0]))): # segments[j].end_connect = k # connected[j-first, k-first] = True # connected[k-first, j-first] = True # if (((segments[k].start[0] >= segments[j].start[0]) # & (segments[k].start[0] <= segments[j].end[0])) # ^ ((segments[k].start[0] >= segments[j].end[0]) # & (segments[k].start[0] <= segments[j].start[0]))): # segments[k].start_connect = j # connected[j-first, k-first] = True # connected[k-first, j-first] = True # if (((segments[k].end[0] >= segments[j].start[0]) ### & (segments[k].end[0] <= segments[j].end[0])) # ^ ((segments[k].end[0] >= segments[j].end[0]) # & (segments[k].end[0] <= segments[j].start[0]))): # segments[k].end_connect = j # connected[j-first, k-first] = True # connected[k-first, j-first] = True# # The next pair of conditions should NEVER occur # However, the check has been included for sanity # if((segments[j].end_connect == k) # & (segments[j].start_connect == k)): # #(Line j < Line k) ^ (Line j = Line k) # np.delete(segments, j, 0) # last = last - 1 # segmentParent[i:] = segmentParent[i:] + -1 # np.delete(connected_lines, j-first, 0) # np.delete(connected, j-first, 0) # np.delete(connected, j-first, 1) # else: # if ((segments[k].end_connect == j) # & (segments[k].start_connect == j)): # #Line k < Line j # np.delete(segments, k, 0) # last = last - 1 # segmentParent[i:] = segmentParent[i:] + -1 # np.delete(connected_lines, k-first, 0) # np.delete(connected, k-first, 0) # np.delete(connected, k-first, 1) #The lines are not parallel, continue intersection check #else: # x = (b2 - b1)/(a1 - a2) # x_cross = np.rint(np.divide( # (segments[k].intercept[0] - segments[j].intercept[0]), # (segments[j].slope[0] - segments[k].slope[0]))) #This introduces bugs due to errors introduced through division #Rounding could help, but the direction of rounding would need to be know #if ((x_cross >= segments[k].min[0]) & (x_cross >= segments[j].min[0]) # & (x_cross <= segments[k].max[0]) & (x_cross <= segments[j].max[0])): # #Lines intersect! # #But where...? 
# if(abs(segments[k].end[0] - x_cross) < abs(segments[k].start[0] - x_cross)): # segments[k].end_connect = j # else: # segments[k].start_connect = j # if(abs(segments[j].end[0] - x_cross) < abs(segments[j].start[0] - x_cross)): # segments[j].end_connect = k # else: # segments[j].start_connect = k # connected[j-first,k-first] = True # connected[k-first,j-first] = True #If start and end of line is connected, then do not connect them again #for j in range(first, last): # if ((segments[j].start_connect >= 0) & (segments[j].end_connect >= 0)): # connected_lines[j-first] = True #Find lines that haven't been fully connected yet unconnected = np.where(connected_lines == False)[0]+first num_lines = unconnected.shape[0] #Build adjacency matrix for lines that haven't been connected line_adjacency = np.zeros((num_lines, num_lines,4), dtype=float) #For lines that haven't been fully connected... ##########Calculate line end distances for j in range(num_lines): for k in range(num_lines): if j < k: #Not considering joined pairs of partially connected lines if(connected[j,k] == True): line_adjacency[j,k,0] = big_number line_adjacency[j,k,1] = big_number line_adjacency[j,k,2] = big_number line_adjacency[j,k,3] = big_number else: #Measure the distance between the ends of the lines #Ensure that lines are unconnected before measuring distance # start -> start line_adjacency[j,k,:] = sm.line_distances(segments[unconnected[j]],segments[unconnected[k]]) else: if(j == k): line_adjacency[j, k, 0] = big_number line_adjacency[j, k, 1] = big_number line_adjacency[j, k, 2] = big_number line_adjacency[j, k, 3] = big_number else: # If line has already been processed, copy distance values line_adjacency[j, k,0] = line_adjacency[k, j,0] line_adjacency[j, k,1] = line_adjacency[k, j,2] line_adjacency[j, k,2] = line_adjacency[k, j,1] line_adjacency[j, k,3] = line_adjacency[k, j,3] connect_flag = True l = 0 #Whilst there are still partially connected lines less than [max_extend] distance apart while(connect_flag == True): #Find the shortest distance (greedy strategy) # argmin gives flatIndex, # use unravel_index with array shape to return 3d index #If the shortest distance is acceptable if line_adjacency.size == 0: connect_flag = False else: j, k, l = np.unravel_index(np.argmin(line_adjacency), line_adjacency.shape) if line_adjacency[j,k,l] < max_extend: if(line_adjacency[j,k,l] == 0): node = sm.attach_lines(segments[unconnected[j]], segments[unconnected[k]], l) if (node.id >= num_nodes): nodes.append(node) num_nodes += 1 connected[k, j] = True connected[j, k] = True line_adjacency[j, k, :] = big_number line_adjacency[k, j, :] = big_number else: #Create a new line to bridge the distance segments = np.insert(segments, last, sm.connect_lines(segments[unconnected[j]], segments[unconnected[k]], l)) if (segments[last].nodes[0] is not None): if (segments[last].nodes[0].id >= num_nodes): nodes.append(segments[last].nodes[0]) num_nodes += 1 if (segments[last].nodes[1] is not None): if (segments[last].nodes[1].id >= num_nodes): nodes.append(segments[last].nodes[1]) num_nodes += 1 segmentParent[i:] = segmentParent[i:] + 1 connected = np.hstack((connected, np.zeros((last-first, 1), dtype=bool))) connected = np.vstack((connected, np.zeros((1,last-first+1), dtype=bool))) connected[k, last-first] = True connected[j, last-first] = True connected[last-first, k] = True connected[last-first, j] = True connected[k,j] = True connected[j,k] = True line_adjacency[j, k, :] = big_number line_adjacency[k, j, :] = big_number #Adjacency switcher is used 
to select relevant line_adjacency values #For each 'connection made type' row: #First values identify connections types that line1 can no longer make #Second values identify connections types that line2 can no longer make #Third values identify connections types that j can no longer receive #Fourth values identify connections types that k can no longer receive adjacency_switcher = { 0: [[0, 1],[0, 1],[0, 2],[0, 2]], #Type start->start 1: [[0, 1],[2, 3],[0, 2],[1, 3]], #Type start->end 2: [[2, 3],[0, 1],[1, 3],[0, 2]], #Type end->start 3: [[2, 3],[2, 3],[1, 3],[1, 3]], #Type end->end } inds = adjacency_switcher[l] line_adjacency[j,:,inds[0]] = big_number line_adjacency[k,:,inds[1]] = big_number line_adjacency[:,j,inds[2]] = big_number line_adjacency[:,k,inds[3]] = big_number last = last + 1 diff = 0 if ((segments[unconnected[j]].start_connect >= 0) & (segments[unconnected[j]].end_connect >= 0)): connected_lines[j] = True unconnected = np.delete(unconnected, j, 0) line_adjacency = np.delete(line_adjacency, j, 0) line_adjacency = np.delete(line_adjacency, j, 1) num_lines = num_lines - 1 if k > j: diff = 1 if ((segments[unconnected[k-diff]].start_connect >= 0) & (segments[unconnected[k-diff]].end_connect >= 0)): connected_lines[k] = True unconnected = np.delete(unconnected, k-diff, 0) line_adjacency = np.delete(line_adjacency, k-diff, 0) line_adjacency = np.delete(line_adjacency, k-diff, 1) num_lines = num_lines - 1 else: connect_flag = False #Now there are only partially connected lines remaining #We should see if these can connect to any nearby lines num_remain = unconnected.shape[0] #unconnected have been being deleted upon full-connection during previous step line_adjacency = np.zeros((last-first, 4)) #max_extend = 10 for j in range(num_remain): for k in range(last-first): #Cannot connect to self if(unconnected[j] == k+first): line_adjacency[k, :] = big_number else: #Cannot reconnect over previously connections if(connected[unconnected[j]-first,k] == True): line_adjacency[k,:] = big_number else: #Measure distance to all other ends of lines if(segments[unconnected[j]].start_connect < 0): line_adjacency[k, 0] = sm.point_distance(segments[unconnected[j]].start,segments[k+first].start) line_adjacency[k, 1] = sm.point_distance(segments[unconnected[j]].start,segments[k+first].end) else: line_adjacency[k, 0] = big_number line_adjacency[k, 1] = big_number if(segments[unconnected[j]].end_connect < 0): line_adjacency[k, 2] = sm.point_distance(segments[unconnected[j]].end,segments[k+first].start) line_adjacency[k, 3] = sm.point_distance(segments[unconnected[j]].end,segments[k+first].end) else: line_adjacency[k, 2] = big_number line_adjacency[k, 3] = big_number # sm.line_distances(segments[unconnected[j]],segments[k+first]) k, l = np.unravel_index(np.argmin(line_adjacency), line_adjacency.shape) #If shortest distance is below threshold, make connection if line_adjacency[k,l] < max_extend: if (line_adjacency[k,l] == 0): #If shortest distance indicates prior connection, form connection formally connected[unconnected[j] - first, k] = True connected[k, unconnected[j] - first] = True node = sm.attach_lines(segments[unconnected[j]], segments[k+first], l) if (node.id >= num_nodes): nodes.append(node) num_nodes += 1 else: changeFlag = True segments = np.insert(segments, last, sm.connect_lines(segments[unconnected[j]], segments[k+first], l)) if (segments[last].nodes[0] is not None): if (segments[last].nodes[0].id >= num_nodes): nodes.append(segments[last].nodes[0]) num_nodes += 1 if (segments[last].nodes[1] is 
not None): if (segments[last].nodes[1].id >= num_nodes): nodes.append(segments[last].nodes[1]) num_nodes += 1 connected[unconnected[j] - first, k] = True connected[k, unconnected[j] - first] = True segmentParent[i:] = segmentParent[i:] + 1 connected = np.hstack((connected, np.zeros((last - first, 1), dtype=bool))) connected = np.vstack((connected, np.zeros((1, last - first + 1), dtype=bool))) connected[k, last-first] = True connected[unconnected[j]-first, last-first] = True connected[last-first, k] = True connected[last-first, unconnected[j]-first] = True line_adjacency[k, :] = big_number if((k+first) in unconnected): line_adjacency[np.where(unconnected==(k+first))[0]] = big_number line_adjacency = np.vstack((line_adjacency, np.multiply(np.ones((1,4)),big_number))) last = last + 1 #print(checkCycles(segments[first:last])) #plt.axes(axarr[1]) #axarr[1].imshow(image, cmap=plt.cm.gray) #for m in range(first): # plt.plot([segments[m].start[1], segments[m].end[1]], [segments[m].start[0], segments[m].end[0]], 'r-') # # for m in range(first,last): # plt.plot([segments[m].start[1], segments[m].end[1]], [segments[m].start[0], segments[m].end[0]], 'g-') graph = sm.getEdges(segments[first:last]) nodes2 = sm.getNodes(segments[first:last]) cycles = sm.find_nxCycle(graph) #print(cycles) cycles = sm.mergeCycles(cycles) boxes = sm.findBounds(cycles, nodes2) for box in boxes: coord, width, height = sm.boxToMatplotPatch(box) # axarr[1].add_patch( # patches.Rectangle( # coord, width, height,#(x,y), width, height # fill=False # ) # ) im = image[ int(np.maximum(0, coord[1] - np.floor_divide(height, 4))):int( np.minimum(coord[1] + height + np.floor_divide(height, 4), edges.shape[0])), int(np.maximum(0, coord[0] - np.floor_divide(width, 4))):int( np.minimum(coord[0] + width + np.floor_divide(width, 4), edges.shape[1]))] if ((width >= 20) & (height >= 20)): # Big enough for 1px DEM if ((width > 160) & (height > 160)): # Big enough for 8px DEM # coord = x1,y1 # x2 = x1+width # y2 = y1+height filename = base_folder + "craters/" + base_filename + '_big_crater' + str( num_bigcraters) + '_at_x' + str(int(np.floor_divide(coord[0]))) + 'w' + str(int(np.floor_divide(width))) + 'at_y' + str( int(np.floor_divide(coord[1]))) + 'h' + str(int(np.floor_divide(height))) num_bigcraters = num_bigcraters + 1 else: filename = base_folder + "craters/" + base_filename + '_crater' + str( num_craters) + '_at_x' + str(int(np.floor_divide(coord[0],1))) + 'w' + str(int(np.floor_divide(width))) + 'at_y' + str( int(np.floor_divide(coord[1]))) + 'h' + str(int(np.floor_divide(height))) num_craters = num_craters + 1 else: filename = base_folder + "craters/" + base_filename + '_little_crater' + str( num_lil_craters) + '_at_x' + str(int(np.floor_divide(coord[0]))) + 'w' + str(int(np.floor_divide(width)))+ 'at_y' + str( int(np.floor_divide(coord[1]))) + 'h' + str(int(np.floor_divide(height))) num_lil_craters = num_lil_craters + 1 im2 = Image.fromarray(im) im2.save(filename + '.png') #cycles = sm.findCycles(drawGraph(segments[first:last])) #if (len(cycles) > 0): # print(cycles) #y1 = (np.multiply(line.slope, minX) + line.intercept)[0][0] #a = np.divide(np.ones(len(line.slope)), line.slope) #b = y1 - np.multiply(a, minX) #x2 = np.divide(line.intercept - b, a - line.slope) #y2 = (line.slope * x2) + line.intercept #if x2 < minX: # minX = x2 #if y2 < minY: # minY = y2 #x1 = (np.divide((minY - line.intercept),line.slope))[0][0] #y1 = (np.multiply(line.slope, minX) + line.intercept)[0][0] #x2 = (np.divide((maxY - line.intercept), line.slope))[0][0] #y2 
= (np.multiply(line.slope, maxX) + line.intercept)[0][0] #if(y1 > minY): # y1 = minY #x1 = minX #y2 = (np.multiply(line.slope, maxX) + line.intercept)[0][0] #if(y2 < maxY): # y2 = maxY #x2 = maxX #for line in segments: # If negative correlation, then [minX, maxY], [maxX, minY] # plt.plot([line.start[1], line.end[1]], [line.start[0], line.end[0]], 'r-') #if (line.slope[0] > 0): # plt.plot([line.min[1], line.max[1]], [line.min[0], line.max[0]], 'r-') #else: # plt.plot([line.min[1], line.max[1]], [line.max[0], line.min[0]], 'r-') print("end")
0.013766
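The crater-detection script above opens by describing a six-step pipeline (Canny edges, grouping edge pixels into shapes, split-and-merge line fitting, graph construction, cycle finding, thresholding). A minimal, self-contained sketch of the first two steps is shown below; it substitutes scikit-image's `label` plus a dilation for the script's hand-written `edgeCluster` flood fill, and the synthetic test image is an illustrative assumption, not data from the original.

```python
# Sketch: Canny edge detection followed by grouping of nearby edge pixels.
# Approximates steps 1-2 of the pipeline described above; the max_step gap
# tolerance of edgeCluster is emulated by dilating the edge mask before labelling.
import numpy as np
from skimage.draw import disk
from skimage.feature import canny
from skimage.measure import label
from skimage.morphology import dilation, square

# Synthetic stand-in for an image tile: a dark disk on a flat background.
image = np.full((128, 128), 0.6)
rr, cc = disk((64, 64), 20)
image[rr, cc] = 0.2

edges = canny(image, sigma=3)            # step 1: edge pixels
bridged = dilation(edges, square(3))     # tolerate small gaps between edge pixels
groups = label(bridged, connectivity=2)  # step 2: connected edge groups
groups[~edges] = 0                       # keep labels only on true edge pixels

for gid in range(1, groups.max() + 1):
    ys, xs = np.nonzero(groups == gid)
    print("group %d: %d edge pixels" % (gid, ys.size))
```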
#!/usr/bin/env python
#
# Copyright 2009,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

from gnuradio import gr, gr_unittest, blocks


class test_copy(gr_unittest.TestCase):

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_copy(self):
        src_data = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        expected_result = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        src = blocks.vector_source_b(src_data)
        op = blocks.copy(gr.sizeof_char)
        dst = blocks.vector_sink_b()
        self.tb.connect(src, op, dst)
        self.tb.run()
        dst_data = dst.data()
        self.assertEqual(expected_result, dst_data)

    def test_copy_drop(self):
        src_data = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        expected_result = ()
        src = blocks.vector_source_b(src_data)
        op = blocks.copy(gr.sizeof_char)
        op.set_enabled(False)
        dst = blocks.vector_sink_b()
        self.tb.connect(src, op, dst)
        self.tb.run()
        dst_data = dst.data()
        self.assertEqual(expected_result, dst_data)


if __name__ == '__main__':
    gr_unittest.run(test_copy, "test_copy.xml")
0.003186
# encoding: utf-8

from waflib import Options, Logs
from waflib.Configure import conf


def addDependencyOptions(self, opt, name, extraHelp=''):
    opt.add_option('--with-%s' % name, type='string', default=None,
                   dest='with_%s' % name,
                   help='Path to %s, e.g., /usr/local %s' % (name, extraHelp))
setattr(Options.OptionsContext, "addDependencyOptions", addDependencyOptions)


@conf
def checkDependency(self, name, **kw):
    root = kw.get('path', getattr(Options.options, 'with_%s' % name))
    kw['msg'] = kw.get('msg', 'Checking for %s library' % name)
    kw['uselib_store'] = kw.get('uselib_store', name.upper())
    kw['define_name'] = kw.get('define_name', 'HAVE_%s' % kw['uselib_store'])
    kw['mandatory'] = kw.get('mandatory', True)

    if root:
        isOk = self.check_cxx(includes="%s/include" % root,
                              libpath="%s/lib" % root, **kw)
    else:
        isOk = self.check_cxx(**kw)

    if isOk:
        self.env[kw['define_name']] = True
0.002865
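The waf helper in the entry above adds a `--with-<name>` command-line option and a `checkDependency` configure check built on `check_cxx`. A hypothetical `wscript` showing how such a helper would typically be used is sketched below; the `sqlite3` dependency name and the `.waf-tools` tool location are assumptions for illustration, not taken from the original snippet.

```python
# Hypothetical wscript using the helpers above (assumes they live in a
# local waf tool, e.g. .waf-tools/dependency-checks.py).

def options(opt):
    opt.load('compiler_cxx')
    opt.load('dependency-checks', tooldir='.waf-tools')
    # Adds a --with-sqlite3=/prefix option for a non-standard install location.
    opt.addDependencyOptions(opt, 'sqlite3')

def configure(conf):
    conf.load('compiler_cxx')
    conf.load('dependency-checks', tooldir='.waf-tools')
    # Defines HAVE_SQLITE3 and stores the flags under the SQLITE3 uselib name.
    conf.checkDependency(name='sqlite3', lib='sqlite3',
                         header_name='sqlite3.h', mandatory=False)

def build(bld):
    bld.program(target='app', source='main.cpp', use='SQLITE3')
```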
#!/usr/bin/env python
#/////////////////////////////////////////////////////////////
#
# ROS related imports
import rospy
from sensor_msgs.msg import Imu
from std_msgs.msg import Float64
from std_msgs.msg import Bool
from rospy_tutorials.msg import Floats

# GPIO related imports
from gpiozero import MCP3208
#from gpiozero import Button
#from gpiozero import DigitalOutputDevice

# Other Imports
import sys
import os.path
import math
import time
import datetime


def limVal(var, lower, upper):
    """Clamp var to the closed interval [lower, upper]."""
    if var < lower:
        var = lower
    elif var > upper:
        var = upper
    return var


def low_pass(curr, prev, alpha):
    """First-order (exponential) low-pass filter."""
    return curr * alpha + prev * (1 - alpha)

#
#/////////////////////////////////////////////////////////////////
# Startup
#/////////////////////////////////////////////////////////////////
#
# MCP3208 A/D Startup
#////////////////////////////////////////////////////////////////
# MCP3008(channel=0, clock_pin=11, mosi_pin=10, miso_pin=9, select_pin=8)
# 8 channel A/D microchip
# usually Chip Select is not connected to pin 22 but to pin 8 or 7
# if a new sensor board is made, change that pin to the new chip select connection
# do not change mosi_pin
# do not change clock pin
# select_pin is the same for a single A/D chip, so keep it the same for all channels
# One input is converted at a time
ch0 = MCP3208(channel=0, clock_pin=11, mosi_pin=10, miso_pin=9, select_pin=22)
ch4 = MCP3208(channel=4, clock_pin=11, mosi_pin=10, miso_pin=9, select_pin=22)

# interval for reading the MCP A/D converter, needs to be the same as or slower
# than the IMU rate, in milliseconds
AmpHour = 0.0
pressureTorpedo = 0.0
#
poll_interval = 100
print("Recommended Poll Interval: %dmS\n" % poll_interval)

#
# ROS Startup
#//////////////////////////////////////////////////////////////////////
rospy.init_node('analog_sensors')  # node name not given in the original; chosen here
pubAmpHour = rospy.Publisher('AmpHour', Float64, queue_size=10)
pubPressureTorpedo = rospy.Publisher('pressureTorpedo', Float64, queue_size=10)
rate = rospy.Rate(1000 / poll_interval)

# timestamps used for current integration and display throttling
previousTimeMCPS = time.time()
previousDisplayTimeS = time.time()

#
#////////////////////////////////////////////////////////////////////////
# Main Loop
#////////////////////////////////////////////////////////////////////////
#
#
while not rospy.is_shutdown():
    currentTimeS = time.time()
    #print("Time %+6.3f" % (currentTimeS))
    #
    # Pressure Sensor
    #
    # NBP series unamplified 0..150psi
    # Output 0..130mV
    reading0 = ch0.raw_value
    voltage0 = reading0 * 3.3 / 4096
    # conversion mV/V/full scale span 0...150psi: 21.7 26.0 30.0
    # remove zero offset, convert to mV, apply nominal sensitivity over the full range
    # At ambient pressure the reading is close to 4096/2; offset is 2069 +/-1
    # Calibration not verified yet !!!! keep 2069 though
    pressureTorpedo = (reading0 - 2069) * 3.3 / 4096 * 1000 / 26 * 150
    #
    # Battery Current Sensor
    #
    reading4 = ch4.raw_value
    voltage4 = reading4 * 3.3 / 4096
    # Some calibration here
    # current = (voltage4-0.001) * 1000
    current = 0.0
    AmpHour = AmpHour + ((currentTimeS - previousTimeMCPS) * current / 3600)
    previousTimeMCPS = currentTimeS
    #
    # Publish to the ROS master
    #
    if not rospy.is_shutdown():
        pubAmpHour.publish(AmpHour)
        pubPressureTorpedo.publish(pressureTorpedo)
    #
    # At reduced rate display data (10Hz)
    #
    if (currentTimeS - previousDisplayTimeS) > 0.1:
        print("Torpedo Pressure Reading = %2.2f psi" % (pressureTorpedo))
        print("AmpHour consumed %3.3f [Ahr]" % (AmpHour))
        previousDisplayTimeS = currentTimeS
    # release task
    # use rate once ROS is working and booted up
    rate.sleep()
    #timeRemaining = poll_interval/1000.0 - (time.time() - currentTimeS)
    #if (timeRemaining > 0):
    #    time.sleep(timeRemaining)
0.019624
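The sensor node above converts raw 12-bit MCP3208 counts into a pressure using a fixed zero offset and the sensor's nominal sensitivity, and defines (but never uses) a first-order low-pass filter. A small stand-alone sketch of those two pieces is given below; it reuses the same constants as the script, while the sample readings and the `alpha` value are fabricated for illustration.

```python
# Sketch of the count-to-pressure conversion and the exponential smoothing
# helper from the node above. The readings list is made up for illustration.

def counts_to_psi(raw, offset=2069, vref=3.3, full_scale=4096,
                  sensitivity_mv=26.0, span_psi=150.0):
    """Remove the ambient-pressure offset, convert counts to millivolts,
    then scale by the sensor's nominal mV output over its 0-150 psi span."""
    return (raw - offset) * vref / full_scale * 1000.0 / sensitivity_mv * span_psi


def low_pass(curr, prev, alpha):
    """First-order IIR low-pass: alpha=1 keeps only the new sample."""
    return curr * alpha + prev * (1 - alpha)


readings = [2069, 2100, 2150, 2140, 2200]   # fabricated A/D samples
filtered = counts_to_psi(readings[0])
for raw in readings:
    psi = counts_to_psi(raw)
    filtered = low_pass(psi, filtered, alpha=0.3)
    print("raw=%4d  pressure=%6.2f psi  filtered=%6.2f psi" % (raw, psi, filtered))
```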
import logging from autotest.client.shared import error from virttest import aexpect, utils_misc @error.context_aware def run_autotest_regression(test, params, env): """ Autotest regression test: Use Virtual Machines to test autotest. 1) Clone the given guest OS (only Linux) image twice. 2) Boot 2 VMs (autotest_server_vm and autotest_client_vm) 4) Install the autotest server in the server vm 5) Run the unittests 6) Run the pylint checker 7) Run a simple client sleeptest 8) Run a simple server sleeptest 9) Register the client vm in the autotest server 10) Schedule a simple job sleeptest in the client. Wait for client reboot. 11) If any of these steps have failed, fail the test and report the error :param test: virt test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ step_failures = [] autotest_repo = params['autotest_repo'] autotest_branch = params['autotest_branch'] autotest_commit = params['autotest_commit'] password = params['password'] autotest_install_timeout = int( params.get('autotest_install_timeout', 1800)) unittests_run_timeout = int(params.get('unittests_run_timeout', 1800)) pylint_run_timeout = int(params.get('pylint_run_timeout', 1800)) vm_names = params["vms"].split() server_name = vm_names[0] client_name = vm_names[1] vm_server = env.get_vm(server_name) vm_server.verify_alive() vm_client = env.get_vm(client_name) vm_client.verify_alive() timeout = float(params.get("login_timeout", 240)) session_server = vm_server.wait_for_login(timeout=timeout) session_client = vm_client.wait_for_login(timeout=timeout) client_ip = vm_client.get_address() server_ip = vm_server.get_address() step1 = "autotest-server-install" try: installer_file = "install-autotest-server.sh" installer_url = ("https://raw.github.com/autotest/autotest/master" "/contrib/%s" % installer_file) # Download the install script and execute it download_cmd = ("python -c 'from urllib2 import urlopen; " "r = urlopen(\"%s\"); " "f = open(\"%s\", \"w\"); " "f.write(r.read())'" % (installer_url, installer_file)) session_server.cmd(download_cmd) permission_cmd = ("chmod +x install-autotest-server.sh") session_server.cmd(permission_cmd) install_cmd = ("./install-autotest-server.sh -u Aut0t3st -d Aut0t3st " "-g %s -b %s" % (autotest_repo, autotest_branch)) if autotest_commit: install_cmd += " -c %s" % autotest_commit session_server.cmd(install_cmd, timeout=autotest_install_timeout) except aexpect.ShellCmdError, e: for line in e.output.splitlines(): logging.error(line) step_failures.append(step1) vm_server.copy_files_from(guest_path="/tmp/install-autotest-server*log", host_path=test.resultsdir) top_commit = None try: session_server.cmd("test -d /usr/local/autotest/.git") session_server.cmd("cd /usr/local/autotest") top_commit = session_server.cmd( "echo `git log -n 1 --pretty=format:%H`") top_commit = top_commit.strip() logging.info("Autotest top commit for repo %s, branch %s: %s", autotest_repo, autotest_branch, top_commit) except aexpect.ShellCmdError, e: for line in e.output.splitlines(): logging.error(line) if top_commit is not None: session_server.close() session_server = vm_server.wait_for_login(timeout=timeout, username='autotest', password='Aut0t3st') step2 = "unittests" try: session_server.cmd("cd /usr/local/autotest") session_server.cmd("utils/unittest_suite.py --full", timeout=unittests_run_timeout) except aexpect.ShellCmdError, e: for line in e.output.splitlines(): logging.error(line) step_failures.append(step2) step3 = "pylint" try: session_server.cmd("cd 
/usr/local/autotest") session_server.cmd("utils/check_patch.py --full --yes", timeout=pylint_run_timeout) except aexpect.ShellCmdError, e: for line in e.output.splitlines(): logging.error(line) step_failures.append(step3) step4 = "client_run" try: session_server.cmd("cd /usr/local/autotest/client") session_server.cmd("./autotest-local run sleeptest", timeout=pylint_run_timeout) session_server.cmd("rm -rf results/default") except aexpect.ShellCmdError, e: for line in e.output.splitlines(): logging.error(line) step_failures.append(step4) step5 = "server_run" try: session_client.cmd("iptables -F") session_server.cmd("cd /usr/local/autotest") session_server.cmd("server/autotest-remote -m %s --ssh-user root " "--ssh-pass %s " "-c client/tests/sleeptest/control" % (client_ip, password), timeout=pylint_run_timeout) session_server.cmd("rm -rf results-*") except aexpect.ShellCmdError, e: for line in e.output.splitlines(): logging.error(line) step_failures.append(step5) step6 = "registering_client_cli" try: label_name = "label-%s" % utils_misc.generate_random_id() create_label_cmd = ("/usr/local/autotest/cli/autotest-rpc-client " "label create -t %s -w %s" % (label_name, server_ip)) session_server.cmd(create_label_cmd) list_labels_cmd = ("/usr/local/autotest/cli/autotest-rpc-client " "label list -a -w %s" % server_ip) list_labels_output = session_server.cmd(list_labels_cmd) for line in list_labels_output.splitlines(): logging.debug(line) if not label_name in list_labels_output: raise ValueError("No label %s in the output of %s" % (label_name, list_labels_cmd)) create_host_cmd = ("/usr/local/autotest/cli/autotest-rpc-client " "host create -t %s %s -w %s" % (label_name, client_ip, server_ip)) session_server.cmd(create_host_cmd) list_hosts_cmd = ("/usr/local/autotest/cli/autotest-rpc-client " "host list -w %s" % server_ip) list_hosts_output = session_server.cmd(list_hosts_cmd) for line in list_hosts_output.splitlines(): logging.debug(line) if not client_ip in list_hosts_output: raise ValueError("No client %s in the output of %s" % (client_ip, create_label_cmd)) if not label_name in list_hosts_output: raise ValueError("No label %s in the output of %s" % (label_name, create_label_cmd)) except (aexpect.ShellCmdError, ValueError), e: if isinstance(e, aexpect.ShellCmdError): for line in e.output.splitlines(): logging.error(line) elif isinstance(e, ValueError): logging.error(e) step_failures.append(step6) step7 = "running_job_cli" try: session_client.cmd("iptables -F") job_name = "Sleeptest %s" % utils_misc.generate_random_id() def job_is_status(status): list_jobs_cmd = ("/usr/local/autotest/cli/autotest-rpc-client " "job list -a -w %s" % server_ip) list_jobs_output = session_server.cmd(list_jobs_cmd) if job_name in list_jobs_output: if status in list_jobs_output: return True elif "Aborted" in list_jobs_output: raise ValueError("Job is in aborted state") elif "Failed" in list_jobs_output: raise ValueError("Job is in failed state") else: return False else: raise ValueError("Job %s does not show in the " "output of %s" % (job_name, list_jobs_cmd)) def job_is_completed(): return job_is_status("Completed") def job_is_running(): return job_is_status("Running") job_create_cmd = ("/usr/local/autotest/cli/autotest-rpc-client " "job create --test sleeptest -m %s '%s' -w %s" % (client_ip, job_name, server_ip)) session_server.cmd(job_create_cmd) if not utils_misc.wait_for(job_is_running, 300, 0, 10, "Waiting for job to start running"): raise ValueError("Job did not start running") # Wait for the session to become 
unresponsive if not utils_misc.wait_for( lambda: not session_client.is_responsive(), timeout=300): raise ValueError("Client machine did not reboot") # Establish a new client session session_client = vm_client.wait_for_login(timeout=timeout) # Wait for the job to complete if not utils_misc.wait_for(job_is_completed, 300, 0, 10, "Waiting for job to complete"): raise ValueError("Job did not complete") # Copy logs back so we can analyze them vm_server.copy_files_from( guest_path="/usr/local/autotest/results/*", host_path=test.resultsdir) except (aexpect.ShellCmdError, ValueError), e: if isinstance(e, aexpect.ShellCmdError): for line in e.output.splitlines(): logging.error(line) elif isinstance(e, ValueError): logging.error(e) step_failures.append(step7) def report_version(): if top_commit is not None: logging.info("Autotest git repo: %s", autotest_repo) logging.info("Autotest git branch: %s", autotest_repo) logging.info("Autotest top commit: %s", top_commit) if step_failures: logging.error("The autotest regression testing failed") report_version() raise error.TestFail("The autotest regression testing had the " "following steps failed: %s" % step_failures) else: logging.info("The autotest regression testing passed") report_version()
0.000351
"""Euclidean algorithms, GCDs, LCMs and polynomial remainder sequences. """ from __future__ import print_function, division from sympy.polys.densebasic import ( dup_strip, dmp_raise, dmp_zero, dmp_one, dmp_ground, dmp_one_p, dmp_zero_p, dmp_zeros, dup_degree, dmp_degree, dmp_degree_in, dup_LC, dmp_LC, dmp_ground_LC, dmp_multi_deflate, dmp_inflate, dup_convert, dmp_convert, dmp_apply_pairs) from sympy.polys.densearith import ( dup_sub_mul, dup_neg, dmp_neg, dmp_add, dmp_sub, dup_mul, dmp_mul, dmp_pow, dup_div, dmp_div, dup_rem, dup_quo, dmp_quo, dup_prem, dmp_prem, dup_mul_ground, dmp_mul_ground, dmp_mul_term, dup_quo_ground, dmp_quo_ground, dup_max_norm, dmp_max_norm) from sympy.polys.densetools import ( dup_clear_denoms, dmp_clear_denoms, dup_diff, dmp_diff, dup_eval, dmp_eval, dmp_eval_in, dup_trunc, dmp_ground_trunc, dup_monic, dmp_ground_monic, dup_primitive, dmp_ground_primitive, dup_extract, dmp_ground_extract) from sympy.polys.galoistools import ( gf_int, gf_crt) from sympy.polys.polyerrors import ( MultivariatePolynomialError, HeuristicGCDFailed, HomomorphismFailed, NotInvertible, DomainError) from sympy.polys.polyconfig import query from sympy.ntheory import nextprime from sympy.core.compatibility import range def dup_half_gcdex(f, g, K): """ Half extended Euclidean algorithm in `F[x]`. Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``. Examples ======== >>> from sympy.polys import ring, QQ >>> R, x = ring("x", QQ) >>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15 >>> g = x**3 + x**2 - 4*x - 4 >>> R.dup_half_gcdex(f, g) (-1/5*x + 3/5, x + 1) """ if not K.has_Field: raise DomainError("can't compute half extended GCD over %s" % K) a, b = [K.one], [] while g: q, r = dup_div(f, g, K) f, g = g, r a, b = b, dup_sub_mul(a, q, b, K) a = dup_quo_ground(a, dup_LC(f, K), K) f = dup_monic(f, K) return a, f def dmp_half_gcdex(f, g, u, K): """ Half extended Euclidean algorithm in `F[X]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y = ring("x,y", ZZ) """ if not u: return dup_half_gcdex(f, g, K) else: raise MultivariatePolynomialError(f, g) def dup_gcdex(f, g, K): """ Extended Euclidean algorithm in `F[x]`. Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``. Examples ======== >>> from sympy.polys import ring, QQ >>> R, x = ring("x", QQ) >>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15 >>> g = x**3 + x**2 - 4*x - 4 >>> R.dup_gcdex(f, g) (-1/5*x + 3/5, 1/5*x**2 - 6/5*x + 2, x + 1) """ s, h = dup_half_gcdex(f, g, K) F = dup_sub_mul(h, s, f, K) t = dup_quo(F, g, K) return s, t, h def dmp_gcdex(f, g, u, K): """ Extended Euclidean algorithm in `F[X]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y = ring("x,y", ZZ) """ if not u: return dup_gcdex(f, g, K) else: raise MultivariatePolynomialError(f, g) def dup_invert(f, g, K): """ Compute multiplicative inverse of `f` modulo `g` in `F[x]`. Examples ======== >>> from sympy.polys import ring, QQ >>> R, x = ring("x", QQ) >>> f = x**2 - 1 >>> g = 2*x - 1 >>> h = x - 1 >>> R.dup_invert(f, g) -4/3 >>> R.dup_invert(f, h) Traceback (most recent call last): ... NotInvertible: zero divisor """ s, h = dup_half_gcdex(f, g, K) if h == [K.one]: return dup_rem(s, g, K) else: raise NotInvertible("zero divisor") def dmp_invert(f, g, u, K): """ Compute multiplicative inverse of `f` modulo `g` in `F[X]`. 
Examples ======== >>> from sympy.polys import ring, QQ >>> R, x = ring("x", QQ) """ if not u: return dup_invert(f, g, K) else: raise MultivariatePolynomialError(f, g) def dup_euclidean_prs(f, g, K): """ Euclidean polynomial remainder sequence (PRS) in `K[x]`. Examples ======== >>> from sympy.polys import ring, QQ >>> R, x = ring("x", QQ) >>> f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5 >>> g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21 >>> prs = R.dup_euclidean_prs(f, g) >>> prs[0] x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5 >>> prs[1] 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21 >>> prs[2] -5/9*x**4 + 1/9*x**2 - 1/3 >>> prs[3] -117/25*x**2 - 9*x + 441/25 >>> prs[4] 233150/19773*x - 102500/6591 >>> prs[5] -1288744821/543589225 """ prs = [f, g] h = dup_rem(f, g, K) while h: prs.append(h) f, g = g, h h = dup_rem(f, g, K) return prs def dmp_euclidean_prs(f, g, u, K): """ Euclidean polynomial remainder sequence (PRS) in `K[X]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y = ring("x,y", ZZ) """ if not u: return dup_euclidean_prs(f, g, K) else: raise MultivariatePolynomialError(f, g) def dup_primitive_prs(f, g, K): """ Primitive polynomial remainder sequence (PRS) in `K[x]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x = ring("x", ZZ) >>> f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5 >>> g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21 >>> prs = R.dup_primitive_prs(f, g) >>> prs[0] x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5 >>> prs[1] 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21 >>> prs[2] -5*x**4 + x**2 - 3 >>> prs[3] 13*x**2 + 25*x - 49 >>> prs[4] 4663*x - 6150 >>> prs[5] 1 """ prs = [f, g] _, h = dup_primitive(dup_prem(f, g, K), K) while h: prs.append(h) f, g = g, h _, h = dup_primitive(dup_prem(f, g, K), K) return prs def dmp_primitive_prs(f, g, u, K): """ Primitive polynomial remainder sequence (PRS) in `K[X]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y = ring("x,y", ZZ) """ if not u: return dup_primitive_prs(f, g, K) else: raise MultivariatePolynomialError(f, g) def dup_inner_subresultants(f, g, K): """ Subresultant PRS algorithm in `K[x]`. Computes the subresultant polynomial remainder sequence (PRS) and the non-zero scalar subresultants of `f` and `g`. By [1] Thm. 3, these are the constants '-c' (- to optimize computation of sign). The first subdeterminant is set to 1 by convention to match the polynomial and the scalar subdeterminants. If 'deg(f) < deg(g)', the subresultants of '(g,f)' are computed. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x = ring("x", ZZ) >>> R.dup_inner_subresultants(x**2 + 1, x**2 - 1) ([x**2 + 1, x**2 - 1, -2], [1, 1, 4]) References ========== [1] W.S. Brown, The Subresultant PRS Algorithm. ACM Transaction of Mathematical Software 4 (1978) 237-249 """ n = dup_degree(f) m = dup_degree(g) if n < m: f, g = g, f n, m = m, n if not f: return [], [] if not g: return [f], [K.one] R = [f, g] d = n - m b = (-K.one)**(d + 1) h = dup_prem(f, g, K) h = dup_mul_ground(h, b, K) lc = dup_LC(g, K) c = lc**d # Conventional first scalar subdeterminant is 1 S = [K.one, c] c = -c while h: k = dup_degree(h) R.append(h) f, g, m, d = g, h, k, m - k b = -lc * c**d h = dup_prem(f, g, K) h = dup_quo_ground(h, b, K) lc = dup_LC(g, K) if d > 1: # abnormal case q = c**(d - 1) c = K.quo((-lc)**d, q) else: c = -lc S.append(-c) return R, S def dup_subresultants(f, g, K): """ Computes subresultant PRS of two polynomials in `K[x]`. 
Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x = ring("x", ZZ) >>> R.dup_subresultants(x**2 + 1, x**2 - 1) [x**2 + 1, x**2 - 1, -2] """ return dup_inner_subresultants(f, g, K)[0] def dup_prs_resultant(f, g, K): """ Resultant algorithm in `K[x]` using subresultant PRS. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x = ring("x", ZZ) >>> R.dup_prs_resultant(x**2 + 1, x**2 - 1) (4, [x**2 + 1, x**2 - 1, -2]) """ if not f or not g: return (K.zero, []) R, S = dup_inner_subresultants(f, g, K) if dup_degree(R[-1]) > 0: return (K.zero, R) return S[-1], R def dup_resultant(f, g, K, includePRS=False): """ Computes resultant of two polynomials in `K[x]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x = ring("x", ZZ) >>> R.dup_resultant(x**2 + 1, x**2 - 1) 4 """ if includePRS: return dup_prs_resultant(f, g, K) return dup_prs_resultant(f, g, K)[0] def dmp_inner_subresultants(f, g, u, K): """ Subresultant PRS algorithm in `K[X]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y = ring("x,y", ZZ) >>> f = 3*x**2*y - y**3 - 4 >>> g = x**2 + x*y**3 - 9 >>> a = 3*x*y**4 + y**3 - 27*y + 4 >>> b = -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16 >>> prs = [f, g, a, b] >>> sres = [[1], [1], [3, 0, 0, 0, 0], [-3, 0, 0, -12, 1, 0, -54, 8, 729, -216, 16]] >>> R.dmp_inner_subresultants(f, g) == (prs, sres) True """ if not u: return dup_inner_subresultants(f, g, K) n = dmp_degree(f, u) m = dmp_degree(g, u) if n < m: f, g = g, f n, m = m, n if dmp_zero_p(f, u): return [], [] v = u - 1 if dmp_zero_p(g, u): return [f], [dmp_ground(K.one, v)] R = [f, g] d = n - m b = dmp_pow(dmp_ground(-K.one, v), d + 1, v, K) h = dmp_prem(f, g, u, K) h = dmp_mul_term(h, b, 0, u, K) lc = dmp_LC(g, K) c = dmp_pow(lc, d, v, K) S = [dmp_ground(K.one, v), c] c = dmp_neg(c, v, K) while not dmp_zero_p(h, u): k = dmp_degree(h, u) R.append(h) f, g, m, d = g, h, k, m - k b = dmp_mul(dmp_neg(lc, v, K), dmp_pow(c, d, v, K), v, K) h = dmp_prem(f, g, u, K) h = [ dmp_quo(ch, b, v, K) for ch in h ] lc = dmp_LC(g, K) if d > 1: p = dmp_pow(dmp_neg(lc, v, K), d, v, K) q = dmp_pow(c, d - 1, v, K) c = dmp_quo(p, q, v, K) else: c = dmp_neg(lc, v, K) S.append(dmp_neg(c, v, K)) return R, S def dmp_subresultants(f, g, u, K): """ Computes subresultant PRS of two polynomials in `K[X]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y = ring("x,y", ZZ) >>> f = 3*x**2*y - y**3 - 4 >>> g = x**2 + x*y**3 - 9 >>> a = 3*x*y**4 + y**3 - 27*y + 4 >>> b = -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16 >>> R.dmp_subresultants(f, g) == [f, g, a, b] True """ return dmp_inner_subresultants(f, g, u, K)[0] def dmp_prs_resultant(f, g, u, K): """ Resultant algorithm in `K[X]` using subresultant PRS. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y = ring("x,y", ZZ) >>> f = 3*x**2*y - y**3 - 4 >>> g = x**2 + x*y**3 - 9 >>> a = 3*x*y**4 + y**3 - 27*y + 4 >>> b = -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16 >>> res, prs = R.dmp_prs_resultant(f, g) >>> res == b # resultant has n-1 variables False >>> res == b.drop(x) True >>> prs == [f, g, a, b] True """ if not u: return dup_prs_resultant(f, g, K) if dmp_zero_p(f, u) or dmp_zero_p(g, u): return (dmp_zero(u - 1), []) R, S = dmp_inner_subresultants(f, g, u, K) if dmp_degree(R[-1], u) > 0: return (dmp_zero(u - 1), R) return S[-1], R def dmp_zz_modular_resultant(f, g, p, u, K): """ Compute resultant of `f` and `g` modulo a prime `p`. 
Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y = ring("x,y", ZZ) >>> f = x + y + 2 >>> g = 2*x*y + x + 3 >>> R.dmp_zz_modular_resultant(f, g, 5) -2*y**2 + 1 """ if not u: return gf_int(dup_prs_resultant(f, g, K)[0] % p, p) v = u - 1 n = dmp_degree(f, u) m = dmp_degree(g, u) N = dmp_degree_in(f, 1, u) M = dmp_degree_in(g, 1, u) B = n*M + m*N D, a = [K.one], -K.one r = dmp_zero(v) while dup_degree(D) <= B: while True: a += K.one if a == p: raise HomomorphismFailed('no luck') F = dmp_eval_in(f, gf_int(a, p), 1, u, K) if dmp_degree(F, v) == n: G = dmp_eval_in(g, gf_int(a, p), 1, u, K) if dmp_degree(G, v) == m: break R = dmp_zz_modular_resultant(F, G, p, v, K) e = dmp_eval(r, a, v, K) if not v: R = dup_strip([R]) e = dup_strip([e]) else: R = [R] e = [e] d = K.invert(dup_eval(D, a, K), p) d = dup_mul_ground(D, d, K) d = dmp_raise(d, v, 0, K) c = dmp_mul(d, dmp_sub(R, e, v, K), v, K) r = dmp_add(r, c, v, K) r = dmp_ground_trunc(r, p, v, K) D = dup_mul(D, [K.one, -a], K) D = dup_trunc(D, p, K) return r def _collins_crt(r, R, P, p, K): """Wrapper of CRT for Collins's resultant algorithm. """ return gf_int(gf_crt([r, R], [P, p], K), P*p) def dmp_zz_collins_resultant(f, g, u, K): """ Collins's modular resultant algorithm in `Z[X]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y = ring("x,y", ZZ) >>> f = x + y + 2 >>> g = 2*x*y + x + 3 >>> R.dmp_zz_collins_resultant(f, g) -2*y**2 - 5*y + 1 """ n = dmp_degree(f, u) m = dmp_degree(g, u) if n < 0 or m < 0: return dmp_zero(u - 1) A = dmp_max_norm(f, u, K) B = dmp_max_norm(g, u, K) a = dmp_ground_LC(f, u, K) b = dmp_ground_LC(g, u, K) v = u - 1 B = K(2)*K.factorial(K(n + m))*A**m*B**n r, p, P = dmp_zero(v), K.one, K.one while P <= B: p = K(nextprime(p)) while not (a % p) or not (b % p): p = K(nextprime(p)) F = dmp_ground_trunc(f, p, u, K) G = dmp_ground_trunc(g, p, u, K) try: R = dmp_zz_modular_resultant(F, G, p, u, K) except HomomorphismFailed: continue if K.is_one(P): r = R else: r = dmp_apply_pairs(r, R, _collins_crt, (P, p, K), v, K) P *= p return r def dmp_qq_collins_resultant(f, g, u, K0): """ Collins's modular resultant algorithm in `Q[X]`. Examples ======== >>> from sympy.polys import ring, QQ >>> R, x,y = ring("x,y", QQ) >>> f = QQ(1,2)*x + y + QQ(2,3) >>> g = 2*x*y + x + 3 >>> R.dmp_qq_collins_resultant(f, g) -2*y**2 - 7/3*y + 5/6 """ n = dmp_degree(f, u) m = dmp_degree(g, u) if n < 0 or m < 0: return dmp_zero(u - 1) K1 = K0.get_ring() cf, f = dmp_clear_denoms(f, u, K0, K1) cg, g = dmp_clear_denoms(g, u, K0, K1) f = dmp_convert(f, u, K0, K1) g = dmp_convert(g, u, K0, K1) r = dmp_zz_collins_resultant(f, g, u, K1) r = dmp_convert(r, u - 1, K1, K0) c = K0.convert(cf**m * cg**n, K1) return dmp_quo_ground(r, c, u - 1, K0) def dmp_resultant(f, g, u, K, includePRS=False): """ Computes resultant of two polynomials in `K[X]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y = ring("x,y", ZZ) >>> f = 3*x**2*y - y**3 - 4 >>> g = x**2 + x*y**3 - 9 >>> R.dmp_resultant(f, g) -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16 """ if not u: return dup_resultant(f, g, K, includePRS=includePRS) if includePRS: return dmp_prs_resultant(f, g, u, K) if K.has_Field: if K.is_QQ and query('USE_COLLINS_RESULTANT'): return dmp_qq_collins_resultant(f, g, u, K) else: if K.is_ZZ and query('USE_COLLINS_RESULTANT'): return dmp_zz_collins_resultant(f, g, u, K) return dmp_prs_resultant(f, g, u, K)[0] def dup_discriminant(f, K): """ Computes discriminant of a polynomial in `K[x]`. 
Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x = ring("x", ZZ) >>> R.dup_discriminant(x**2 + 2*x + 3) -8 """ d = dup_degree(f) if d <= 0: return K.zero else: s = (-1)**((d*(d - 1)) // 2) c = dup_LC(f, K) r = dup_resultant(f, dup_diff(f, 1, K), K) return K.quo(r, c*K(s)) def dmp_discriminant(f, u, K): """ Computes discriminant of a polynomial in `K[X]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y,z,t = ring("x,y,z,t", ZZ) >>> R.dmp_discriminant(x**2*y + x*z + t) -4*y*t + z**2 """ if not u: return dup_discriminant(f, K) d, v = dmp_degree(f, u), u - 1 if d <= 0: return dmp_zero(v) else: s = (-1)**((d*(d - 1)) // 2) c = dmp_LC(f, K) r = dmp_resultant(f, dmp_diff(f, 1, u, K), u, K) c = dmp_mul_ground(c, K(s), v, K) return dmp_quo(r, c, v, K) def _dup_rr_trivial_gcd(f, g, K): """Handle trivial cases in GCD algorithm over a ring. """ if not (f or g): return [], [], [] elif not f: if K.is_nonnegative(dup_LC(g, K)): return g, [], [K.one] else: return dup_neg(g, K), [], [-K.one] elif not g: if K.is_nonnegative(dup_LC(f, K)): return f, [K.one], [] else: return dup_neg(f, K), [-K.one], [] return None def _dup_ff_trivial_gcd(f, g, K): """Handle trivial cases in GCD algorithm over a field. """ if not (f or g): return [], [], [] elif not f: return dup_monic(g, K), [], [dup_LC(g, K)] elif not g: return dup_monic(f, K), [dup_LC(f, K)], [] else: return None def _dmp_rr_trivial_gcd(f, g, u, K): """Handle trivial cases in GCD algorithm over a ring. """ zero_f = dmp_zero_p(f, u) zero_g = dmp_zero_p(g, u) if_contain_one = dmp_one_p(f, u, K) or dmp_one_p(g, u, K) if zero_f and zero_g: return tuple(dmp_zeros(3, u, K)) elif zero_f: if K.is_nonnegative(dmp_ground_LC(g, u, K)): return g, dmp_zero(u), dmp_one(u, K) else: return dmp_neg(g, u, K), dmp_zero(u), dmp_ground(-K.one, u) elif zero_g: if K.is_nonnegative(dmp_ground_LC(f, u, K)): return f, dmp_one(u, K), dmp_zero(u) else: return dmp_neg(f, u, K), dmp_ground(-K.one, u), dmp_zero(u) elif if_contain_one: return dmp_one(u, K), f, g elif query('USE_SIMPLIFY_GCD'): return _dmp_simplify_gcd(f, g, u, K) else: return None def _dmp_ff_trivial_gcd(f, g, u, K): """Handle trivial cases in GCD algorithm over a field. """ zero_f = dmp_zero_p(f, u) zero_g = dmp_zero_p(g, u) if zero_f and zero_g: return tuple(dmp_zeros(3, u, K)) elif zero_f: return (dmp_ground_monic(g, u, K), dmp_zero(u), dmp_ground(dmp_ground_LC(g, u, K), u)) elif zero_g: return (dmp_ground_monic(f, u, K), dmp_ground(dmp_ground_LC(f, u, K), u), dmp_zero(u)) elif query('USE_SIMPLIFY_GCD'): return _dmp_simplify_gcd(f, g, u, K) else: return None def _dmp_simplify_gcd(f, g, u, K): """Try to eliminate `x_0` from GCD computation in `K[X]`. """ df = dmp_degree(f, u) dg = dmp_degree(g, u) if df > 0 and dg > 0: return None if not (df or dg): F = dmp_LC(f, K) G = dmp_LC(g, K) else: if not df: F = dmp_LC(f, K) G = dmp_content(g, u, K) else: F = dmp_content(f, u, K) G = dmp_LC(g, K) v = u - 1 h = dmp_gcd(F, G, v, K) cff = [ dmp_quo(cf, h, v, K) for cf in f ] cfg = [ dmp_quo(cg, h, v, K) for cg in g ] return [h], cff, cfg def dup_rr_prs_gcd(f, g, K): """ Computes polynomial GCD using subresultants over a ring. Returns ``(h, cff, cfg)`` such that ``a = gcd(f, g)``, ``cff = quo(f, h)``, and ``cfg = quo(g, h)``. 
Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x = ring("x", ZZ) >>> R.dup_rr_prs_gcd(x**2 - 1, x**2 - 3*x + 2) (x - 1, x + 1, x - 2) """ result = _dup_rr_trivial_gcd(f, g, K) if result is not None: return result fc, F = dup_primitive(f, K) gc, G = dup_primitive(g, K) c = K.gcd(fc, gc) h = dup_subresultants(F, G, K)[-1] _, h = dup_primitive(h, K) if K.is_negative(dup_LC(h, K)): c = -c h = dup_mul_ground(h, c, K) cff = dup_quo(f, h, K) cfg = dup_quo(g, h, K) return h, cff, cfg def dup_ff_prs_gcd(f, g, K): """ Computes polynomial GCD using subresultants over a field. Returns ``(h, cff, cfg)`` such that ``a = gcd(f, g)``, ``cff = quo(f, h)``, and ``cfg = quo(g, h)``. Examples ======== >>> from sympy.polys import ring, QQ >>> R, x = ring("x", QQ) >>> R.dup_ff_prs_gcd(x**2 - 1, x**2 - 3*x + 2) (x - 1, x + 1, x - 2) """ result = _dup_ff_trivial_gcd(f, g, K) if result is not None: return result h = dup_subresultants(f, g, K)[-1] h = dup_monic(h, K) cff = dup_quo(f, h, K) cfg = dup_quo(g, h, K) return h, cff, cfg def dmp_rr_prs_gcd(f, g, u, K): """ Computes polynomial GCD using subresultants over a ring. Returns ``(h, cff, cfg)`` such that ``a = gcd(f, g)``, ``cff = quo(f, h)``, and ``cfg = quo(g, h)``. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y, = ring("x,y", ZZ) >>> f = x**2 + 2*x*y + y**2 >>> g = x**2 + x*y >>> R.dmp_rr_prs_gcd(f, g) (x + y, x + y, x) """ if not u: return dup_rr_prs_gcd(f, g, K) result = _dmp_rr_trivial_gcd(f, g, u, K) if result is not None: return result fc, F = dmp_primitive(f, u, K) gc, G = dmp_primitive(g, u, K) h = dmp_subresultants(F, G, u, K)[-1] c, _, _ = dmp_rr_prs_gcd(fc, gc, u - 1, K) if K.is_negative(dmp_ground_LC(h, u, K)): h = dmp_neg(h, u, K) _, h = dmp_primitive(h, u, K) h = dmp_mul_term(h, c, 0, u, K) cff = dmp_quo(f, h, u, K) cfg = dmp_quo(g, h, u, K) return h, cff, cfg def dmp_ff_prs_gcd(f, g, u, K): """ Computes polynomial GCD using subresultants over a field. Returns ``(h, cff, cfg)`` such that ``a = gcd(f, g)``, ``cff = quo(f, h)``, and ``cfg = quo(g, h)``. Examples ======== >>> from sympy.polys import ring, QQ >>> R, x,y, = ring("x,y", QQ) >>> f = QQ(1,2)*x**2 + x*y + QQ(1,2)*y**2 >>> g = x**2 + x*y >>> R.dmp_ff_prs_gcd(f, g) (x + y, 1/2*x + 1/2*y, x) """ if not u: return dup_ff_prs_gcd(f, g, K) result = _dmp_ff_trivial_gcd(f, g, u, K) if result is not None: return result fc, F = dmp_primitive(f, u, K) gc, G = dmp_primitive(g, u, K) h = dmp_subresultants(F, G, u, K)[-1] c, _, _ = dmp_ff_prs_gcd(fc, gc, u - 1, K) _, h = dmp_primitive(h, u, K) h = dmp_mul_term(h, c, 0, u, K) h = dmp_ground_monic(h, u, K) cff = dmp_quo(f, h, u, K) cfg = dmp_quo(g, h, u, K) return h, cff, cfg HEU_GCD_MAX = 6 def _dup_zz_gcd_interpolate(h, x, K): """Interpolate polynomial GCD from integer GCD. """ f = [] while h: g = h % x if g > x // 2: g -= x f.insert(0, g) h = (h - g) // x return f def dup_zz_heu_gcd(f, g, K): """ Heuristic polynomial GCD in `Z[x]`. Given univariate polynomials `f` and `g` in `Z[x]`, returns their GCD and cofactors, i.e. polynomials ``h``, ``cff`` and ``cfg`` such that:: h = gcd(f, g), cff = quo(f, h) and cfg = quo(g, h) The algorithm is purely heuristic which means it may fail to compute the GCD. This will be signaled by raising an exception. In this case you will need to switch to another GCD method. The algorithm computes the polynomial GCD by evaluating polynomials f and g at certain points and computing (fast) integer GCD of those evaluations. The polynomial GCD is recovered from the integer image by interpolation. 
The final step is to verify if the result is the correct GCD. This gives cofactors as a side effect. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x = ring("x", ZZ) >>> R.dup_zz_heu_gcd(x**2 - 1, x**2 - 3*x + 2) (x - 1, x + 1, x - 2) References ========== 1. [Liao95]_ """ result = _dup_rr_trivial_gcd(f, g, K) if result is not None: return result df = dup_degree(f) dg = dup_degree(g) gcd, f, g = dup_extract(f, g, K) if df == 0 or dg == 0: return [gcd], f, g f_norm = dup_max_norm(f, K) g_norm = dup_max_norm(g, K) B = K(2*min(f_norm, g_norm) + 29) x = max(min(B, 99*K.sqrt(B)), 2*min(f_norm // abs(dup_LC(f, K)), g_norm // abs(dup_LC(g, K))) + 2) for i in range(0, HEU_GCD_MAX): ff = dup_eval(f, x, K) gg = dup_eval(g, x, K) if ff and gg: h = K.gcd(ff, gg) cff = ff // h cfg = gg // h h = _dup_zz_gcd_interpolate(h, x, K) h = dup_primitive(h, K)[1] cff_, r = dup_div(f, h, K) if not r: cfg_, r = dup_div(g, h, K) if not r: h = dup_mul_ground(h, gcd, K) return h, cff_, cfg_ cff = _dup_zz_gcd_interpolate(cff, x, K) h, r = dup_div(f, cff, K) if not r: cfg_, r = dup_div(g, h, K) if not r: h = dup_mul_ground(h, gcd, K) return h, cff, cfg_ cfg = _dup_zz_gcd_interpolate(cfg, x, K) h, r = dup_div(g, cfg, K) if not r: cff_, r = dup_div(f, h, K) if not r: h = dup_mul_ground(h, gcd, K) return h, cff_, cfg x = 73794*x * K.sqrt(K.sqrt(x)) // 27011 raise HeuristicGCDFailed('no luck') def _dmp_zz_gcd_interpolate(h, x, v, K): """Interpolate polynomial GCD from integer GCD. """ f = [] while not dmp_zero_p(h, v): g = dmp_ground_trunc(h, x, v, K) f.insert(0, g) h = dmp_sub(h, g, v, K) h = dmp_quo_ground(h, x, v, K) if K.is_negative(dmp_ground_LC(f, v + 1, K)): return dmp_neg(f, v + 1, K) else: return f def dmp_zz_heu_gcd(f, g, u, K): """ Heuristic polynomial GCD in `Z[X]`. Given multivariate polynomials `f` and `g` in `Z[X]`, returns their GCD and cofactors, i.e. polynomials ``h``, ``cff`` and ``cfg`` such that:: h = gcd(f, g), cff = quo(f, h) and cfg = quo(g, h) The algorithm is purely heuristic which means it may fail to compute the GCD. This will be signaled by raising an exception. In this case you will need to switch to another GCD method. The algorithm computes the polynomial GCD by evaluating polynomials f and g at certain points and computing (fast) integer GCD of those evaluations. The polynomial GCD is recovered from the integer image by interpolation. The evaluation process reduces f and g variable by variable into a large integer. The final step is to verify if the interpolated polynomial is the correct GCD. This gives cofactors of the input polynomials as a side effect. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y, = ring("x,y", ZZ) >>> f = x**2 + 2*x*y + y**2 >>> g = x**2 + x*y >>> R.dmp_zz_heu_gcd(f, g) (x + y, x + y, x) References ========== 1.
[Liao95]_ """ if not u: return dup_zz_heu_gcd(f, g, K) result = _dmp_rr_trivial_gcd(f, g, u, K) if result is not None: return result gcd, f, g = dmp_ground_extract(f, g, u, K) f_norm = dmp_max_norm(f, u, K) g_norm = dmp_max_norm(g, u, K) B = K(2*min(f_norm, g_norm) + 29) x = max(min(B, 99*K.sqrt(B)), 2*min(f_norm // abs(dmp_ground_LC(f, u, K)), g_norm // abs(dmp_ground_LC(g, u, K))) + 2) for i in range(0, HEU_GCD_MAX): ff = dmp_eval(f, x, u, K) gg = dmp_eval(g, x, u, K) v = u - 1 if not (dmp_zero_p(ff, v) or dmp_zero_p(gg, v)): h, cff, cfg = dmp_zz_heu_gcd(ff, gg, v, K) h = _dmp_zz_gcd_interpolate(h, x, v, K) h = dmp_ground_primitive(h, u, K)[1] cff_, r = dmp_div(f, h, u, K) if dmp_zero_p(r, u): cfg_, r = dmp_div(g, h, u, K) if dmp_zero_p(r, u): h = dmp_mul_ground(h, gcd, u, K) return h, cff_, cfg_ cff = _dmp_zz_gcd_interpolate(cff, x, v, K) h, r = dmp_div(f, cff, u, K) if dmp_zero_p(r, u): cfg_, r = dmp_div(g, h, u, K) if dmp_zero_p(r, u): h = dmp_mul_ground(h, gcd, u, K) return h, cff, cfg_ cfg = _dmp_zz_gcd_interpolate(cfg, x, v, K) h, r = dmp_div(g, cfg, u, K) if dmp_zero_p(r, u): cff_, r = dmp_div(f, h, u, K) if dmp_zero_p(r, u): h = dmp_mul_ground(h, gcd, u, K) return h, cff_, cfg x = 73794*x * K.sqrt(K.sqrt(x)) // 27011 raise HeuristicGCDFailed('no luck') def dup_qq_heu_gcd(f, g, K0): """ Heuristic polynomial GCD in `Q[x]`. Returns ``(h, cff, cfg)`` such that ``a = gcd(f, g)``, ``cff = quo(f, h)``, and ``cfg = quo(g, h)``. Examples ======== >>> from sympy.polys import ring, QQ >>> R, x = ring("x", QQ) >>> f = QQ(1,2)*x**2 + QQ(7,4)*x + QQ(3,2) >>> g = QQ(1,2)*x**2 + x >>> R.dup_qq_heu_gcd(f, g) (x + 2, 1/2*x + 3/4, 1/2*x) """ result = _dup_ff_trivial_gcd(f, g, K0) if result is not None: return result K1 = K0.get_ring() cf, f = dup_clear_denoms(f, K0, K1) cg, g = dup_clear_denoms(g, K0, K1) f = dup_convert(f, K0, K1) g = dup_convert(g, K0, K1) h, cff, cfg = dup_zz_heu_gcd(f, g, K1) h = dup_convert(h, K1, K0) c = dup_LC(h, K0) h = dup_monic(h, K0) cff = dup_convert(cff, K1, K0) cfg = dup_convert(cfg, K1, K0) cff = dup_mul_ground(cff, K0.quo(c, cf), K0) cfg = dup_mul_ground(cfg, K0.quo(c, cg), K0) return h, cff, cfg def dmp_qq_heu_gcd(f, g, u, K0): """ Heuristic polynomial GCD in `Q[X]`. Returns ``(h, cff, cfg)`` such that ``a = gcd(f, g)``, ``cff = quo(f, h)``, and ``cfg = quo(g, h)``. Examples ======== >>> from sympy.polys import ring, QQ >>> R, x,y, = ring("x,y", QQ) >>> f = QQ(1,4)*x**2 + x*y + y**2 >>> g = QQ(1,2)*x**2 + x*y >>> R.dmp_qq_heu_gcd(f, g) (x + 2*y, 1/4*x + 1/2*y, 1/2*x) """ result = _dmp_ff_trivial_gcd(f, g, u, K0) if result is not None: return result K1 = K0.get_ring() cf, f = dmp_clear_denoms(f, u, K0, K1) cg, g = dmp_clear_denoms(g, u, K0, K1) f = dmp_convert(f, u, K0, K1) g = dmp_convert(g, u, K0, K1) h, cff, cfg = dmp_zz_heu_gcd(f, g, u, K1) h = dmp_convert(h, u, K1, K0) c = dmp_ground_LC(h, u, K0) h = dmp_ground_monic(h, u, K0) cff = dmp_convert(cff, u, K1, K0) cfg = dmp_convert(cfg, u, K1, K0) cff = dmp_mul_ground(cff, K0.quo(c, cf), u, K0) cfg = dmp_mul_ground(cfg, K0.quo(c, cg), u, K0) return h, cff, cfg def dup_inner_gcd(f, g, K): """ Computes polynomial GCD and cofactors of `f` and `g` in `K[x]`. Returns ``(h, cff, cfg)`` such that ``a = gcd(f, g)``, ``cff = quo(f, h)``, and ``cfg = quo(g, h)``. 
Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x = ring("x", ZZ) >>> R.dup_inner_gcd(x**2 - 1, x**2 - 3*x + 2) (x - 1, x + 1, x - 2) """ if not K.is_Exact: try: exact = K.get_exact() except DomainError: return [K.one], f, g f = dup_convert(f, K, exact) g = dup_convert(g, K, exact) h, cff, cfg = dup_inner_gcd(f, g, exact) h = dup_convert(h, exact, K) cff = dup_convert(cff, exact, K) cfg = dup_convert(cfg, exact, K) return h, cff, cfg elif K.has_Field: if K.is_QQ and query('USE_HEU_GCD'): try: return dup_qq_heu_gcd(f, g, K) except HeuristicGCDFailed: pass return dup_ff_prs_gcd(f, g, K) else: if K.is_ZZ and query('USE_HEU_GCD'): try: return dup_zz_heu_gcd(f, g, K) except HeuristicGCDFailed: pass return dup_rr_prs_gcd(f, g, K) def _dmp_inner_gcd(f, g, u, K): """Helper function for `dmp_inner_gcd()`. """ if not K.is_Exact: try: exact = K.get_exact() except DomainError: return dmp_one(u, K), f, g f = dmp_convert(f, u, K, exact) g = dmp_convert(g, u, K, exact) h, cff, cfg = _dmp_inner_gcd(f, g, u, exact) h = dmp_convert(h, u, exact, K) cff = dmp_convert(cff, u, exact, K) cfg = dmp_convert(cfg, u, exact, K) return h, cff, cfg elif K.has_Field: if K.is_QQ and query('USE_HEU_GCD'): try: return dmp_qq_heu_gcd(f, g, u, K) except HeuristicGCDFailed: pass return dmp_ff_prs_gcd(f, g, u, K) else: if K.is_ZZ and query('USE_HEU_GCD'): try: return dmp_zz_heu_gcd(f, g, u, K) except HeuristicGCDFailed: pass return dmp_rr_prs_gcd(f, g, u, K) def dmp_inner_gcd(f, g, u, K): """ Computes polynomial GCD and cofactors of `f` and `g` in `K[X]`. Returns ``(h, cff, cfg)`` such that ``a = gcd(f, g)``, ``cff = quo(f, h)``, and ``cfg = quo(g, h)``. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y, = ring("x,y", ZZ) >>> f = x**2 + 2*x*y + y**2 >>> g = x**2 + x*y >>> R.dmp_inner_gcd(f, g) (x + y, x + y, x) """ if not u: return dup_inner_gcd(f, g, K) J, (f, g) = dmp_multi_deflate((f, g), u, K) h, cff, cfg = _dmp_inner_gcd(f, g, u, K) return (dmp_inflate(h, J, u, K), dmp_inflate(cff, J, u, K), dmp_inflate(cfg, J, u, K)) def dup_gcd(f, g, K): """ Computes polynomial GCD of `f` and `g` in `K[x]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x = ring("x", ZZ) >>> R.dup_gcd(x**2 - 1, x**2 - 3*x + 2) x - 1 """ return dup_inner_gcd(f, g, K)[0] def dmp_gcd(f, g, u, K): """ Computes polynomial GCD of `f` and `g` in `K[X]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y, = ring("x,y", ZZ) >>> f = x**2 + 2*x*y + y**2 >>> g = x**2 + x*y >>> R.dmp_gcd(f, g) x + y """ return dmp_inner_gcd(f, g, u, K)[0] def dup_rr_lcm(f, g, K): """ Computes polynomial LCM over a ring in `K[x]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x = ring("x", ZZ) >>> R.dup_rr_lcm(x**2 - 1, x**2 - 3*x + 2) x**3 - 2*x**2 - x + 2 """ fc, f = dup_primitive(f, K) gc, g = dup_primitive(g, K) c = K.lcm(fc, gc) h = dup_quo(dup_mul(f, g, K), dup_gcd(f, g, K), K) return dup_mul_ground(h, c, K) def dup_ff_lcm(f, g, K): """ Computes polynomial LCM over a field in `K[x]`. Examples ======== >>> from sympy.polys import ring, QQ >>> R, x = ring("x", QQ) >>> f = QQ(1,2)*x**2 + QQ(7,4)*x + QQ(3,2) >>> g = QQ(1,2)*x**2 + x >>> R.dup_ff_lcm(f, g) x**3 + 7/2*x**2 + 3*x """ h = dup_quo(dup_mul(f, g, K), dup_gcd(f, g, K), K) return dup_monic(h, K) def dup_lcm(f, g, K): """ Computes polynomial LCM of `f` and `g` in `K[x]`. 
Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x = ring("x", ZZ) >>> R.dup_lcm(x**2 - 1, x**2 - 3*x + 2) x**3 - 2*x**2 - x + 2 """ if K.has_Field: return dup_ff_lcm(f, g, K) else: return dup_rr_lcm(f, g, K) def dmp_rr_lcm(f, g, u, K): """ Computes polynomial LCM over a ring in `K[X]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y, = ring("x,y", ZZ) >>> f = x**2 + 2*x*y + y**2 >>> g = x**2 + x*y >>> R.dmp_rr_lcm(f, g) x**3 + 2*x**2*y + x*y**2 """ fc, f = dmp_ground_primitive(f, u, K) gc, g = dmp_ground_primitive(g, u, K) c = K.lcm(fc, gc) h = dmp_quo(dmp_mul(f, g, u, K), dmp_gcd(f, g, u, K), u, K) return dmp_mul_ground(h, c, u, K) def dmp_ff_lcm(f, g, u, K): """ Computes polynomial LCM over a field in `K[X]`. Examples ======== >>> from sympy.polys import ring, QQ >>> R, x,y, = ring("x,y", QQ) >>> f = QQ(1,4)*x**2 + x*y + y**2 >>> g = QQ(1,2)*x**2 + x*y >>> R.dmp_ff_lcm(f, g) x**3 + 4*x**2*y + 4*x*y**2 """ h = dmp_quo(dmp_mul(f, g, u, K), dmp_gcd(f, g, u, K), u, K) return dmp_ground_monic(h, u, K) def dmp_lcm(f, g, u, K): """ Computes polynomial LCM of `f` and `g` in `K[X]`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y, = ring("x,y", ZZ) >>> f = x**2 + 2*x*y + y**2 >>> g = x**2 + x*y >>> R.dmp_lcm(f, g) x**3 + 2*x**2*y + x*y**2 """ if not u: return dup_lcm(f, g, K) if K.has_Field: return dmp_ff_lcm(f, g, u, K) else: return dmp_rr_lcm(f, g, u, K) def dmp_content(f, u, K): """ Returns GCD of multivariate coefficients. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y, = ring("x,y", ZZ) >>> R.dmp_content(2*x*y + 6*x + 4*y + 12) 2*y + 6 """ cont, v = dmp_LC(f, K), u - 1 if dmp_zero_p(f, u): return cont for c in f[1:]: cont = dmp_gcd(cont, c, v, K) if dmp_one_p(cont, v, K): break if K.is_negative(dmp_ground_LC(cont, v, K)): return dmp_neg(cont, v, K) else: return cont def dmp_primitive(f, u, K): """ Returns multivariate content and a primitive polynomial. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y, = ring("x,y", ZZ) >>> R.dmp_primitive(2*x*y + 6*x + 4*y + 12) (2*y + 6, x + 2) """ cont, v = dmp_content(f, u, K), u - 1 if dmp_zero_p(f, u) or dmp_one_p(cont, v, K): return cont, f else: return cont, [ dmp_quo(c, cont, v, K) for c in f ] def dup_cancel(f, g, K, include=True): """ Cancel common factors in a rational function `f/g`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x = ring("x", ZZ) >>> R.dup_cancel(2*x**2 - 2, x**2 - 2*x + 1) (2*x + 2, x - 1) """ return dmp_cancel(f, g, 0, K, include=include) def dmp_cancel(f, g, u, K, include=True): """ Cancel common factors in a rational function `f/g`. Examples ======== >>> from sympy.polys import ring, ZZ >>> R, x,y = ring("x,y", ZZ) >>> R.dmp_cancel(2*x**2 - 2, x**2 - 2*x + 1) (2*x + 2, x - 1) """ K0 = None if K.has_Field and K.has_assoc_Ring: K0, K = K, K.get_ring() cq, f = dmp_clear_denoms(f, u, K0, K, convert=True) cp, g = dmp_clear_denoms(g, u, K0, K, convert=True) else: cp, cq = K.one, K.one _, p, q = dmp_inner_gcd(f, g, u, K) if K0 is not None: _, cp, cq = K.cofactors(cp, cq) p = dmp_convert(p, u, K, K0) q = dmp_convert(q, u, K, K0) K = K0 p_neg = K.is_negative(dmp_ground_LC(p, u, K)) q_neg = K.is_negative(dmp_ground_LC(q, u, K)) if p_neg and q_neg: p, q = dmp_neg(p, u, K), dmp_neg(q, u, K) elif p_neg: cp, p = -cp, dmp_neg(p, u, K) elif q_neg: cp, q = -cp, dmp_neg(q, u, K) if not include: return cp, cq, p, q p = dmp_mul_ground(p, cp, u, K) q = dmp_mul_ground(q, cq, u, K) return p, q
0.000315
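The dup_zz_heu_gcd and dmp_zz_heu_gcd docstrings above describe the GCDHEU idea in prose: evaluate both polynomials at a large integer point, take a fast integer GCD, and read the candidate polynomial GCD back out of that integer. The following is only a minimal sketch of that idea, using plain Python integers and dense coefficient lists (lowest degree first); the evaluation point is hand-picked here, whereas the routines above derive it from the coefficient norms and retry up to HEU_GCD_MAX times, and the final verification-by-division step is omitted.

# GCDHEU sketch: gcd(x**2 - 1, x**2 - 3*x + 2) should come out as x - 1.
from math import gcd

def interpolate(h, x):
    # Recover candidate GCD coefficients as balanced base-x digits of h.
    coeffs = []
    while h:
        c = h % x
        if c > x // 2:
            c -= x
        coeffs.append(c)
        h = (h - c) // x
    return coeffs

def evaluate(coeffs, x):
    return sum(c * x**i for i, c in enumerate(coeffs))

f = [-1, 0, 1]   # x**2 - 1
g = [2, -3, 1]   # x**2 - 3*x + 2
x = 100          # assumed "large enough" for this toy input
print(interpolate(gcd(evaluate(f, x), evaluate(g, x)), x))  # [-1, 1], i.e. x - 1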
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import copy import functools from typing import List from mypy.nodes import ARG_NAMED_OPT # pylint: disable=no-name-in-module from mypy.plugin import FunctionContext, Plugin # pylint: disable=no-name-in-module from mypy.types import CallableType, NoneType, UnionType # pylint: disable=no-name-in-module TYPED_DECORATORS = { "fallback_to_default_project_id of GoogleBaseHook": ["project_id"], "airflow.providers.google.cloud.hooks.dataflow._fallback_to_project_id_from_variables": ["project_id"], "provide_gcp_credential_file of GoogleBaseHook": [], } class TypedDecoratorPlugin(Plugin): """Mypy plugin for typed decorators.""" def get_function_hook(self, fullname: str): """Check for known typed decorators by name.""" if fullname in TYPED_DECORATORS: return functools.partial( _analyze_decorator, provided_arguments=TYPED_DECORATORS[fullname], ) return None def _analyze_decorator(function_ctx: FunctionContext, provided_arguments: List[str]): if not isinstance(function_ctx.arg_types[0][0], CallableType): return function_ctx.default_return_type if not isinstance(function_ctx.default_return_type, CallableType): return function_ctx.default_return_type return _change_decorator_function_type( function_ctx.arg_types[0][0], function_ctx.default_return_type, provided_arguments, ) def _change_decorator_function_type( decorated: CallableType, decorator: CallableType, provided_arguments: List[str], ) -> CallableType: decorator.arg_kinds = decorated.arg_kinds decorator.arg_names = decorated.arg_names # Mark provided arguments as optional decorator.arg_types = copy.copy(decorated.arg_types) for argument in provided_arguments: index = decorated.arg_names.index(argument) decorated_type = decorated.arg_types[index] decorator.arg_types[index] = UnionType.make_union([decorated_type, NoneType()]) decorated.arg_kinds[index] = ARG_NAMED_OPT return decorator def plugin(version: str): # pylint: disable=unused-argument """Mypy plugin entrypoint.""" return TypedDecoratorPlugin
0.00166
from django.db import transaction from django.conf import settings from django.contrib import admin from django.contrib.auth.forms import (UserCreationForm, UserChangeForm, AdminPasswordChangeForm) from django.contrib.auth.models import User, Group from django.contrib import messages from django.core.exceptions import PermissionDenied from django.http import HttpResponseRedirect, Http404 from django.shortcuts import get_object_or_404 from django.template.response import TemplateResponse from django.utils.html import escape from django.utils.decorators import method_decorator from django.utils.translation import ugettext, ugettext_lazy as _ from django.views.decorators.csrf import csrf_protect from django.views.decorators.debug import sensitive_post_parameters csrf_protect_m = method_decorator(csrf_protect) sensitive_post_parameters_m = method_decorator(sensitive_post_parameters()) class GroupAdmin(admin.ModelAdmin): search_fields = ('name',) ordering = ('name',) filter_horizontal = ('permissions',) def formfield_for_manytomany(self, db_field, request=None, **kwargs): if db_field.name == 'permissions': qs = kwargs.get('queryset', db_field.rel.to.objects) # Avoid a major performance hit resolving permission names which # triggers a content_type load: kwargs['queryset'] = qs.select_related('content_type') return super(GroupAdmin, self).formfield_for_manytomany( db_field, request=request, **kwargs) class UserAdmin(admin.ModelAdmin): add_form_template = 'admin/auth/user/add_form.html' change_user_password_template = None fieldsets = ( (None, {'fields': ('username', 'password')}), (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}), (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser', 'groups', 'user_permissions')}), (_('Important dates'), {'fields': ('last_login', 'date_joined')}), ) add_fieldsets = ( (None, { 'classes': ('wide',), 'fields': ('username', 'password1', 'password2')} ), ) form = UserChangeForm add_form = UserCreationForm change_password_form = AdminPasswordChangeForm list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff') list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups') search_fields = ('username', 'first_name', 'last_name', 'email') ordering = ('username',) filter_horizontal = ('groups', 'user_permissions',) def get_fieldsets(self, request, obj=None): if not obj: return self.add_fieldsets return super(UserAdmin, self).get_fieldsets(request, obj) def get_form(self, request, obj=None, **kwargs): """ Use special form during user creation """ defaults = {} if obj is None: defaults.update({ 'form': self.add_form, 'fields': admin.util.flatten_fieldsets(self.add_fieldsets), }) defaults.update(kwargs) return super(UserAdmin, self).get_form(request, obj, **defaults) def get_urls(self): from django.conf.urls import patterns return patterns('', (r'^(\d+)/password/$', self.admin_site.admin_view(self.user_change_password)) ) + super(UserAdmin, self).get_urls() def lookup_allowed(self, lookup, value): # See #20078: we don't want to allow any lookups involving passwords. if lookup.startswith('password'): return False return super(UserAdmin, self).lookup_allowed(lookup, value) @sensitive_post_parameters_m @csrf_protect_m @transaction.commit_on_success def add_view(self, request, form_url='', extra_context=None): # It's an error for a user to have add permission but NOT change # permission for users. 
If we allowed such users to add users, they # could create superusers, which would mean they would essentially have # the permission to change users. To avoid the problem entirely, we # disallow users from adding users if they don't have change # permission. if not self.has_change_permission(request): if self.has_add_permission(request) and settings.DEBUG: # Raise Http404 in debug mode so that the user gets a helpful # error message. raise Http404( 'Your user does not have the "Change user" permission. In ' 'order to add users, Django requires that your user ' 'account have both the "Add user" and "Change user" ' 'permissions set.') raise PermissionDenied if extra_context is None: extra_context = {} username_field = self.model._meta.get_field(self.model.USERNAME_FIELD) defaults = { 'auto_populated_fields': (), 'username_help_text': username_field.help_text, } extra_context.update(defaults) return super(UserAdmin, self).add_view(request, form_url, extra_context) @sensitive_post_parameters_m def user_change_password(self, request, id, form_url=''): if not self.has_change_permission(request): raise PermissionDenied user = get_object_or_404(self.queryset(request), pk=id) if request.method == 'POST': form = self.change_password_form(user, request.POST) if form.is_valid(): form.save() msg = ugettext('Password changed successfully.') messages.success(request, msg) return HttpResponseRedirect('..') else: form = self.change_password_form(user) fieldsets = [(None, {'fields': list(form.base_fields)})] adminForm = admin.helpers.AdminForm(form, fieldsets, {}) context = { 'title': _('Change password: %s') % escape(user.get_username()), 'adminForm': adminForm, 'form_url': form_url, 'form': form, 'is_popup': '_popup' in request.REQUEST, 'add': True, 'change': False, 'has_delete_permission': False, 'has_change_permission': True, 'has_absolute_url': False, 'opts': self.model._meta, 'original': user, 'save_as': False, 'show_save': True, } return TemplateResponse(request, self.change_user_password_template or 'admin/auth/user/change_password.html', context, current_app=self.admin_site.name) def response_add(self, request, obj, post_url_continue=None): """ Determines the HttpResponse for the add_view stage. It mostly defers to its superclass implementation but is customized because the User model has a slightly different workflow. """ # We should allow further modification of the user just added i.e. the # 'Save' button should behave like the 'Save and continue editing' # button except in two scenarios: # * The user has pressed the 'Save and add another' button # * We are adding a user in a popup if '_addanother' not in request.POST and '_popup' not in request.POST: request.POST['_continue'] = 1 return super(UserAdmin, self).response_add(request, obj, post_url_continue) admin.site.register(Group, GroupAdmin) admin.site.register(User, UserAdmin)
0.000904
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.website.utils import find_first_image from frappe.utils import cstr import re def execute(): item_details = frappe._dict() for d in frappe.db.sql("select name, description_html, description from `tabItem`", as_dict=1): description = cstr(d.description_html).strip() or cstr(d.description).strip() image_url, new_desc = extract_image_and_description(description) item_details.setdefault(d.name, frappe._dict({ "old_description": description, "new_description": new_desc, "image_url": image_url })) dt_list= ["Purchase Order Item","Supplier Quotation Item", "BOM", "BOM Explosion Item" , \ "BOM Item", "Opportunity Item" , "Quotation Item" , "Sales Order Item" , "Delivery Note Item" , \ "Material Request Item" , "Purchase Receipt Item" , "Stock Entry Detail"] for dt in dt_list: frappe.reload_doctype(dt) records = frappe.db.sql("""select name, `{0}` as item_code, description from `tab{1}` where description is not null and image is null and description like '%%<img%%'""" .format("item" if dt=="BOM" else "item_code", dt), as_dict=1) count = 1 for d in records: if d.item_code and item_details.get(d.item_code) \ and cstr(d.description) == item_details.get(d.item_code).old_description: image_url = item_details.get(d.item_code).image_url desc = item_details.get(d.item_code).new_description else: image_url, desc = extract_image_and_description(cstr(d.description)) if image_url: frappe.db.sql("""update `tab{0}` set description = %s, image = %s where name = %s """.format(dt), (desc, image_url, d.name)) count += 1 if count % 500 == 0: frappe.db.commit() def extract_image_and_description(data): image_url = find_first_image(data) desc = re.sub("\<img[^>]+\>", "", data) return image_url, desc
0.029633
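The patch above combines frappe's find_first_image with a regular expression that strips <img> tags out of item descriptions. Below is a standalone sketch of just the tag-stripping step; the sample description is made up and find_first_image is not reproduced. A raw-string pattern is used so that newer Python versions do not emit an invalid-escape-sequence warning for the non-raw "\<img[^>]+\>" spelling used in the patch.

import re

description = 'Steel widget <img src="/files/widget.png" /> 50 mm, blue'
# Drop the image tag, keeping the surrounding text of the description.
print(re.sub(r"<img[^>]+>", "", description))  # 'Steel widget  50 mm, blue'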
#-*- coding: utf-8 -*- import os DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', '[email protected]'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'django_sample_app_test', 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': '', } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'Europe/Brussels' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # Absolute path to the directory that holds media. # Example: "/home/media/media.lawrence.com/" MEDIA_ROOT = os.path.join(os.path.abspath(os.path.curdir), 'tests') # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash if there is a path component (optional in other cases). # Examples: "http://media.lawrence.com", "http://example.com/media/" MEDIA_URL = '' STATIC_URL = '/static/' SECRET_KEY = 'v2824l&2-n+4zznbsk9c-ap5i)b3e8b+%*a=dxqlahm^%)68jn' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', ) ROOT_URLCONF = 'sample_app.tests.urls' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. # os.path.join(os.path.dirname(__file__), "..", "templates"), os.path.join(os.path.dirname(__file__), 'templates'), ) INSTALLED_APPS = [ 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'sample_app', 'sample_app.tests', ] SAMPLE_APP_REDIRECT_TO_URL_NAME = 'home'
0.002476
from collections import Mapping, MutableMapping try: from threading import RLock except ImportError: # Platform-specific: No threads available class RLock: def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): pass try: # Python 2.7+ from collections import OrderedDict except ImportError: from .packages.ordered_dict import OrderedDict from .packages.six import iterkeys, itervalues, PY3 __all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict'] _Null = object() class RecentlyUsedContainer(MutableMapping): """ Provides a thread-safe dict-like container which maintains up to ``maxsize`` keys while throwing away the least-recently-used keys beyond ``maxsize``. :param maxsize: Maximum number of recent elements to retain. :param dispose_func: Every time an item is evicted from the container, ``dispose_func(value)`` is called. Callback which will get called """ ContainerCls = OrderedDict def __init__(self, maxsize=10, dispose_func=None): self._maxsize = maxsize self.dispose_func = dispose_func self._container = self.ContainerCls() self.lock = RLock() def __getitem__(self, key): # Re-insert the item, moving it to the end of the eviction line. with self.lock: item = self._container.pop(key) self._container[key] = item return item def __setitem__(self, key, value): evicted_value = _Null with self.lock: # Possibly evict the existing value of 'key' evicted_value = self._container.get(key, _Null) self._container[key] = value # If we didn't evict an existing value, we might have to evict the # least recently used item from the beginning of the container. if len(self._container) > self._maxsize: _key, evicted_value = self._container.popitem(last=False) if self.dispose_func and evicted_value is not _Null: self.dispose_func(evicted_value) def __delitem__(self, key): with self.lock: value = self._container.pop(key) if self.dispose_func: self.dispose_func(value) def __len__(self): with self.lock: return len(self._container) def __iter__(self): raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.') def clear(self): with self.lock: # Copy pointers to all values, then wipe the mapping values = list(itervalues(self._container)) self._container.clear() if self.dispose_func: for value in values: self.dispose_func(value) def keys(self): with self.lock: return list(iterkeys(self._container)) _dict_setitem = dict.__setitem__ _dict_getitem = dict.__getitem__ _dict_delitem = dict.__delitem__ _dict_contains = dict.__contains__ _dict_setdefault = dict.setdefault class HTTPHeaderDict(dict): """ :param headers: An iterable of field-value pairs. Must not contain multiple field names when compared case-insensitively. :param kwargs: Additional field-value pairs to pass in to ``dict.update``. A ``dict`` like container for storing HTTP Headers. Field names are stored and compared case-insensitively in compliance with RFC 7230. Iteration provides the first case-sensitive key seen for each case-insensitive pair. Using ``__setitem__`` syntax overwrites fields that compare equal case-insensitively in order to maintain ``dict``'s api. For fields that compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add`` in a loop. If multiple fields that are equal case-insensitively are passed to the constructor or ``.update``, the behavior is undefined and some will be lost. 
>>> headers = HTTPHeaderDict() >>> headers.add('Set-Cookie', 'foo=bar') >>> headers.add('set-cookie', 'baz=quxx') >>> headers['content-length'] = '7' >>> headers['SET-cookie'] 'foo=bar, baz=quxx' >>> headers['Content-Length'] '7' """ def __init__(self, headers=None, **kwargs): dict.__init__(self) if headers is not None: if isinstance(headers, HTTPHeaderDict): self._copy_from(headers) else: self.extend(headers) if kwargs: self.extend(kwargs) def __setitem__(self, key, val): return _dict_setitem(self, key.lower(), (key, val)) def __getitem__(self, key): val = _dict_getitem(self, key.lower()) return ', '.join(val[1:]) def __delitem__(self, key): return _dict_delitem(self, key.lower()) def __contains__(self, key): return _dict_contains(self, key.lower()) def __eq__(self, other): if not isinstance(other, Mapping) and not hasattr(other, 'keys'): return False if not isinstance(other, type(self)): other = type(self)(other) return dict((k1, self[k1]) for k1 in self) == dict((k2, other[k2]) for k2 in other) def __ne__(self, other): return not self.__eq__(other) values = MutableMapping.values get = MutableMapping.get update = MutableMapping.update if not PY3: # Python 2 iterkeys = MutableMapping.iterkeys itervalues = MutableMapping.itervalues __marker = object() def pop(self, key, default=__marker): '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' # Using the MutableMapping function directly fails due to the private marker. # Using ordinary dict.pop would expose the internal structures. # So let's reinvent the wheel. try: value = self[key] except KeyError: if default is self.__marker: raise return default else: del self[key] return value def discard(self, key): try: del self[key] except KeyError: pass def add(self, key, val): """Adds a (name, value) pair, doesn't overwrite the value if it already exists. >>> headers = HTTPHeaderDict(foo='bar') >>> headers.add('Foo', 'baz') >>> headers['foo'] 'bar, baz' """ key_lower = key.lower() new_vals = key, val # Keep the common case aka no item present as fast as possible vals = _dict_setdefault(self, key_lower, new_vals) if new_vals is not vals: # new_vals was not inserted, as there was a previous one if isinstance(vals, list): # If already several items got inserted, we have a list vals.append(val) else: # vals should be a tuple then, i.e. only one item so far # Need to convert the tuple to list for further extension _dict_setitem(self, key_lower, [vals[0], vals[1], val]) def extend(*args, **kwargs): """Generic import function for any type of header-like object. Adapted version of MutableMapping.update in order to insert items with self.add instead of self.__setitem__ """ if len(args) > 2: raise TypeError("update() takes at most 2 positional " "arguments ({} given)".format(len(args))) elif not args: raise TypeError("update() takes at least 1 argument (0 given)") self = args[0] other = args[1] if len(args) >= 2 else () if isinstance(other, Mapping): for key in other: self.add(key, other[key]) elif hasattr(other, "keys"): for key in other.keys(): self.add(key, other[key]) else: for key, value in other: self.add(key, value) for key, value in kwargs.items(): self.add(key, value) def getlist(self, key): """Returns a list of all the values for the named field. 
Returns an empty list if the key doesn't exist.""" try: vals = _dict_getitem(self, key.lower()) except KeyError: return [] else: if isinstance(vals, tuple): return [vals[1]] else: return vals[1:] # Backwards compatibility for httplib getheaders = getlist getallmatchingheaders = getlist iget = getlist def __repr__(self): return "%s(%s)" % (type(self).__name__, dict(self.itermerged())) def _copy_from(self, other): for key in other: val = _dict_getitem(other, key) if isinstance(val, list): # Don't need to convert tuples val = list(val) _dict_setitem(self, key, val) def copy(self): clone = type(self)() clone._copy_from(self) return clone def iteritems(self): """Iterate over all header lines, including duplicate ones.""" for key in self: vals = _dict_getitem(self, key) for val in vals[1:]: yield vals[0], val def itermerged(self): """Iterate over all headers, merging duplicate ones together.""" for key in self: val = _dict_getitem(self, key) yield val[0], ', '.join(val[1:]) def items(self): return list(self.iteritems()) @classmethod def from_httplib(cls, message, duplicates=('set-cookie',)): # Python 2 """Read headers from a Python 2 httplib message object.""" ret = cls(message.items()) # ret now contains only the last header line for each duplicate. # Importing with all duplicates would be nice, but this would # mean to repeat most of the raw parsing already done, when the # message object was created. Extracting only the headers of interest # separately, the cookies, should be faster and requires less # extra code. for key in duplicates: ret.discard(key) for val in message.getheaders(key): ret.add(key, val) return ret
0.000955
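RecentlyUsedContainer above evicts the least-recently-used key once maxsize is exceeded and hands the evicted value to dispose_func. A minimal usage sketch, assuming the module is importable under its upstream name urllib3._collections:

from urllib3._collections import RecentlyUsedContainer

evicted = []
lru = RecentlyUsedContainer(maxsize=2, dispose_func=evicted.append)
lru['a'] = 1
lru['b'] = 2
lru['a']            # touching 'a' makes 'b' the oldest entry
lru['c'] = 3        # exceeds maxsize, so 'b' is evicted and dispose_func gets 2
print(sorted(lru.keys()), evicted)   # ['a', 'c'] [2]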
# -*- coding: utf-8 -*- ############################################################################## # # This file is part of partner_academic_title, # an Odoo module. # # Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>) # # partner_academic_title is free software: # you can redistribute it and/or modify it under the terms of the GNU # Affero General Public License as published by the Free Software # Foundation,either version 3 of the License, or (at your option) any # later version. # # partner_academic_title is distributed # in the hope that it will be useful, but WITHOUT ANY WARRANTY; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR # PURPOSE. See the GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with partner_academic_title. # If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models, fields class PartnerAcademicTitle(models.Model): _name = 'partner.academic.title' name = fields.Char(required=True, translate=True) sequence = fields.Integer(required=True, help="""defines the order to display titles""") active = fields.Boolean(default=True)
0
import unittest from depsolver.package \ import \ PackageInfo from depsolver.pool \ import \ Pool from depsolver.repository \ import \ Repository from depsolver.request \ import \ _Job, Request from depsolver.requirement \ import \ Requirement from depsolver.version \ import \ SemanticVersion P = PackageInfo.from_string V = SemanticVersion.from_string R = Requirement.from_string class TestRequest(unittest.TestCase): def setUp(self): self.mkl_10_3_0 = P("mkl-10.3.0") self.mkl_11_0_0 = P("mkl-11.0.0") self.numpy_1_7_0 = P("numpy-1.7.0; depends (mkl >= 11.0.0)") self.scipy_0_12_0 = P("scipy-0.12.0; depends (numpy >= 1.7.0)") repo = Repository([self.mkl_10_3_0, self.mkl_11_0_0, self.numpy_1_7_0, self.scipy_0_12_0]) self.pool = Pool([repo]) def test_simple_install(self): r_jobs = [ _Job([self.scipy_0_12_0], "install", R("scipy")), _Job([self.numpy_1_7_0], "install", R("numpy")), ] request = Request(self.pool) request.install(R("scipy")) request.install(R("numpy")) self.assertEqual(request.jobs, r_jobs) def test_simple_update(self): r_jobs = [ _Job([self.numpy_1_7_0], "update", R("numpy")), ] request = Request(self.pool) request.update(R("numpy")) self.assertEqual(request.jobs, r_jobs) def test_simple_remove(self): r_jobs = [ _Job([self.numpy_1_7_0], "remove", R("numpy")), ] request = Request(self.pool) request.remove(R("numpy")) self.assertEqual(request.jobs, r_jobs) def test_simple_upgrade(self): r_jobs = [_Job([], "upgrade", None)] request = Request(self.pool) request.upgrade() self.assertEqual(request.jobs, r_jobs)
0.004199
import logging import typing from collections import defaultdict from typing import Union, List, Optional, Dict from rx.subjects import Subject from twisted.internet import reactor from twisted.internet.defer import Deferred, DeferredList, succeed from twisted.python.failure import Failure from txwebsocket.txws import WebSocketFactory from vortex.DeferUtil import yesMainThread from vortex.PayloadEnvelope import VortexMsgList, PayloadEnvelope from vortex.PayloadIO import PayloadIO from vortex.PayloadPriority import DEFAULT_PRIORITY from vortex.VortexABC import VortexABC from vortex.VortexClientHttp import VortexClientHttp from vortex.VortexClientTcp import VortexClientTcp from vortex.VortexServer import VortexServer from vortex.VortexServerHttpResource import VortexServerHttpResource from vortex.VortexServerTcp import VortexTcpServerFactory from vortex.VortexServerWebsocket import VortexWebsocketServerFactory, \ VortexWebSocketUpgradeResource, VortexWrappedWebSocketFactory logger = logging.getLogger(__name__) broadcast = None browserVortexName = "browser" class NoVortexException(Exception): """ No Vortex Exception This is raised when a remote vortex doesn't exist. """ VortexUuidList = List[str] class VortexFactory: __vortexServersByName: Dict[str, List[VortexABC]] = defaultdict(list) __vortexClientsByName: Dict[str, List[VortexABC]] = defaultdict(list) __vortexStatusChangeSubjectsByName: Dict[str, Subject] = {} __isShutdown = False def __init__(self): raise Exception("Vortex Factory should not be instantiated") @classmethod def shutdown(cls) -> None: cls.__isShutdown = True @classmethod def _getVortexSendRefs(cls, name=None, uuid=None ) -> List[typing.Tuple[VortexABC, List[str]]]: assert name or uuid results = [] # logger.debug("-" * 80) for vortex in cls._allVortexes(): uuids: List[str] = [] # logger.debug("FROM : %s", vortex.localVortexInfo) for remoteVortexInfo in vortex.remoteVortexInfo: # logger.debug(" REMOTE : %s", remoteVortexInfo) if ((name is None or remoteVortexInfo.name == name) and (uuid is None or remoteVortexInfo.uuid == uuid)): uuids.append(remoteVortexInfo.uuid) if uuids: results.append((vortex, uuids)) return results @classmethod def _allVortexes(cls): """ All Vortexes :return: A list of all the vortexes, both client and server """ vortexes = [] for vortexList in cls.__vortexServersByName.values(): vortexes += vortexList for vortexList in cls.__vortexClientsByName.values(): vortexes += vortexList return vortexes @classmethod def createServer(cls, name: str, rootResource) -> None: """ Create Server Create a vortex server, VortexServer clients connect to this vortex serer via HTTP(S) VortexServer clients will connect and provide their names. This allows the factory to abstract away the vortex UUIDs from their names. :param name: The name of the local vortex. :param rootResource: The resource to add the vortex to. An implementation of C{twisted.web.resource.IResource} :return: None """ vortexServer = VortexServer(name) cls.__vortexServersByName[name].append(vortexServer) vortexResource = VortexServerHttpResource(vortexServer) rootResource.putChild(b"vortex", vortexResource) @classmethod def createWebsocketServer(cls, name: str, port: int) -> None: """ Create Server Create a vortex server, VortexServer clients connect to this vortex serer via HTTP(S) VortexServer clients will connect and provide their names. This allows the factory to abstract away the vortex UUIDs from their names. :param name: The name of the local vortex. 
:param port: The tcp port to listen on :return: None """ vortexServer = VortexServer(name) cls.__vortexServersByName[name].append(vortexServer) vortexWebsocketServerFactory = VortexWebsocketServerFactory(vortexServer) reactor.listenTCP(port, WebSocketFactory(vortexWebsocketServerFactory)) @classmethod def createHttpWebsocketServer(cls, name: str, rootResource) -> None: """ Create Server Create a vortex server, VortexServer clients connect to this vortex serer via HTTP(S) VortexServer clients will connect and provide their names. This allows the factory to abstract away the vortex UUIDs from their names. :param name: The name of the local vortex. :param port: The tcp port to listen on :return: None """ vortexServer = VortexServer(name) cls.__vortexServersByName[name].append(vortexServer) vortexWebsocketServerFactory = VortexWebsocketServerFactory(vortexServer) websocketFactory = VortexWrappedWebSocketFactory(vortexWebsocketServerFactory) websocketResource = VortexWebSocketUpgradeResource(websocketFactory) rootResource.putChild(b"vortexws", websocketResource) @classmethod def createTcpServer(cls, name: str, port: int) -> None: """ Create Server Create a vortex server, VortexServer clients connect to this vortex serer via HTTP(S) VortexServer clients will connect and provide their names. This allows the factory to abstract away the vortex UUIDs from their names. :param name: The name of the local vortex. :param port: The tcp port to listen on :return: None """ vortexServer = VortexServer(name) cls.__vortexServersByName[name].append(vortexServer) vortexTcpServerFactory = VortexTcpServerFactory(vortexServer) reactor.listenTCP(port, vortexTcpServerFactory) @classmethod def createHttpClient(cls, name: str, host: str, port: int) -> Deferred: """ Create Client Connect to a vortex Server. :param name: The name of the local vortex. :param host: The hostname of the remote vortex. :param port: The port of the remote vortex. :return: A deferred from the VortexHttpClient.connect method """ logger.info('Connecting to Peek Server HTTP %s:%s', host, port) vortexClient = VortexClientHttp(name) cls.__vortexClientsByName[name].append(vortexClient) return vortexClient.connect(host, port) @classmethod def createTcpClient(cls, name: str, host: str, port: int) -> Deferred: """ Create Client Connect to a vortex Server. :param name: The name of the local vortex. :param host: The hostname of the remote vortex. :param port: The port of the remote vortex. 
:return: A deferred from the VortexTcpClient.connect method """ logger.info('Connecting to Peek Server TCP %s:%s', host, port) vortexClient = VortexClientTcp(name) cls.__vortexClientsByName[name].append(vortexClient) return vortexClient.connect(host, port) @classmethod def isVortexNameLocal(cls, vortexName: str) -> bool: for vortex in cls._allVortexes(): if vortex.localVortexInfo.name == vortexName: return True return False @classmethod def getLocalVortexClients(cls, localVortexName: str) -> List[VortexABC]: vortexes: List[VortexABC] = [] for items in cls.__vortexClientsByName.values(): vortexes.extend( filter(lambda x: x.localVortexInfo.name == localVortexName, items) ) return vortexes @classmethod def getRemoteVortexUuids(cls) -> List[str]: remoteUuids = [] for vortex in cls._allVortexes(): for remoteVortexInfo in vortex.remoteVortexInfo: remoteUuids.append(remoteVortexInfo.uuid) return remoteUuids @classmethod def getRemoteVortexName(cls) -> List[str]: remoteNames = set() for vortex in cls._allVortexes(): for remoteVortexInfo in vortex.remoteVortexInfo: remoteNames.add(remoteVortexInfo.name) return list(remoteNames) @classmethod def sendVortexMsg(cls, vortexMsgs: Union[VortexMsgList, bytes], destVortexName: Optional[str] = broadcast, destVortexUuid: Optional[str] = broadcast) -> Deferred: """ Send VortexMsg Sends a payload to the remote vortex. :param vortexMsgs: The vortex message(s) to send to the remote vortex. :param destVortexName: The name of the vortex to send the payload to. This can be null, If provided, it's used to limit the vortexes the message is sent to. :param destVortexUuid: The uuid of the vortex to send the payload to, This can be null, If provided, it's used to limit the vortexes the message is sent to. :return: A C{Deferred} which will callback when the message has been sent. """ vortexAndUuids = cls._getVortexSendRefs(destVortexName, destVortexUuid) if not vortexAndUuids: raise NoVortexException("Can not find vortexes to send message to," " name=%s, uuid=%s" % (destVortexName, destVortexUuid)) deferreds = [] for vortex, uuids in vortexAndUuids: for uuid in uuids: deferreds.append(vortex.sendVortexMsg(vortexMsgs, uuid)) return DeferredList(deferreds) @classmethod def sendVortexMsgLocally(cls, vortexMsgs: Union[VortexMsgList, bytes], priority: int = DEFAULT_PRIORITY) -> Deferred: """ Send VortexMsg Sends a payload to the remote vortex. :param vortexMsgs: The vortex message(s) to deliver locally. :return: A C{Deferred} which will callback when the message has been delivered. """ yesMainThread() vortexUuid = "local" vortexName = "local" httpSession = "local" sendResponse = VortexFactory.sendVortexMsgLocally vortexMsgs = [vortexMsgs] if isinstance(vortexMsgs, bytes) else vortexMsgs def send(payloadEnvelope: PayloadEnvelope): try: PayloadIO().process( payloadEnvelope=payloadEnvelope, vortexUuid=vortexUuid, vortexName=vortexName, httpSession=httpSession, sendResponse=sendResponse ) return succeed(True) except Exception as e: return Failure(e) deferreds = [] for vortexMsg in vortexMsgs: d = PayloadEnvelope().fromVortexMsgDefer(vortexMsg) d.addCallback(send) deferreds.append(d) return DeferredList(deferreds) @classmethod def subscribeToVortexStatusChange(cls, vortexName: str) -> Subject: """ Subscribe to Vortex Status Change Subscribing to the returned observable/subject will provided updates of when the vortex goes offline, or online. .. warning:: This is only implemented for TCP Client vortexes. :param vortexName: The name of the vortex to subscribe to the status for. 
This will be the name of the remote vortex. """ if not vortexName in cls.__vortexStatusChangeSubjectsByName: cls.__vortexStatusChangeSubjectsByName[vortexName] = Subject() return cls.__vortexStatusChangeSubjectsByName[vortexName] @classmethod def _notifyOfVortexStatusChange(cls, vortexName: str, online: bool) -> None: if cls.__isShutdown: return logger.debug("Vortex %s went %s", vortexName, ("online" if online else "offline")) if vortexName in cls.__vortexStatusChangeSubjectsByName: cls.__vortexStatusChangeSubjectsByName[vortexName].on_next(online)
0.002026
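# A minimal usage sketch for the VortexFactory classmethods shown in the record
# above (not from the source). The import path and port number are assumptions;
# a running Twisted reactor is also assumed, since createTcpServer() calls
# reactor.listenTCP() internally.
from vortex.VortexFactory import VortexFactory  # assumed module path

def start_and_broadcast(vortexMsg: bytes):
    # Listen for vortex clients on an arbitrary example port.
    VortexFactory.createTcpServer(name="serverVortex", port=10101)

    # Connect a local client vortex to that server; returns a Deferred.
    d = VortexFactory.createTcpClient("clientVortex", "127.0.0.1", 10101)

    # Watch the client's online/offline status (TCP client vortexes only,
    # per the warning in subscribeToVortexStatusChange above).
    VortexFactory.subscribeToVortexStatusChange("clientVortex").subscribe(
        lambda online: print("client online" if online else "client offline")
    )

    # Once connected, broadcast the (already encoded) vortex message to every
    # remote vortex; vortexMsg is assumed to come from a PayloadEnvelope.
    d.addCallback(lambda _: VortexFactory.sendVortexMsg(vortexMsg))
    return d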
""" MicroPython Remote - Interaction and automation tool for MicroPython MIT license; Copyright (c) 2019-2021 Damien P. George This program provides a set of utilities to interact with and automate a MicroPython device over a serial connection. Commands supported are: mpremote -- auto-detect, connect and enter REPL mpremote <device-shortcut> -- connect to given device mpremote connect <device> -- connect to given device mpremote disconnect -- disconnect current device mpremote mount <local-dir> -- mount local directory on device mpremote eval <string> -- evaluate and print the string mpremote exec <string> -- execute the string mpremote run <script> -- run the given local script mpremote fs <command> <args...> -- execute filesystem commands on the device mpremote repl -- enter REPL """ import os, sys import serial.tools.list_ports from . import pyboardextended as pyboard from .console import Console, ConsolePosix _PROG = "mpremote" _BUILTIN_COMMAND_EXPANSIONS = { # Device connection shortcuts. "a0": "connect /dev/ttyACM0", "a1": "connect /dev/ttyACM1", "a2": "connect /dev/ttyACM2", "a3": "connect /dev/ttyACM3", "u0": "connect /dev/ttyUSB0", "u1": "connect /dev/ttyUSB1", "u2": "connect /dev/ttyUSB2", "u3": "connect /dev/ttyUSB3", "c0": "connect COM0", "c1": "connect COM1", "c2": "connect COM2", "c3": "connect COM3", # Filesystem shortcuts. "cat": "fs cat", "ls": "fs ls", "cp": "fs cp", "rm": "fs rm", "mkdir": "fs mkdir", "rmdir": "fs rmdir", "df": [ "exec", "import uos\nprint('mount \\tsize \\tused \\tavail \\tuse%')\nfor _m in [''] + uos.listdir('/'):\n _s = uos.stat('/' + _m)\n if not _s[0] & 1 << 14: continue\n _s = uos.statvfs(_m)\n if _s[0]:\n _size = _s[0] * _s[2]; _free = _s[0] * _s[3]; print(_m, _size, _size - _free, _free, int(100 * (_size - _free) / _size), sep='\\t')", ], # Other shortcuts. "reset t_ms=100": [ "exec", "--no-follow", "import utime, umachine; utime.sleep_ms(t_ms); umachine.reset()", ], "bootloader t_ms=100": [ "exec", "--no-follow", "import utime, umachine; utime.sleep_ms(t_ms); umachine.bootloader()", ], "setrtc": [ "exec", "import machine; machine.RTC().datetime((2020, 1, 1, 0, 10, 0, 0, 0))", ], } def load_user_config(): # Create empty config object. config = __build_class__(lambda: None, "Config")() config.commands = {} # Get config file name. path = os.getenv("XDG_CONFIG_HOME") if path is None: path = os.getenv("HOME") if path is None: return config path = os.path.join(path, ".config") path = os.path.join(path, _PROG) config_file = os.path.join(path, "config.py") # Check if config file exists. if not os.path.exists(config_file): return config # Exec the config file in its directory. 
with open(config_file) as f: config_data = f.read() prev_cwd = os.getcwd() os.chdir(path) exec(config_data, config.__dict__) os.chdir(prev_cwd) return config def prepare_command_expansions(config): global _command_expansions _command_expansions = {} for command_set in (_BUILTIN_COMMAND_EXPANSIONS, config.commands): for cmd, sub in command_set.items(): cmd = cmd.split() if len(cmd) == 1: args = () else: args = tuple(c.split("=") for c in cmd[1:]) if isinstance(sub, str): sub = sub.split() _command_expansions[cmd[0]] = (args, sub) def do_command_expansion(args): def usage_error(cmd, exp_args, msg): print(f"Command {cmd} {msg}; signature is:") print(" ", cmd, " ".join("=".join(a) for a in exp_args)) sys.exit(1) last_arg_idx = len(args) pre = [] while args and args[0] in _command_expansions: cmd = args.pop(0) exp_args, exp_sub = _command_expansions[cmd] for exp_arg in exp_args: exp_arg_name = exp_arg[0] if args and "=" not in args[0]: # Argument given without a name. value = args.pop(0) elif args and args[0].startswith(exp_arg_name + "="): # Argument given with correct name. value = args.pop(0).split("=", 1)[1] else: # No argument given, or argument given with a different name. if len(exp_arg) == 1: # Required argument (it has no default). usage_error(cmd, exp_args, f"missing argument {exp_arg_name}") else: # Optional argument with a default. value = exp_arg[1] pre.append(f"{exp_arg_name}={value}") args[0:0] = exp_sub last_arg_idx = len(exp_sub) if last_arg_idx < len(args) and "=" in args[last_arg_idx]: # Extra unknown arguments given. arg = args[last_arg_idx].split("=", 1)[0] usage_error(cmd, exp_args, f"given unexpected argument {arg}") sys.exit(1) # Insert expansion with optional setting of arguments. if pre: args[0:0] = ["exec", ";".join(pre)] def do_connect(args): dev = args.pop(0) try: if dev == "list": # List attached devices. for p in sorted(serial.tools.list_ports.comports()): print( "{} {} {:04x}:{:04x} {} {}".format( p.device, p.serial_number, p.pid, p.vid, p.manufacturer, p.product ) ) return None elif dev == "auto": # Auto-detect and auto-connect to the first available device. for p in sorted(serial.tools.list_ports.comports()): try: return pyboard.PyboardExtended(p.device, baudrate=115200) except pyboard.PyboardError as er: if not er.args[0].startswith("failed to access"): raise er raise pyboard.PyboardError("no device found") elif dev.startswith("id:"): # Search for a device with the given serial number. serial_number = dev[len("id:") :] dev = None for p in serial.tools.list_ports.comports(): if p.serial_number == serial_number: return pyboard.PyboardExtended(p.device, baudrate=115200) raise pyboard.PyboardError("no device with serial number {}".format(serial_number)) else: # Connect to the given device. 
if dev.startswith("port:"): dev = dev[len("port:") :] return pyboard.PyboardExtended(dev, baudrate=115200) except pyboard.PyboardError as er: msg = er.args[0] if msg.startswith("failed to access"): msg += " (it may be in use by another program)" print(msg) sys.exit(1) def do_disconnect(pyb): try: if pyb.mounted: if not pyb.in_raw_repl: pyb.enter_raw_repl(soft_reset=False) pyb.umount_local() if pyb.in_raw_repl: pyb.exit_raw_repl() except OSError: # Ignore any OSError exceptions when shutting down, eg: # - pyboard.filesystem_command will close the connecton if it had an error # - umounting will fail if serial port disappeared pass pyb.close() def do_filesystem(pyb, args): def _list_recursive(files, path): if os.path.isdir(path): for entry in os.listdir(path): _list_recursive(files, os.path.join(path, entry)) else: files.append(os.path.split(path)) if args[0] == "cp" and args[1] == "-r": args.pop(0) args.pop(0) assert args[-1] == ":" args.pop() src_files = [] for path in args: _list_recursive(src_files, path) known_dirs = {""} pyb.exec_("import uos") for dir, file in src_files: dir_parts = dir.split("/") for i in range(len(dir_parts)): d = "/".join(dir_parts[: i + 1]) if d not in known_dirs: pyb.exec_("try:\n uos.mkdir('%s')\nexcept OSError as e:\n print(e)" % d) known_dirs.add(d) pyboard.filesystem_command(pyb, ["cp", os.path.join(dir, file), ":" + dir + "/"]) else: pyboard.filesystem_command(pyb, args) args.clear() def do_repl_main_loop(pyb, console_in, console_out_write, *, code_to_inject, file_to_inject): while True: console_in.waitchar(pyb.serial) c = console_in.readchar() if c: if c == b"\x1d": # ctrl-], quit break elif c == b"\x04": # ctrl-D # do a soft reset and reload the filesystem hook pyb.soft_reset_with_mount(console_out_write) elif c == b"\x0a" and code_to_inject is not None: # ctrl-j, inject code pyb.serial.write(code_to_inject) elif c == b"\x0b" and file_to_inject is not None: # ctrl-k, inject script console_out_write(bytes("Injecting %s\r\n" % file_to_inject, "utf8")) pyb.enter_raw_repl(soft_reset=False) with open(file_to_inject, "rb") as f: pyfile = f.read() try: pyb.exec_raw_no_follow(pyfile) except pyboard.PyboardError as er: console_out_write(b"Error:\r\n") console_out_write(er) pyb.exit_raw_repl() else: pyb.serial.write(c) try: n = pyb.serial.inWaiting() except OSError as er: if er.args[0] == 5: # IO error, device disappeared print("device disconnected") break if n > 0: c = pyb.serial.read(1) if c is not None: # pass character through to the console oc = ord(c) if oc in (8, 9, 10, 13, 27) or 32 <= oc <= 126: console_out_write(c) else: console_out_write(b"[%02x]" % ord(c)) def do_repl(pyb, args): capture_file = None code_to_inject = None file_to_inject = None while len(args): if args[0] == "--capture": args.pop(0) capture_file = args.pop(0) elif args[0] == "--inject-code": args.pop(0) code_to_inject = bytes(args.pop(0).replace("\\n", "\r\n"), "utf8") elif args[0] == "--inject-file": args.pop(0) file_to_inject = args.pop(0) else: break print("Connected to MicroPython at %s" % pyb.device_name) print("Use Ctrl-] to exit this shell") if capture_file is not None: print('Capturing session to file "%s"' % capture_file) capture_file = open(capture_file, "wb") if code_to_inject is not None: print("Use Ctrl-J to inject", code_to_inject) if file_to_inject is not None: print('Use Ctrl-K to inject file "%s"' % file_to_inject) console = Console() console.enter() def console_out_write(b): console.write(b) if capture_file is not None: capture_file.write(b) capture_file.flush() try: 
do_repl_main_loop( pyb, console, console_out_write, code_to_inject=code_to_inject, file_to_inject=file_to_inject, ) finally: console.exit() if capture_file is not None: capture_file.close() def execbuffer(pyb, buf, follow): ret_val = 0 try: pyb.exec_raw_no_follow(buf) if follow: ret, ret_err = pyb.follow(timeout=None, data_consumer=pyboard.stdout_write_bytes) if ret_err: pyboard.stdout_write_bytes(ret_err) ret_val = 1 except pyboard.PyboardError as er: print(er) ret_val = 1 except KeyboardInterrupt: ret_val = 1 return ret_val def main(): config = load_user_config() prepare_command_expansions(config) args = sys.argv[1:] pyb = None did_action = False try: while args: do_command_expansion(args) cmds = { "connect": (False, False, 1), "disconnect": (False, False, 0), "mount": (True, False, 1), "repl": (False, True, 0), "eval": (True, True, 1), "exec": (True, True, 1), "run": (True, True, 1), "fs": (True, True, 1), } cmd = args.pop(0) try: need_raw_repl, is_action, num_args_min = cmds[cmd] except KeyError: print(f"{_PROG}: '{cmd}' is not a command") return 1 if len(args) < num_args_min: print(f"{_PROG}: '{cmd}' neads at least {num_args_min} argument(s)") return 1 if cmd == "connect": if pyb is not None: do_disconnect(pyb) pyb = do_connect(args) if pyb is None: did_action = True continue if pyb is None: pyb = do_connect(["auto"]) if need_raw_repl: if not pyb.in_raw_repl: pyb.enter_raw_repl() else: if pyb.in_raw_repl: pyb.exit_raw_repl() if is_action: did_action = True if cmd == "disconnect": do_disconnect(pyb) pyb = None elif cmd == "mount": path = args.pop(0) pyb.mount_local(path) print(f"Local directory {path} is mounted at /remote") elif cmd in ("exec", "eval", "run"): follow = True if args[0] == "--no-follow": args.pop(0) follow = False if cmd == "exec": buf = args.pop(0) elif cmd == "eval": buf = "print(" + args.pop(0) + ")" else: filename = args.pop(0) try: with open(filename, "rb") as f: buf = f.read() except OSError: print(f"{_PROG}: could not read file '{filename}'") return 1 ret = execbuffer(pyb, buf, follow) if ret: return ret elif cmd == "fs": do_filesystem(pyb, args) elif cmd == "repl": do_repl(pyb, args) if not did_action: if pyb is None: pyb = do_connect(["auto"]) if pyb.in_raw_repl: pyb.exit_raw_repl() do_repl(pyb, args) finally: if pyb is not None: do_disconnect(pyb)
0.001099
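# A hedged sketch (not from the source) of the optional user config file that
# load_user_config() above executes: ~/.config/mpremote/config.py (or the
# equivalent under $XDG_CONFIG_HOME). The `commands` dict is merged with
# _BUILTIN_COMMAND_EXPANSIONS by prepare_command_expansions(), so each key
# becomes a shortcut that expands to the given command string or argument list.
commands = {
    "myboard": "connect /dev/ttyACM1",   # device shortcut; path is illustrative
    "showboot": "fs cat boot.py",        # print boot.py from the device
    "freq": [                            # multi-part expansion, like the built-in "df"
        "exec",
        "import machine; print(machine.freq())",
    ],
}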
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, André Paramés <[email protected]> # Based on the Git module by Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = u''' --- module: bzr author: "André Paramés (@andreparames)" version_added: "1.1" short_description: Deploy software (or files) from bzr branches description: - Manage I(bzr) branches to deploy files or software. options: name: required: true aliases: [ 'parent' ] description: - SSH or HTTP protocol address of the parent branch. dest: required: true description: - Absolute path of where the branch should be cloned to. version: required: false default: "head" description: - What version of the branch to clone. This can be the bzr revno or revid. force: required: false default: "no" choices: [ 'yes', 'no' ] description: - If C(yes), any modified files in the working tree will be discarded. Before 1.9 the default value was "yes". executable: required: false default: null version_added: "1.4" description: - Path to bzr executable to use. If not supplied, the normal mechanism for resolving binary paths will be used. ''' EXAMPLES = ''' # Example bzr checkout from Ansible Playbooks - bzr: name: bzr+ssh://foosball.example.org/path/to/branch dest: /srv/checkout version: 22 ''' import re class Bzr(object): def __init__(self, module, parent, dest, version, bzr_path): self.module = module self.parent = parent self.dest = dest self.version = version self.bzr_path = bzr_path def _command(self, args_list, cwd=None, **kwargs): (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs) return (rc, out, err) def get_version(self): '''samples the version of the bzr branch''' cmd = "%s revno" % self.bzr_path rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) revno = stdout.strip() return revno def clone(self): '''makes a new bzr branch if it does not already exist''' dest_dirname = os.path.dirname(self.dest) try: os.makedirs(dest_dirname) except: pass if self.version.lower() != 'head': args_list = ["branch", "-r", self.version, self.parent, self.dest] else: args_list = ["branch", self.parent, self.dest] return self._command(args_list, check_rc=True, cwd=dest_dirname) def has_local_mods(self): cmd = "%s status -S" % self.bzr_path rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) lines = stdout.splitlines() lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) return len(lines) > 0 def reset(self, force): ''' Resets the index and working tree to head. Discards any changes to tracked files in the working tree since that commit. 
''' if not force and self.has_local_mods(): self.module.fail_json(msg="Local modifications exist in branch (force=no).") return self._command(["revert"], check_rc=True, cwd=self.dest) def fetch(self): '''updates branch from remote sources''' if self.version.lower() != 'head': (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest) else: (rc, out, err) = self._command(["pull"], cwd=self.dest) if rc != 0: self.module.fail_json(msg="Failed to pull") return (rc, out, err) def switch_version(self): '''once pulled, switch to a particular revno or revid''' if self.version.lower() != 'head': args_list = ["revert", "-r", self.version] else: args_list = ["revert"] return self._command(args_list, check_rc=True, cwd=self.dest) # =========================================== def main(): module = AnsibleModule( argument_spec = dict( dest=dict(required=True, type='path'), name=dict(required=True, aliases=['parent']), version=dict(default='head'), force=dict(default='no', type='bool'), executable=dict(default=None), ) ) dest = module.params['dest'] parent = module.params['name'] version = module.params['version'] force = module.params['force'] bzr_path = module.params['executable'] or module.get_bin_path('bzr', True) bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf') rc, out, err, status = (0, None, None, None) bzr = Bzr(module, parent, dest, version, bzr_path) # if there is no bzr configuration, do a branch operation # else pull and switch the version before = None local_mods = False if not os.path.exists(bzrconfig): (rc, out, err) = bzr.clone() else: # else do a pull local_mods = bzr.has_local_mods() before = bzr.get_version() (rc, out, err) = bzr.reset(force) if rc != 0: module.fail_json(msg=err) (rc, out, err) = bzr.fetch() if rc != 0: module.fail_json(msg=err) # switch to version specified regardless of whether # we cloned or pulled (rc, out, err) = bzr.switch_version() # determine if we changed anything after = bzr.get_version() changed = False if before != after or local_mods: changed = True module.exit_json(changed=changed, before=before, after=after) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
0.001951
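# A Python-3-compatible sketch of the check performed by has_local_mods() in the
# module above. The original relies on filter() returning a list, which is
# Python 2 behaviour; on Python 3, len() of the filter object would raise
# TypeError. Function and parameter names here are illustrative.
import re

def has_local_mods(status_output: str) -> bool:
    # Drop "unknown file" entries ("?? ..."), mirroring the module's regex,
    # and report whether any other status lines remain.
    lines = [l for l in status_output.splitlines()
             if not re.search(r'^\?\?.*$', l)]
    return len(lines) > 0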
#!/usr/bin/env python3 # Copyright (c) 2010 ArtForz -- public domain half-a-node # Copyright (c) 2012 Jeff Garzik # Copyright (c) 2010-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # mininode.py - Bitcoin P2P network half-a-node # # This python code was modified from ArtForz' public domain half-a-node, as # found in the mini-node branch of http://github.com/jgarzik/pynode. # # NodeConn: an object which manages p2p connectivity to a bitcoin node # NodeConnCB: a base class that describes the interface for receiving # callbacks with network messages from a NodeConn # CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....: # data structures that should map to corresponding structures in # bitcoin/primitives # msg_block, msg_tx, msg_headers, etc.: # data structures that represent network messages # ser_*, deser_*: functions that handle serialization/deserialization import struct import socket import asyncore import time import sys import random from .util import hex_str_to_bytes, bytes_to_hex_str from io import BytesIO from codecs import encode import hashlib from threading import RLock from threading import Thread import logging import copy import linuxcoin_scrypt from test_framework.siphash import siphash256 BIP0031_VERSION = 60000 MY_VERSION = 80014 # past bip-31 for ping/pong MY_SUBVERSION = b"/python-mininode-tester:0.0.3/" MAX_INV_SZ = 50000 MAX_BLOCK_SIZE = 1000000 COIN = 100000000 # 1 btc in satoshis NODE_NETWORK = (1 << 0) NODE_GETUTXO = (1 << 1) NODE_BLOOM = (1 << 2) NODE_WITNESS = (1 << 3) # Keep our own socket map for asyncore, so that we can track disconnects # ourselves (to workaround an issue with closing an asyncore socket when # using select) mininode_socket_map = dict() # One lock for synchronizing all data access between the networking thread (see # NetworkThread below) and the thread running the test logic. For simplicity, # NodeConn acquires this lock whenever delivering a message to to a NodeConnCB, # and whenever adding anything to the send buffer (in send_message()). This # lock should be acquired in the thread running the test logic to synchronize # access to any data shared with the NodeConnCB or NodeConn. 
mininode_lock = RLock() # Serialization/deserialization tools def sha256(s): return hashlib.new('sha256', s).digest() def ripemd160(s): return hashlib.new('ripemd160', s).digest() def hash256(s): return sha256(sha256(s)) def ser_compact_size(l): r = b"" if l < 253: r = struct.pack("B", l) elif l < 0x10000: r = struct.pack("<BH", 253, l) elif l < 0x100000000: r = struct.pack("<BI", 254, l) else: r = struct.pack("<BQ", 255, l) return r def deser_compact_size(f): nit = struct.unpack("<B", f.read(1))[0] if nit == 253: nit = struct.unpack("<H", f.read(2))[0] elif nit == 254: nit = struct.unpack("<I", f.read(4))[0] elif nit == 255: nit = struct.unpack("<Q", f.read(8))[0] return nit def deser_string(f): nit = deser_compact_size(f) return f.read(nit) def ser_string(s): return ser_compact_size(len(s)) + s def deser_uint256(f): r = 0 for i in range(8): t = struct.unpack("<I", f.read(4))[0] r += t << (i * 32) return r def ser_uint256(u): rs = b"" for i in range(8): rs += struct.pack("<I", u & 0xFFFFFFFF) u >>= 32 return rs def uint256_from_str(s): r = 0 t = struct.unpack("<IIIIIIII", s[:32]) for i in range(8): r += t[i] << (i * 32) return r def uint256_from_compact(c): nbytes = (c >> 24) & 0xFF v = (c & 0xFFFFFF) << (8 * (nbytes - 3)) return v def deser_vector(f, c): nit = deser_compact_size(f) r = [] for i in range(nit): t = c() t.deserialize(f) r.append(t) return r # ser_function_name: Allow for an alternate serialization function on the # entries in the vector (we use this for serializing the vector of transactions # for a witness block). def ser_vector(l, ser_function_name=None): r = ser_compact_size(len(l)) for i in l: if ser_function_name: r += getattr(i, ser_function_name)() else: r += i.serialize() return r def deser_uint256_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_uint256(f) r.append(t) return r def ser_uint256_vector(l): r = ser_compact_size(len(l)) for i in l: r += ser_uint256(i) return r def deser_string_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_string(f) r.append(t) return r def ser_string_vector(l): r = ser_compact_size(len(l)) for sv in l: r += ser_string(sv) return r def deser_int_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = struct.unpack("<i", f.read(4))[0] r.append(t) return r def ser_int_vector(l): r = ser_compact_size(len(l)) for i in l: r += struct.pack("<i", i) return r # Deserialize from a hex string representation (eg from RPC) def FromHex(obj, hex_string): obj.deserialize(BytesIO(hex_str_to_bytes(hex_string))) return obj # Convert a binary-serializable object to hex (eg for submission via RPC) def ToHex(obj): return bytes_to_hex_str(obj.serialize()) # Objects that map to bitcoind objects, which can be serialized/deserialized class CAddress(object): def __init__(self): self.nServices = 1 self.pchReserved = b"\x00" * 10 + b"\xff" * 2 self.ip = "0.0.0.0" self.port = 0 def deserialize(self, f): self.nServices = struct.unpack("<Q", f.read(8))[0] self.pchReserved = f.read(12) self.ip = socket.inet_ntoa(f.read(4)) self.port = struct.unpack(">H", f.read(2))[0] def serialize(self): r = b"" r += struct.pack("<Q", self.nServices) r += self.pchReserved r += socket.inet_aton(self.ip) r += struct.pack(">H", self.port) return r def __repr__(self): return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices, self.ip, self.port) MSG_WITNESS_FLAG = 1<<30 class CInv(object): typemap = { 0: "Error", 1: "TX", 2: "Block", 1|MSG_WITNESS_FLAG: "WitnessTx", 2|MSG_WITNESS_FLAG : "WitnessBlock", 4: 
"CompactBlock" } def __init__(self, t=0, h=0): self.type = t self.hash = h def deserialize(self, f): self.type = struct.unpack("<i", f.read(4))[0] self.hash = deser_uint256(f) def serialize(self): r = b"" r += struct.pack("<i", self.type) r += ser_uint256(self.hash) return r def __repr__(self): return "CInv(type=%s hash=%064x)" \ % (self.typemap[self.type], self.hash) class CBlockLocator(object): def __init__(self): self.nVersion = MY_VERSION self.vHave = [] def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] self.vHave = deser_uint256_vector(f) def serialize(self): r = b"" r += struct.pack("<i", self.nVersion) r += ser_uint256_vector(self.vHave) return r def __repr__(self): return "CBlockLocator(nVersion=%i vHave=%s)" \ % (self.nVersion, repr(self.vHave)) class COutPoint(object): def __init__(self, hash=0, n=0): self.hash = hash self.n = n def deserialize(self, f): self.hash = deser_uint256(f) self.n = struct.unpack("<I", f.read(4))[0] def serialize(self): r = b"" r += ser_uint256(self.hash) r += struct.pack("<I", self.n) return r def __repr__(self): return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n) class CTxIn(object): def __init__(self, outpoint=None, scriptSig=b"", nSequence=0): if outpoint is None: self.prevout = COutPoint() else: self.prevout = outpoint self.scriptSig = scriptSig self.nSequence = nSequence def deserialize(self, f): self.prevout = COutPoint() self.prevout.deserialize(f) self.scriptSig = deser_string(f) self.nSequence = struct.unpack("<I", f.read(4))[0] def serialize(self): r = b"" r += self.prevout.serialize() r += ser_string(self.scriptSig) r += struct.pack("<I", self.nSequence) return r def __repr__(self): return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \ % (repr(self.prevout), bytes_to_hex_str(self.scriptSig), self.nSequence) class CTxOut(object): def __init__(self, nValue=0, scriptPubKey=b""): self.nValue = nValue self.scriptPubKey = scriptPubKey def deserialize(self, f): self.nValue = struct.unpack("<q", f.read(8))[0] self.scriptPubKey = deser_string(f) def serialize(self): r = b"" r += struct.pack("<q", self.nValue) r += ser_string(self.scriptPubKey) return r def __repr__(self): return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \ % (self.nValue // COIN, self.nValue % COIN, bytes_to_hex_str(self.scriptPubKey)) class CScriptWitness(object): def __init__(self): # stack is a vector of strings self.stack = [] def __repr__(self): return "CScriptWitness(%s)" % \ (",".join([bytes_to_hex_str(x) for x in self.stack])) def is_null(self): if self.stack: return False return True class CTxInWitness(object): def __init__(self): self.scriptWitness = CScriptWitness() def deserialize(self, f): self.scriptWitness.stack = deser_string_vector(f) def serialize(self): return ser_string_vector(self.scriptWitness.stack) def __repr__(self): return repr(self.scriptWitness) def is_null(self): return self.scriptWitness.is_null() class CTxWitness(object): def __init__(self): self.vtxinwit = [] def deserialize(self, f): for i in range(len(self.vtxinwit)): self.vtxinwit[i].deserialize(f) def serialize(self): r = b"" # This is different than the usual vector serialization -- # we omit the length of the vector, which is required to be # the same length as the transaction's vin vector. 
for x in self.vtxinwit: r += x.serialize() return r def __repr__(self): return "CTxWitness(%s)" % \ (';'.join([repr(x) for x in self.vtxinwit])) def is_null(self): for x in self.vtxinwit: if not x.is_null(): return False return True class CTransaction(object): def __init__(self, tx=None): if tx is None: self.nVersion = 1 self.vin = [] self.vout = [] self.wit = CTxWitness() self.nLockTime = 0 self.sha256 = None self.hash = None else: self.nVersion = tx.nVersion self.vin = copy.deepcopy(tx.vin) self.vout = copy.deepcopy(tx.vout) self.nLockTime = tx.nLockTime self.sha256 = tx.sha256 self.hash = tx.hash self.wit = copy.deepcopy(tx.wit) def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] self.vin = deser_vector(f, CTxIn) flags = 0 if len(self.vin) == 0: flags = struct.unpack("<B", f.read(1))[0] # Not sure why flags can't be zero, but this # matches the implementation in bitcoind if (flags != 0): self.vin = deser_vector(f, CTxIn) self.vout = deser_vector(f, CTxOut) else: self.vout = deser_vector(f, CTxOut) if flags != 0: self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))] self.wit.deserialize(f) self.nLockTime = struct.unpack("<I", f.read(4))[0] self.sha256 = None self.hash = None def serialize_without_witness(self): r = b"" r += struct.pack("<i", self.nVersion) r += ser_vector(self.vin) r += ser_vector(self.vout) r += struct.pack("<I", self.nLockTime) return r # Only serialize with witness when explicitly called for def serialize_with_witness(self): flags = 0 if not self.wit.is_null(): flags |= 1 r = b"" r += struct.pack("<i", self.nVersion) if flags: dummy = [] r += ser_vector(dummy) r += struct.pack("<B", flags) r += ser_vector(self.vin) r += ser_vector(self.vout) if flags & 1: if (len(self.wit.vtxinwit) != len(self.vin)): # vtxinwit must have the same length as vin self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)] for i in range(len(self.wit.vtxinwit), len(self.vin)): self.wit.vtxinwit.append(CTxInWitness()) r += self.wit.serialize() r += struct.pack("<I", self.nLockTime) return r # Regular serialization is without witness -- must explicitly # call serialize_with_witness to include witness data. def serialize(self): return self.serialize_without_witness() # Recalculate the txid (transaction hash without witness) def rehash(self): self.sha256 = None self.calc_sha256() # We will only cache the serialization without witness in # self.sha256 and self.hash -- those are expected to be the txid. 
def calc_sha256(self, with_witness=False): if with_witness: # Don't cache the result, just return it return uint256_from_str(hash256(self.serialize_with_witness())) if self.sha256 is None: self.sha256 = uint256_from_str(hash256(self.serialize_without_witness())) self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii') def is_valid(self): self.calc_sha256() for tout in self.vout: if tout.nValue < 0 or tout.nValue > 84000000 * COIN: return False return True def __repr__(self): return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \ % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime) class CBlockHeader(object): def __init__(self, header=None): if header is None: self.set_null() else: self.nVersion = header.nVersion self.hashPrevBlock = header.hashPrevBlock self.hashMerkleRoot = header.hashMerkleRoot self.nTime = header.nTime self.nBits = header.nBits self.nNonce = header.nNonce self.sha256 = header.sha256 self.hash = header.hash self.scrypt256 = header.scrypt256 self.calc_sha256() def set_null(self): self.nVersion = 1 self.hashPrevBlock = 0 self.hashMerkleRoot = 0 self.nTime = 0 self.nBits = 0 self.nNonce = 0 self.sha256 = None self.hash = None self.scrypt256 = None def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] self.hashPrevBlock = deser_uint256(f) self.hashMerkleRoot = deser_uint256(f) self.nTime = struct.unpack("<I", f.read(4))[0] self.nBits = struct.unpack("<I", f.read(4))[0] self.nNonce = struct.unpack("<I", f.read(4))[0] self.sha256 = None self.hash = None self.scrypt256 = None def serialize(self): r = b"" r += struct.pack("<i", self.nVersion) r += ser_uint256(self.hashPrevBlock) r += ser_uint256(self.hashMerkleRoot) r += struct.pack("<I", self.nTime) r += struct.pack("<I", self.nBits) r += struct.pack("<I", self.nNonce) return r def calc_sha256(self): if self.sha256 is None: r = b"" r += struct.pack("<i", self.nVersion) r += ser_uint256(self.hashPrevBlock) r += ser_uint256(self.hashMerkleRoot) r += struct.pack("<I", self.nTime) r += struct.pack("<I", self.nBits) r += struct.pack("<I", self.nNonce) self.sha256 = uint256_from_str(hash256(r)) self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii') self.scrypt256 = uint256_from_str(linuxcoin_scrypt.getPoWHash(r)) def rehash(self): self.sha256 = None self.scrypt256 = None self.calc_sha256() return self.sha256 def __repr__(self): return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \ % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, time.ctime(self.nTime), self.nBits, self.nNonce) class CBlock(CBlockHeader): def __init__(self, header=None): super(CBlock, self).__init__(header) self.vtx = [] def deserialize(self, f): super(CBlock, self).deserialize(f) self.vtx = deser_vector(f, CTransaction) def serialize(self, with_witness=False): r = b"" r += super(CBlock, self).serialize() if with_witness: r += ser_vector(self.vtx, "serialize_with_witness") else: r += ser_vector(self.vtx) return r # Calculate the merkle root given a vector of transaction hashes def get_merkle_root(self, hashes): while len(hashes) > 1: newhashes = [] for i in range(0, len(hashes), 2): i2 = min(i+1, len(hashes)-1) newhashes.append(hash256(hashes[i] + hashes[i2])) hashes = newhashes return uint256_from_str(hashes[0]) def calc_merkle_root(self): hashes = [] for tx in self.vtx: tx.calc_sha256() hashes.append(ser_uint256(tx.sha256)) return self.get_merkle_root(hashes) def calc_witness_merkle_root(self): # 
For witness root purposes, the hash of the # coinbase, with witness, is defined to be 0...0 hashes = [ser_uint256(0)] for tx in self.vtx[1:]: # Calculate the hashes with witness data hashes.append(ser_uint256(tx.calc_sha256(True))) return self.get_merkle_root(hashes) def is_valid(self): self.calc_sha256() target = uint256_from_compact(self.nBits) if self.scrypt256 > target: return False for tx in self.vtx: if not tx.is_valid(): return False if self.calc_merkle_root() != self.hashMerkleRoot: return False return True def solve(self): self.rehash() target = uint256_from_compact(self.nBits) while self.scrypt256 > target: self.nNonce += 1 self.rehash() def __repr__(self): return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \ % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx)) class CUnsignedAlert(object): def __init__(self): self.nVersion = 1 self.nRelayUntil = 0 self.nExpiration = 0 self.nID = 0 self.nCancel = 0 self.setCancel = [] self.nMinVer = 0 self.nMaxVer = 0 self.setSubVer = [] self.nPriority = 0 self.strComment = b"" self.strStatusBar = b"" self.strReserved = b"" def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] self.nRelayUntil = struct.unpack("<q", f.read(8))[0] self.nExpiration = struct.unpack("<q", f.read(8))[0] self.nID = struct.unpack("<i", f.read(4))[0] self.nCancel = struct.unpack("<i", f.read(4))[0] self.setCancel = deser_int_vector(f) self.nMinVer = struct.unpack("<i", f.read(4))[0] self.nMaxVer = struct.unpack("<i", f.read(4))[0] self.setSubVer = deser_string_vector(f) self.nPriority = struct.unpack("<i", f.read(4))[0] self.strComment = deser_string(f) self.strStatusBar = deser_string(f) self.strReserved = deser_string(f) def serialize(self): r = b"" r += struct.pack("<i", self.nVersion) r += struct.pack("<q", self.nRelayUntil) r += struct.pack("<q", self.nExpiration) r += struct.pack("<i", self.nID) r += struct.pack("<i", self.nCancel) r += ser_int_vector(self.setCancel) r += struct.pack("<i", self.nMinVer) r += struct.pack("<i", self.nMaxVer) r += ser_string_vector(self.setSubVer) r += struct.pack("<i", self.nPriority) r += ser_string(self.strComment) r += ser_string(self.strStatusBar) r += ser_string(self.strReserved) return r def __repr__(self): return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \ % (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID, self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority, self.strComment, self.strStatusBar, self.strReserved) class CAlert(object): def __init__(self): self.vchMsg = b"" self.vchSig = b"" def deserialize(self, f): self.vchMsg = deser_string(f) self.vchSig = deser_string(f) def serialize(self): r = b"" r += ser_string(self.vchMsg) r += ser_string(self.vchSig) return r def __repr__(self): return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \ % (len(self.vchMsg), len(self.vchSig)) class PrefilledTransaction(object): def __init__(self, index=0, tx = None): self.index = index self.tx = tx def deserialize(self, f): self.index = deser_compact_size(f) self.tx = CTransaction() self.tx.deserialize(f) def serialize(self, with_witness=False): r = b"" r += ser_compact_size(self.index) if with_witness: r += self.tx.serialize_with_witness() else: r += self.tx.serialize_without_witness() return r def serialize_with_witness(self): return 
self.serialize(with_witness=True) def __repr__(self): return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx)) # This is what we send on the wire, in a cmpctblock message. class P2PHeaderAndShortIDs(object): def __init__(self): self.header = CBlockHeader() self.nonce = 0 self.shortids_length = 0 self.shortids = [] self.prefilled_txn_length = 0 self.prefilled_txn = [] def deserialize(self, f): self.header.deserialize(f) self.nonce = struct.unpack("<Q", f.read(8))[0] self.shortids_length = deser_compact_size(f) for i in range(self.shortids_length): # shortids are defined to be 6 bytes in the spec, so append # two zero bytes and read it in as an 8-byte number self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0]) self.prefilled_txn = deser_vector(f, PrefilledTransaction) self.prefilled_txn_length = len(self.prefilled_txn) # When using version 2 compact blocks, we must serialize with_witness. def serialize(self, with_witness=False): r = b"" r += self.header.serialize() r += struct.pack("<Q", self.nonce) r += ser_compact_size(self.shortids_length) for x in self.shortids: # We only want the first 6 bytes r += struct.pack("<Q", x)[0:6] if with_witness: r += ser_vector(self.prefilled_txn, "serialize_with_witness") else: r += ser_vector(self.prefilled_txn) return r def __repr__(self): return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn)) # P2P version of the above that will use witness serialization (for compact # block version 2) class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs): def serialize(self): return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True) # Calculate the BIP 152-compact blocks shortid for a given transaction hash def calculate_shortid(k0, k1, tx_hash): expected_shortid = siphash256(k0, k1, tx_hash) expected_shortid &= 0x0000ffffffffffff return expected_shortid # This version gets rid of the array lengths, and reinterprets the differential # encoding into indices that can be used for lookup. 
class HeaderAndShortIDs(object): def __init__(self, p2pheaders_and_shortids = None): self.header = CBlockHeader() self.nonce = 0 self.shortids = [] self.prefilled_txn = [] self.use_witness = False if p2pheaders_and_shortids != None: self.header = p2pheaders_and_shortids.header self.nonce = p2pheaders_and_shortids.nonce self.shortids = p2pheaders_and_shortids.shortids last_index = -1 for x in p2pheaders_and_shortids.prefilled_txn: self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx)) last_index = self.prefilled_txn[-1].index def to_p2p(self): if self.use_witness: ret = P2PHeaderAndShortWitnessIDs() else: ret = P2PHeaderAndShortIDs() ret.header = self.header ret.nonce = self.nonce ret.shortids_length = len(self.shortids) ret.shortids = self.shortids ret.prefilled_txn_length = len(self.prefilled_txn) ret.prefilled_txn = [] last_index = -1 for x in self.prefilled_txn: ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx)) last_index = x.index return ret def get_siphash_keys(self): header_nonce = self.header.serialize() header_nonce += struct.pack("<Q", self.nonce) hash_header_nonce_as_str = sha256(header_nonce) key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0] key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0] return [ key0, key1 ] # Version 2 compact blocks use wtxid in shortids (rather than txid) def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False): self.header = CBlockHeader(block) self.nonce = nonce self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ] self.shortids = [] self.use_witness = use_witness [k0, k1] = self.get_siphash_keys() for i in range(len(block.vtx)): if i not in prefill_list: tx_hash = block.vtx[i].sha256 if use_witness: tx_hash = block.vtx[i].calc_sha256(with_witness=True) self.shortids.append(calculate_shortid(k0, k1, tx_hash)) def __repr__(self): return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn)) class BlockTransactionsRequest(object): def __init__(self, blockhash=0, indexes = None): self.blockhash = blockhash self.indexes = indexes if indexes != None else [] def deserialize(self, f): self.blockhash = deser_uint256(f) indexes_length = deser_compact_size(f) for i in range(indexes_length): self.indexes.append(deser_compact_size(f)) def serialize(self): r = b"" r += ser_uint256(self.blockhash) r += ser_compact_size(len(self.indexes)) for x in self.indexes: r += ser_compact_size(x) return r # helper to set the differentially encoded indexes from absolute ones def from_absolute(self, absolute_indexes): self.indexes = [] last_index = -1 for x in absolute_indexes: self.indexes.append(x-last_index-1) last_index = x def to_absolute(self): absolute_indexes = [] last_index = -1 for x in self.indexes: absolute_indexes.append(x+last_index+1) last_index = absolute_indexes[-1] return absolute_indexes def __repr__(self): return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes)) class BlockTransactions(object): def __init__(self, blockhash=0, transactions = None): self.blockhash = blockhash self.transactions = transactions if transactions != None else [] def deserialize(self, f): self.blockhash = deser_uint256(f) self.transactions = deser_vector(f, CTransaction) def serialize(self, with_witness=False): r = b"" r += ser_uint256(self.blockhash) if with_witness: r += ser_vector(self.transactions, 
"serialize_with_witness") else: r += ser_vector(self.transactions) return r def __repr__(self): return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions)) # Objects that correspond to messages on the wire class msg_version(object): command = b"version" def __init__(self): self.nVersion = MY_VERSION self.nServices = 1 self.nTime = int(time.time()) self.addrTo = CAddress() self.addrFrom = CAddress() self.nNonce = random.getrandbits(64) self.strSubVer = MY_SUBVERSION self.nStartingHeight = -1 def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] if self.nVersion == 10300: self.nVersion = 300 self.nServices = struct.unpack("<Q", f.read(8))[0] self.nTime = struct.unpack("<q", f.read(8))[0] self.addrTo = CAddress() self.addrTo.deserialize(f) if self.nVersion >= 106: self.addrFrom = CAddress() self.addrFrom.deserialize(f) self.nNonce = struct.unpack("<Q", f.read(8))[0] self.strSubVer = deser_string(f) if self.nVersion >= 209: self.nStartingHeight = struct.unpack("<i", f.read(4))[0] else: self.nStartingHeight = None else: self.addrFrom = None self.nNonce = None self.strSubVer = None self.nStartingHeight = None def serialize(self): r = b"" r += struct.pack("<i", self.nVersion) r += struct.pack("<Q", self.nServices) r += struct.pack("<q", self.nTime) r += self.addrTo.serialize() r += self.addrFrom.serialize() r += struct.pack("<Q", self.nNonce) r += ser_string(self.strSubVer) r += struct.pack("<i", self.nStartingHeight) return r def __repr__(self): return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \ % (self.nVersion, self.nServices, time.ctime(self.nTime), repr(self.addrTo), repr(self.addrFrom), self.nNonce, self.strSubVer, self.nStartingHeight) class msg_verack(object): command = b"verack" def __init__(self): pass def deserialize(self, f): pass def serialize(self): return b"" def __repr__(self): return "msg_verack()" class msg_addr(object): command = b"addr" def __init__(self): self.addrs = [] def deserialize(self, f): self.addrs = deser_vector(f, CAddress) def serialize(self): return ser_vector(self.addrs) def __repr__(self): return "msg_addr(addrs=%s)" % (repr(self.addrs)) class msg_alert(object): command = b"alert" def __init__(self): self.alert = CAlert() def deserialize(self, f): self.alert = CAlert() self.alert.deserialize(f) def serialize(self): r = b"" r += self.alert.serialize() return r def __repr__(self): return "msg_alert(alert=%s)" % (repr(self.alert), ) class msg_inv(object): command = b"inv" def __init__(self, inv=None): if inv is None: self.inv = [] else: self.inv = inv def deserialize(self, f): self.inv = deser_vector(f, CInv) def serialize(self): return ser_vector(self.inv) def __repr__(self): return "msg_inv(inv=%s)" % (repr(self.inv)) class msg_getdata(object): command = b"getdata" def __init__(self, inv=None): self.inv = inv if inv != None else [] def deserialize(self, f): self.inv = deser_vector(f, CInv) def serialize(self): return ser_vector(self.inv) def __repr__(self): return "msg_getdata(inv=%s)" % (repr(self.inv)) class msg_getblocks(object): command = b"getblocks" def __init__(self): self.locator = CBlockLocator() self.hashstop = 0 def deserialize(self, f): self.locator = CBlockLocator() self.locator.deserialize(f) self.hashstop = deser_uint256(f) def serialize(self): r = b"" r += self.locator.serialize() r += ser_uint256(self.hashstop) return r def __repr__(self): return "msg_getblocks(locator=%s hashstop=%064x)" \ % 
(repr(self.locator), self.hashstop) class msg_tx(object): command = b"tx" def __init__(self, tx=CTransaction()): self.tx = tx def deserialize(self, f): self.tx.deserialize(f) def serialize(self): return self.tx.serialize_without_witness() def __repr__(self): return "msg_tx(tx=%s)" % (repr(self.tx)) class msg_witness_tx(msg_tx): def serialize(self): return self.tx.serialize_with_witness() class msg_block(object): command = b"block" def __init__(self, block=None): if block is None: self.block = CBlock() else: self.block = block def deserialize(self, f): self.block.deserialize(f) def serialize(self): return self.block.serialize() def __repr__(self): return "msg_block(block=%s)" % (repr(self.block)) # for cases where a user needs tighter control over what is sent over the wire # note that the user must supply the name of the command, and the data class msg_generic(object): def __init__(self, command, data=None): self.command = command self.data = data def serialize(self): return self.data def __repr__(self): return "msg_generic()" class msg_witness_block(msg_block): def serialize(self): r = self.block.serialize(with_witness=True) return r class msg_getaddr(object): command = b"getaddr" def __init__(self): pass def deserialize(self, f): pass def serialize(self): return b"" def __repr__(self): return "msg_getaddr()" class msg_ping_prebip31(object): command = b"ping" def __init__(self): pass def deserialize(self, f): pass def serialize(self): return b"" def __repr__(self): return "msg_ping() (pre-bip31)" class msg_ping(object): command = b"ping" def __init__(self, nonce=0): self.nonce = nonce def deserialize(self, f): self.nonce = struct.unpack("<Q", f.read(8))[0] def serialize(self): r = b"" r += struct.pack("<Q", self.nonce) return r def __repr__(self): return "msg_ping(nonce=%08x)" % self.nonce class msg_pong(object): command = b"pong" def __init__(self, nonce=0): self.nonce = nonce def deserialize(self, f): self.nonce = struct.unpack("<Q", f.read(8))[0] def serialize(self): r = b"" r += struct.pack("<Q", self.nonce) return r def __repr__(self): return "msg_pong(nonce=%08x)" % self.nonce class msg_mempool(object): command = b"mempool" def __init__(self): pass def deserialize(self, f): pass def serialize(self): return b"" def __repr__(self): return "msg_mempool()" class msg_sendheaders(object): command = b"sendheaders" def __init__(self): pass def deserialize(self, f): pass def serialize(self): return b"" def __repr__(self): return "msg_sendheaders()" # getheaders message has # number of entries # vector of hashes # hash_stop (hash of last desired block header, 0 to get as many as possible) class msg_getheaders(object): command = b"getheaders" def __init__(self): self.locator = CBlockLocator() self.hashstop = 0 def deserialize(self, f): self.locator = CBlockLocator() self.locator.deserialize(f) self.hashstop = deser_uint256(f) def serialize(self): r = b"" r += self.locator.serialize() r += ser_uint256(self.hashstop) return r def __repr__(self): return "msg_getheaders(locator=%s, stop=%064x)" \ % (repr(self.locator), self.hashstop) # headers message has # <count> <vector of block headers> class msg_headers(object): command = b"headers" def __init__(self): self.headers = [] def deserialize(self, f): # comment in bitcoind indicates these should be deserialized as blocks blocks = deser_vector(f, CBlock) for x in blocks: self.headers.append(CBlockHeader(x)) def serialize(self): blocks = [CBlock(x) for x in self.headers] return ser_vector(blocks) def __repr__(self): return "msg_headers(headers=%s)" % 
repr(self.headers) class msg_reject(object): command = b"reject" REJECT_MALFORMED = 1 def __init__(self): self.message = b"" self.code = 0 self.reason = b"" self.data = 0 def deserialize(self, f): self.message = deser_string(f) self.code = struct.unpack("<B", f.read(1))[0] self.reason = deser_string(f) if (self.code != self.REJECT_MALFORMED and (self.message == b"block" or self.message == b"tx")): self.data = deser_uint256(f) def serialize(self): r = ser_string(self.message) r += struct.pack("<B", self.code) r += ser_string(self.reason) if (self.code != self.REJECT_MALFORMED and (self.message == b"block" or self.message == b"tx")): r += ser_uint256(self.data) return r def __repr__(self): return "msg_reject: %s %d %s [%064x]" \ % (self.message, self.code, self.reason, self.data) # Helper function def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf')): attempt = 0 elapsed = 0 while attempt < attempts and elapsed < timeout: with mininode_lock: if predicate(): return True attempt += 1 elapsed += 0.05 time.sleep(0.05) return False class msg_feefilter(object): command = b"feefilter" def __init__(self, feerate=0): self.feerate = feerate def deserialize(self, f): self.feerate = struct.unpack("<Q", f.read(8))[0] def serialize(self): r = b"" r += struct.pack("<Q", self.feerate) return r def __repr__(self): return "msg_feefilter(feerate=%08x)" % self.feerate class msg_sendcmpct(object): command = b"sendcmpct" def __init__(self): self.announce = False self.version = 1 def deserialize(self, f): self.announce = struct.unpack("<?", f.read(1))[0] self.version = struct.unpack("<Q", f.read(8))[0] def serialize(self): r = b"" r += struct.pack("<?", self.announce) r += struct.pack("<Q", self.version) return r def __repr__(self): return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version) class msg_cmpctblock(object): command = b"cmpctblock" def __init__(self, header_and_shortids = None): self.header_and_shortids = header_and_shortids def deserialize(self, f): self.header_and_shortids = P2PHeaderAndShortIDs() self.header_and_shortids.deserialize(f) def serialize(self): r = b"" r += self.header_and_shortids.serialize() return r def __repr__(self): return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids) class msg_getblocktxn(object): command = b"getblocktxn" def __init__(self): self.block_txn_request = None def deserialize(self, f): self.block_txn_request = BlockTransactionsRequest() self.block_txn_request.deserialize(f) def serialize(self): r = b"" r += self.block_txn_request.serialize() return r def __repr__(self): return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request)) class msg_blocktxn(object): command = b"blocktxn" def __init__(self): self.block_transactions = BlockTransactions() def deserialize(self, f): self.block_transactions.deserialize(f) def serialize(self): r = b"" r += self.block_transactions.serialize() return r def __repr__(self): return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions)) class msg_witness_blocktxn(msg_blocktxn): def serialize(self): r = b"" r += self.block_transactions.serialize(with_witness=True) return r # This is what a callback should look like for NodeConn # Reimplement the on_* functions to provide handling for events class NodeConnCB(object): def __init__(self): self.verack_received = False # deliver_sleep_time is helpful for debugging race conditions in p2p # tests; it causes message delivery to sleep for the specified time # before acquiring the global lock and 
delivering the next message. self.deliver_sleep_time = None # Remember the services our peer has advertised self.peer_services = None def set_deliver_sleep_time(self, value): with mininode_lock: self.deliver_sleep_time = value def get_deliver_sleep_time(self): with mininode_lock: return self.deliver_sleep_time # Spin until verack message is received from the node. # Tests may want to use this as a signal that the test can begin. # This can be called from the testing thread, so it needs to acquire the # global lock. def wait_for_verack(self): while True: with mininode_lock: if self.verack_received: return time.sleep(0.05) def deliver(self, conn, message): deliver_sleep = self.get_deliver_sleep_time() if deliver_sleep is not None: time.sleep(deliver_sleep) with mininode_lock: try: getattr(self, 'on_' + message.command.decode('ascii'))(conn, message) except: print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0])) def on_version(self, conn, message): if message.nVersion >= 209: conn.send_message(msg_verack()) conn.ver_send = min(MY_VERSION, message.nVersion) if message.nVersion < 209: conn.ver_recv = conn.ver_send conn.nServices = message.nServices def on_verack(self, conn, message): conn.ver_recv = conn.ver_send self.verack_received = True def on_inv(self, conn, message): want = msg_getdata() for i in message.inv: if i.type != 0: want.inv.append(i) if len(want.inv): conn.send_message(want) def on_addr(self, conn, message): pass def on_alert(self, conn, message): pass def on_getdata(self, conn, message): pass def on_getblocks(self, conn, message): pass def on_tx(self, conn, message): pass def on_block(self, conn, message): pass def on_getaddr(self, conn, message): pass def on_headers(self, conn, message): pass def on_getheaders(self, conn, message): pass def on_ping(self, conn, message): if conn.ver_send > BIP0031_VERSION: conn.send_message(msg_pong(message.nonce)) def on_reject(self, conn, message): pass def on_close(self, conn): pass def on_mempool(self, conn): pass def on_pong(self, conn, message): pass def on_feefilter(self, conn, message): pass def on_sendheaders(self, conn, message): pass def on_sendcmpct(self, conn, message): pass def on_cmpctblock(self, conn, message): pass def on_getblocktxn(self, conn, message): pass def on_blocktxn(self, conn, message): pass # More useful callbacks and functions for NodeConnCB's which have a single NodeConn class SingleNodeConnCB(NodeConnCB): def __init__(self): NodeConnCB.__init__(self) self.connection = None self.ping_counter = 1 self.last_pong = msg_pong() def add_connection(self, conn): self.connection = conn # Wrapper for the NodeConn's send_message function def send_message(self, message): self.connection.send_message(message) def send_and_ping(self, message): self.send_message(message) self.sync_with_ping() def on_pong(self, conn, message): self.last_pong = message # Sync up with the node def sync_with_ping(self, timeout=30): def received_pong(): return (self.last_pong.nonce == self.ping_counter) self.send_message(msg_ping(nonce=self.ping_counter)) success = wait_until(received_pong, timeout=timeout) self.ping_counter += 1 return success # The actual NodeConn class # This class provides an interface for a p2p connection to a specified node class NodeConn(asyncore.dispatcher): messagemap = { b"version": msg_version, b"verack": msg_verack, b"addr": msg_addr, b"alert": msg_alert, b"inv": msg_inv, b"getdata": msg_getdata, b"getblocks": msg_getblocks, b"tx": msg_tx, b"block": msg_block, b"getaddr": msg_getaddr, b"ping": msg_ping, 
b"pong": msg_pong, b"headers": msg_headers, b"getheaders": msg_getheaders, b"reject": msg_reject, b"mempool": msg_mempool, b"feefilter": msg_feefilter, b"sendheaders": msg_sendheaders, b"sendcmpct": msg_sendcmpct, b"cmpctblock": msg_cmpctblock, b"getblocktxn": msg_getblocktxn, b"blocktxn": msg_blocktxn } MAGIC_BYTES = { "mainnet": b"\xfb\xc0\xb6\xdb", # mainnet "testnet3": b"\xfc\xc1\xb7\xdc", # testnet3 "regtest": b"\xfa\xbf\xb5\xda", # regtest } def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK): asyncore.dispatcher.__init__(self, map=mininode_socket_map) self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport)) self.dstaddr = dstaddr self.dstport = dstport self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.sendbuf = b"" self.recvbuf = b"" self.ver_send = 209 self.ver_recv = 209 self.last_sent = 0 self.state = "connecting" self.network = net self.cb = callback self.disconnect = False self.nServices = 0 # stuff version msg into sendbuf vt = msg_version() vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport vt.addrFrom.ip = "0.0.0.0" vt.addrFrom.port = 0 self.send_message(vt, True) print('MiniNode: Connecting to Linuxcoin Node IP # ' + dstaddr + ':' \ + str(dstport)) try: self.connect((dstaddr, dstport)) except: self.handle_close() self.rpc = rpc def show_debug_msg(self, msg): self.log.debug(msg) def handle_connect(self): self.show_debug_msg("MiniNode: Connected & Listening: \n") self.state = "connected" def handle_close(self): self.show_debug_msg("MiniNode: Closing Connection to %s:%d... " % (self.dstaddr, self.dstport)) self.state = "closed" self.recvbuf = b"" self.sendbuf = b"" try: self.close() except: pass self.cb.on_close(self) def handle_read(self): try: t = self.recv(8192) if len(t) > 0: self.recvbuf += t self.got_data() except: pass def readable(self): return True def writable(self): with mininode_lock: length = len(self.sendbuf) return (length > 0) def handle_write(self): with mininode_lock: try: sent = self.send(self.sendbuf) except: self.handle_close() return self.sendbuf = self.sendbuf[sent:] def got_data(self): try: while True: if len(self.recvbuf) < 4: return if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]: raise ValueError("got garbage %s" % repr(self.recvbuf)) if self.ver_recv < 209: if len(self.recvbuf) < 4 + 12 + 4: return command = self.recvbuf[4:4+12].split(b"\x00", 1)[0] msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0] checksum = None if len(self.recvbuf) < 4 + 12 + 4 + msglen: return msg = self.recvbuf[4+12+4:4+12+4+msglen] self.recvbuf = self.recvbuf[4+12+4+msglen:] else: if len(self.recvbuf) < 4 + 12 + 4 + 4: return command = self.recvbuf[4:4+12].split(b"\x00", 1)[0] msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0] checksum = self.recvbuf[4+12+4:4+12+4+4] if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen: return msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen] th = sha256(msg) h = sha256(th) if checksum != h[:4]: raise ValueError("got bad checksum " + repr(self.recvbuf)) self.recvbuf = self.recvbuf[4+12+4+4+msglen:] if command in self.messagemap: f = BytesIO(msg) t = self.messagemap[command]() t.deserialize(f) self.got_message(t) else: self.show_debug_msg("Unknown command: '" + command + "' " + repr(msg)) except Exception as e: print('got_data:', repr(e)) # import traceback # traceback.print_tb(sys.exc_info()[2]) def send_message(self, message, pushbuf=False): if self.state != "connected" and not pushbuf: raise IOError('Not connected, no pushbuf') 
self.show_debug_msg("Send %s" % repr(message)) command = message.command data = message.serialize() tmsg = self.MAGIC_BYTES[self.network] tmsg += command tmsg += b"\x00" * (12 - len(command)) tmsg += struct.pack("<I", len(data)) if self.ver_send >= 209: th = sha256(data) h = sha256(th) tmsg += h[:4] tmsg += data with mininode_lock: self.sendbuf += tmsg self.last_sent = time.time() def got_message(self, message): if message.command == b"version": if message.nVersion <= BIP0031_VERSION: self.messagemap[b'ping'] = msg_ping_prebip31 if self.last_sent + 30 * 60 < time.time(): self.send_message(self.messagemap[b'ping']()) self.show_debug_msg("Recv %s" % repr(message)) self.cb.deliver(self, message) def disconnect_node(self): self.disconnect = True class NetworkThread(Thread): def run(self): while mininode_socket_map: # We check for whether to disconnect outside of the asyncore # loop to workaround the behavior of asyncore when using # select disconnected = [] for fd, obj in mininode_socket_map.items(): if obj.disconnect: disconnected.append(obj) [ obj.handle_close() for obj in disconnected ] asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1) # An exception we can raise if we detect a potential disconnect # (p2p or rpc) before the test is complete class EarlyDisconnectError(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value)
0.001857
import simplejson as json
from simplejson import loads
import math
import time
from datetime import datetime
from random import randint
import re

# START CONSTANTS
PI = math.pi
urlRegex = re.compile(
    r'^https?://'  # http:// or https://
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|'
    r'localhost|'  # localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
    r'(?::\d+)?'  # optional port
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)
# END CONSTANTS

# START UTILITY FUNCTIONS
def randomInt(inclusiveMin, exclusiveMax):
    return randint(inclusiveMin, exclusiveMax - 1)

def readJsonFile(filename):
    return json.load(open(filename))

def readTextFile(filename):
    # Caller is responsible for closing the returned file handle.
    return open(filename, "r")

def printJsonObj(obj, fname="", i=2):
    if obj is None:
        print "JSON object is None"
        return 1
    filename = fname
    outputString = json.dumps(obj, sort_keys=True, indent=i * " ")
    if filename == "":  # standard output
        print(outputString)
        return 0
    else:  # output file
        if not filename.endswith(".json"):
            filename = filename + ".json"
        f = open(filename, "w")
        f.write(outputString)
        f.close()
        return 0

def outputString(content, filename=""):
    if filename == "":
        print(content)
    else:
        f = open(filename, "w")
        f.write(content)
        f.close()

def search(listOfDict, val):
    for dictionary in listOfDict:
        # for key in dictionary:
        if dictionary["username"] == val:
            return dictionary
    return None

def binarySearch(theList, value, low, high):
    # theList must be sorted in ascending order.
    if high < low:
        return None  # indicates the value isn't in theList
    mid = int(math.floor((low + high) / 2))
    if theList[mid] > value:
        return binarySearch(theList, value, low, mid - 1)
    elif theList[mid] < value:
        return binarySearch(theList, value, mid + 1, high)
    else:
        return mid

def listHas(theList, target):
    val = binarySearch(theList, target, 0, len(theList) - 1)
    if val is None:
        return False
    else:
        return True

def isURL(url):
    if urlRegex.match(url) is None:
        return False
    else:
        return True

def getTimeString():
    return time.strftime("%H:%M:%S")

def bbLog(s):
    print "#BikeBit [" + getTimeString() + "] - " + s
# END UTILITY FUNCTIONS
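A brief usage sketch for the helpers above, assuming the module is saved as bikebit_utils.py (a hypothetical name); note that binarySearch and listHas expect the list to be sorted in ascending order.

from bikebit_utils import randomInt, listHas, isURL, bbLog  # hypothetical module name

values = [1, 3, 5, 8, 13]                # must be sorted for listHas/binarySearch
print(listHas(values, 8))                # True
print(listHas(values, 7))                # False
print(randomInt(0, 5))                   # uniform integer in [0, 5)
print(isURL("https://example.com/x"))    # True
bbLog("startup complete")                # "#BikeBit [HH:MM:SS] - startup complete"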
0.019581
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PySncosmo(PythonPackage):
    """SNCosmo is a Python library for high-level supernova cosmology
    analysis."""

    homepage = "http://sncosmo.readthedocs.io/"
    url = "https://pypi.io/packages/source/s/sncosmo/sncosmo-1.2.0.tar.gz"

    version('1.2.0', sha256='f3969eec5b25f60c70418dbd64765a2b4735bb53c210c61d0aab68916daea588')

    # Required dependencies
    # py-sncosmo binaries are duplicates of those from py-astropy
    extends('python', ignore=r'bin/.*')
    depends_on('py-setuptools', type='build')
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-scipy', type=('build', 'run'))
    depends_on('py-astropy', type=('build', 'run'))

    # Recommended dependencies
    depends_on('py-matplotlib', type=('build', 'run'))
    depends_on('py-iminuit', type=('build', 'run'))
    depends_on('py-emcee', type=('build', 'run'))
    depends_on('py-nestle', type=('build', 'run'))
0.000883
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import unittest from unittest import mock from parameterized import parameterized from airflow.providers.google.cloud.hooks.cloud_storage_transfer_service import GcpTransferOperationStatus from airflow.providers.google.cloud.sensors.cloud_storage_transfer_service import ( CloudDataTransferServiceJobStatusSensor, ) TEST_NAME = "transferOperations/transferJobs-123-456" TEST_COUNTERS = { "bytesFoundFromSource": 512, "bytesCopiedToSink": 1024, } class TestGcpStorageTransferOperationWaitForJobStatusSensor(unittest.TestCase): @mock.patch( 'airflow.providers.google.cloud.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook' ) def test_wait_for_status_success(self, mock_tool): operations = [ { 'name': TEST_NAME, 'metadata': { 'status': GcpTransferOperationStatus.SUCCESS, 'counters': TEST_COUNTERS, }, } ] mock_tool.return_value.list_transfer_operations.return_value = operations mock_tool.operations_contain_expected_statuses.return_value = True op = CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=GcpTransferOperationStatus.SUCCESS, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) mock_tool.return_value.list_transfer_operations.assert_called_once_with( request_filter={'project_id': 'project-id', 'job_names': ['job-name']} ) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations, expected_statuses={GcpTransferOperationStatus.SUCCESS} ) self.assertTrue(result) @mock.patch( 'airflow.providers.google.cloud.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook' ) def test_wait_for_status_success_default_expected_status(self, mock_tool): op = CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=GcpTransferOperationStatus.SUCCESS, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=mock.ANY, expected_statuses={GcpTransferOperationStatus.SUCCESS} ) self.assertTrue(result) @mock.patch( 'airflow.providers.google.cloud.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook' ) def test_wait_for_status_after_retry(self, mock_tool): operations_set = [ [ { 'name': TEST_NAME, 'metadata': { 'status': GcpTransferOperationStatus.SUCCESS, 'counters': TEST_COUNTERS, }, }, ], [ { 'name': TEST_NAME, 'metadata': { 'status': GcpTransferOperationStatus.SUCCESS, 'counters': TEST_COUNTERS, }, }, ], ] mock_tool.return_value.list_transfer_operations.side_effect = operations_set mock_tool.operations_contain_expected_statuses.side_effect = [False, True] op = 
CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=GcpTransferOperationStatus.SUCCESS, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) self.assertFalse(result) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations_set[0], expected_statuses={GcpTransferOperationStatus.SUCCESS} ) mock_tool.operations_contain_expected_statuses.reset_mock() result = op.poke(context) self.assertTrue(result) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations_set[1], expected_statuses={GcpTransferOperationStatus.SUCCESS} ) @parameterized.expand( [ (GcpTransferOperationStatus.SUCCESS, {GcpTransferOperationStatus.SUCCESS}), ({GcpTransferOperationStatus.SUCCESS}, {GcpTransferOperationStatus.SUCCESS}), ( {GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.SUCCESS}, {GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.SUCCESS}, ), ] ) @mock.patch( 'airflow.providers.google.cloud.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook' ) def test_wait_for_status_normalize_status(self, expected_status, received_status, mock_tool): operations = [ { 'name': TEST_NAME, 'metadata': { 'status': GcpTransferOperationStatus.SUCCESS, 'counters': TEST_COUNTERS, }, } ] mock_tool.return_value.list_transfer_operations.return_value = operations mock_tool.operations_contain_expected_statuses.side_effect = [False, True] op = CloudDataTransferServiceJobStatusSensor( task_id='task-id', job_name='job-name', project_id='project-id', expected_statuses=expected_status, ) context = {'ti': (mock.Mock(**{'xcom_push.return_value': None}))} result = op.poke(context) self.assertFalse(result) mock_tool.operations_contain_expected_statuses.assert_called_once_with( operations=operations, expected_statuses=received_status )
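A hedged sketch of how the sensor exercised by these tests might be wired into a DAG; the DAG id, schedule, start date, job name, and project id are placeholders rather than values taken from the tests.

from datetime import datetime

from airflow import DAG
from airflow.providers.google.cloud.hooks.cloud_storage_transfer_service import GcpTransferOperationStatus
from airflow.providers.google.cloud.sensors.cloud_storage_transfer_service import (
    CloudDataTransferServiceJobStatusSensor,
)

with DAG(dag_id="example_wait_for_transfer", start_date=datetime(2021, 1, 1), schedule_interval=None) as dag:
    # Pokes list_transfer_operations() until the job's operations reach SUCCESS.
    wait_for_transfer_job = CloudDataTransferServiceJobStatusSensor(
        task_id="wait-for-transfer-job",
        job_name="transferJobs/example-job",   # placeholder job name
        project_id="example-project",          # placeholder project id
        expected_statuses={GcpTransferOperationStatus.SUCCESS},
    )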
0.003195
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2010-2012 OpenERP S.A. (<http://www.openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import osv


class ir_model_access(osv.Model):
    _inherit = 'ir.model.access'

    # Overload group_names_with_access() to avoid returning sharing groups,
    # by filtering out groups with share=true.
    def group_names_with_access(self, cr, model_name, access_mode):
        """Returns the names of visible groups which have been granted
           ``access_mode`` on the model ``model_name``.

           :rtype: list
        """
        assert access_mode in ['read', 'write', 'create', 'unlink'], \
            'Invalid access mode: %s' % access_mode
        cr.execute('''SELECT c.name, g.name
                      FROM ir_model_access a
                        JOIN ir_model m ON (a.model_id = m.id)
                        JOIN res_groups g ON (a.group_id = g.id)
                        LEFT JOIN ir_module_category c ON (c.id = g.category_id)
                      WHERE m.model = %s AND
                            a.active IS true AND
                            (g.share IS NULL or g.share IS false) AND
                            a.perm_''' + access_mode, (model_name,))
        return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
0.003081
from twisted.internet import reactor
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.protocols.basic import Int32StringReceiver
from carbon.conf import settings
from carbon import log

try:
    import cPickle as pickle
except ImportError:
    import pickle


MAX_DATAPOINTS_PER_MESSAGE = settings.MAX_DATAPOINTS_PER_MESSAGE


def connect(host, port):
    global connectionFactory
    connectionFactory = MetricSenderFactory(host, port)
    reactor.connectTCP(host, port, connectionFactory)


def send_metric(metric, datapoint):
    connectionFactory.send(metric, datapoint)


class MetricPickleSender(Int32StringReceiver):
    def connectionMade(self):
        self.paused = False
        self.transport.registerProducer(self, streaming=True)
        self.flushQueue()

    def pauseProducing(self):
        self.paused = True

    def resumeProducing(self):
        self.paused = False
        self.flushQueue()

    def stopProducing(self):
        self.transport.loseConnection()

    def flushQueue(self):
        while (not self.paused) and self.queue:
            datapoints = self.queue[:MAX_DATAPOINTS_PER_MESSAGE]
            self.queue = self.factory.queue = self.queue[MAX_DATAPOINTS_PER_MESSAGE:]
            self.sendString(pickle.dumps(datapoints, protocol=-1))

    def send(self, metric, datapoint):
        if self.paused:
            self.queue.append((metric, datapoint))
        elif self.queue:
            self.queue.append((metric, datapoint))
            self.flushQueue()
        else:
            datapoints = [(metric, datapoint)]
            self.sendString(pickle.dumps(datapoints, protocol=-1))


class MetricSenderFactory(ReconnectingClientFactory):
    connectedProtocol = None
    maxDelay = 10

    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.remoteAddr = "%s:%d" % (host, port)
        self.queue = []

    def startedConnecting(self, connector):
        log.aggregator('connecting to %s' % self.remoteAddr)

    def buildProtocol(self, addr):
        log.aggregator('connection to %s established' % self.remoteAddr)
        self.connectedProtocol = MetricPickleSender()
        self.connectedProtocol.factory = self
        self.connectedProtocol.queue = self.queue
        return self.connectedProtocol

    def send(self, metric, datapoint):
        if len(self.queue) >= settings.MAX_QUEUE_SIZE:
            log.aggregator('send queue full for %s, dropping data' % self.remoteAddr)
        elif self.connectedProtocol:
            self.connectedProtocol.send(metric, datapoint)
        else:
            self.queue.append((metric, datapoint))

    def clientConnectionLost(self, connector, reason):
        ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
        self.connectedProtocol = None
        log.aggregator("connection to %s lost: %s" % (self.remoteAddr, reason.value))

    def clientConnectionFailed(self, connector, reason):
        ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
        log.aggregator("connection attempt to %s failed: %s" % (self.remoteAddr, reason.value))
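A minimal usage sketch for the sender above, assuming it is importable as metric_sender (a hypothetical module name) and that a pickle receiver is listening on the given host and port; the metric name and address are placeholders.

import time

from twisted.internet import reactor

import metric_sender  # hypothetical module name for the code above

metric_sender.connect("127.0.0.1", 2004)  # placeholder host/port for a pickle receiver
metric_sender.send_metric("servers.web01.loadavg", (time.time(), 0.42))
reactor.callLater(2, reactor.stop)        # give the factory time to connect and flush
reactor.run()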
0.015244
#!/usr/bin/env python # This file is part of VoltDB. # Copyright (C) 2008-2017 VoltDB Inc. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. import decimal import os.path import re import random import time import datetime from math import ceil from sys import maxint from voltdbclient import * # for VoltDB types from optparse import OptionParser # for use in standalone test mode # Need these to print non-ascii characters: import codecs import sys UTF8Writer = codecs.getwriter('utf8') sys.stdout = UTF8Writer(sys.stdout) COUNT = 2 # number of random values to generate by default ALLOW_SELF_JOIN = True def field_name_generator(): i = 0 while True: yield "{field_%d}" % (i) i += 1 fn_generator = field_name_generator() class IntValueGenerator: """This is the base generator for integers. Typically, integer values would be specified as, e.g., _value[int:0,10], which would yield a small number of random values (two, by default) between 0 and 10 (inclusive). However, it is also possible to specify a count, e.g., _value[int:0,10;5] would yield five random values between 0 and 10; or you can specify a step, e.g., _value[int:0,10,2] (note the comma, rather than semi-colon) would yield the (non-random) values 0, 2, 4, 6, 8, 10; or, you may specify both, e.g., _value[int:6,12,3;9] would yield the 9 (non-random) values 6, 9, 12, 8, 11, 7, 10, 6, 9; notice how the values increase by 3, but cycle around via the mod (%) operator, always between the specified min of 6 and max of 12. It is also possible to specify the type of integer you want, i.e.: _value[byte], _value[int16], _value[int32], or _value[int64], though in that case you will always get random values, whose min and max values are defined for you. You may also specify a null percentage, e.g., _value[byte null25] will yield random byte values (between -127 and 127, inclusive), with a 25% chance of being null. 
""" def __init__(self): self.__min = -maxint - 1 self.__max = maxint self.__step = 0 self.__count = 0 self.__nullpct = 0 def set_min_max(self, min, max, step=0): self.__min = int(min) self.__max = int(max) self.__step = int(step) # If step is specified by count is not, set it large enough to cover # the range between min and max, with the given step size if step and not self.__count: self.__count = int(ceil( (self.__max + 1.0 - self.__min) / self.__step )) def set_count(self, count): self.__count = int(count) def set_nullpct(self, nullpct): self.__nullpct = nullpct def generate_values(self, count): for i in xrange(max(count, self.__count)): if self.__nullpct and (random.randint(0, 100) < self.__nullpct): yield None # If the step was specified, return non-random integer values, # starting with the min, ending with the max, increasing by the # step, and cycling around via the mod (%) operator, if the count # requires additional values elif self.__step: yield self.__min + ((i*self.__step) % (self.__max+1 - self.__min)) else: yield random.randint(self.__min, self.__max) class ByteValueGenerator(IntValueGenerator): """This generates bytes. """ def __init__(self): IntValueGenerator.__init__(self) self.set_min_max(-127, 127) class Int16ValueGenerator(IntValueGenerator): """This generates 16-bit integers. """ def __init__(self): IntValueGenerator.__init__(self) self.set_min_max(-32767, 32767) class Int32ValueGenerator(IntValueGenerator): """This generates 32-bit integers. """ def __init__(self): IntValueGenerator.__init__(self) self.set_min_max(-2147483647, 2147483647) class Int64ValueGenerator(IntValueGenerator): """This generates 64-bit integers. """ def __init__(self): IntValueGenerator.__init__(self) self.set_min_max(-9223372036854775807, 9223372036854775807) class FloatValueGenerator: """This generates 64-bit float. """ def __init__(self): self.__nullpct = 0 def set_nullpct(self, nullpct): self.__nullpct = nullpct def generate_values(self, count): for i in xrange(count): if self.__nullpct and (random.randint(0, 100) < self.__nullpct): yield None else: yield random.random() class DecimalValueGenerator: """This generates Decimal values. """ def __init__(self): # currently VoltDB values support 12 digits of precision. # generate constant values to 3 digits of precision to give HSQL room # to do exact math (multiplications) within 12 bits of precision. # Otherwise, it complains rather than rounding. decimal.getcontext().prec = 3 self.__nullpct = 0 def set_nullpct(self, nullpct): self.__nullpct = nullpct def generate_values(self, count): for i in xrange(count): # we support 7 digits of scale, so magnify those tiny floats if self.__nullpct and (random.randint(0, 100) < self.__nullpct): yield None else: yield decimal.Decimal(str(random.random() * 100.00)) class PointValueGenerator: """This generates (random) point (GEOGRAPHY_POINT) values. 
""" # It's annoying to have random numbers with 12 digits, so we limit it to # a small number beyond the decimal point DIGITS_BEYOND_DECIMAL_POINT = 2 def __init__(self): decimal.getcontext().prec = PointValueGenerator.DIGITS_BEYOND_DECIMAL_POINT self.__nullpct = 0 # By default, random points can be anywhere on Earth self.set_min_max(-180.0, 180.0, -90.0, 90.0) def set_min_max(self, longmin, longmax, latmin=None, latmax=None): self.__longmin = float(longmin) self.__longdiff = float(longmax) - self.__longmin if latmin is not None: self.__latmin = float(latmin) else: self.__latmin = self.__longmin if latmax is not None: self.__latdiff = float(latmax) - self.__latmin else: self.__latdiff = float(longmax) - self.__latmin def set_nullpct(self, nullpct): self.__nullpct = nullpct def generate_values(self, count): for i in xrange(count): if self.__nullpct and (random.randint(0, 100) < self.__nullpct): yield None else: longitude = round(self.__longmin + (self.__longdiff * random.random()), PointValueGenerator.DIGITS_BEYOND_DECIMAL_POINT) latitude = round(self.__latmin + (self.__latdiff * random.random()), PointValueGenerator.DIGITS_BEYOND_DECIMAL_POINT) yield "pointFromText('POINT ("+str(longitude)+" "+str(latitude)+")')" class PolygonValueGenerator: """This generates (random) polygon (GEOGRAPHY) values. """ # It's annoying to have random numbers with 12 digits, so we limit it to # a reasonable number beyond the decimal point, but not too small since # too much rounding can cause invalid polygons DIGITS_BEYOND_DECIMAL_POINT = 6 def __init__(self): decimal.getcontext().prec = PolygonValueGenerator.DIGITS_BEYOND_DECIMAL_POINT self.__nullpct = 0 # A negative value indicates a random number of holes (interior rings), # with the number of holes ranging between 0 and the absolute value of # the given number; so, in this case, -4 means between 0 and 4 holes. self.__num_holes = -4 # By default, polygons are restricted to be somewhere within Colorado, # since it has a nice, square-ish shape; and a polygon with random # vertices covering the entire Earth would not make a lot of sense. # (Note: this an approximate version of Colorado, since it does not # take into account that latitude lines are not great circles.) self.set_min_max(-109.05, -102.05, 37.0, 41.0) def set_min_max(self, longmin, longmax, latmin=None, latmax=None): self.__longmin = float(longmin) self.__longmax = float(longmax) if latmin is not None: self.__latmin = float(latmin) else: self.__latmin = self.__longmin if latmax is not None: self.__latmax = float(latmax) else: self.__latmax = self.__longmax def set_nullpct(self, nullpct): self.__nullpct = nullpct def set_count(self, num_holes): self.__num_holes = int(num_holes) def generate_vertex(self, longmin, longmax, latmin, latmax): """Generates a point that can be used as the vertex of a polygon, at a random location in between the specified minimum and maximum longitude and latitude values, with a small buffer so that it is not right up against the edge. 
""" delta = longmax - longmin longitude = round(longmin + (0.1 * delta) + (0.8 * delta * random.random()), PolygonValueGenerator.DIGITS_BEYOND_DECIMAL_POINT) delta = latmax - latmin latitude = round(latmin + (0.1 * delta) + (0.8 * delta * random.random()), PolygonValueGenerator.DIGITS_BEYOND_DECIMAL_POINT) return str(longitude)+" "+str(latitude) def generate_loop(self, longmin, longmax, latmin, latmax, clockwise=False): """Generates a loop, which can be used as the exterior ring or an interior ring (i.e., a hole) of a polygon, with between 4 and 8 vertices, at random locations in between the specified minimum and maximum longitude and latitude values; clockwise=True should be used if and only if this is an interior ring. """ # Divide the specified region up into a 3x3 grid of 9 roughly equal spaces, # like a tic-tac-toe board, but leave out the middle space, which can be # used later for holes, if this is an exterior ring. Start in the lower left # space (or "octant", since there are 8 of them without the middle), and # move counter-clockwise (the default) or clockwise until you reach the # lower left space again. In the corner spaces, you always choose a random # vertex; but the "middle" spaces are optional: you randomly decide (50-50) # whether to specify a vertex there or not. # The first octant, [0, 0], is omitted here because it is dealt with # specially, being both the first and last vertex octants = [[1, 0], [2, 0], [2, 1], [2, 2], [1, 2], [0, 2], [0, 1]] if clockwise: octants.reverse() long_delta = (longmax - longmin) / 3.0 lat_delta = (latmax - latmin ) / 3.0 first_and_last_vertex = self.generate_vertex(longmin, longmin+long_delta, latmin, latmin+lat_delta) loop = '(' + first_and_last_vertex + ', ' for oct in range(len(octants)): i, j = octants[oct][0], octants[oct][1] # vertices in the "middle" octants are optional (unlike the corners) if i == 1 or j == 1 and random.randint(0, 100) < 50: continue loop += self.generate_vertex(longmin+i*long_delta, longmin+(i+1)*long_delta, latmin+j*lat_delta, latmin+(j+1)*lat_delta) + ', ' return loop + first_and_last_vertex + ')' def generate_values(self, count): """Generates a polygon, whose first loop is always a counter-clockwise exterior ring, with vertices at random locations in between the specified minimum and maximum longitude and latitude values; there may or may not be additional loops which represent clockwise interior rings, i.e., holes. Holes are specified as being within one of 4 quadrants of the middle "space" (see generate_loop above) of the exterior ring. More than 4 holes is not recommended, as they will start to overlap, causing an invalid polygon. """ quadrants = [[0, 0], [1, 0], [1, 1], [0, 1]] for n in xrange(count): if self.__nullpct and (random.randint(0, 100) < self.__nullpct): yield None else: polygon = "polygonFromText('POLYGON (" + self.generate_loop(self.__longmin, self.__longmax, self.__latmin, self.__latmax) num_holes = self.__num_holes if num_holes < 0: num_holes = random.randint(0, -num_holes) if num_holes: long_delta = (self.__longmax - self.__longmin) / 6.0 lat_delta = (self.__latmax - self.__latmin ) / 6.0 longmin = self.__longmin + 2*long_delta latmin = self.__latmin + 2*lat_delta for h in range(num_holes): i, j = quadrants[h%4][0], quadrants[h%4][1] polygon += ', ' + self.generate_loop(longmin+i*long_delta, longmin+(i+1)*long_delta, latmin+j*lat_delta, latmin+(j+1)*lat_delta, True) yield polygon + ")')" class StringValueGenerator: """This generates strings. 
""" # Define the ASCII-only alphabet, to be used to generate strings ALPHABET = u"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" # For the extended, non-ASCII alphabet, add the letter e with various accents EXTENDED_ALPHABET = ALPHABET + u'\u00e9\u00e8\u00ea\u00eb' # Add some (upper & lower case) Greek characters (that do not resemble Latin letters) EXTENDED_ALPHABET += u'\u0393\u0394\u03b1\u03b2' # Add some (upper & lower case) Cyrillic (e.g. Russian) characters (that do not resemble Latin letters) EXTENDED_ALPHABET += u'\u0429\u042F\u0449\u044F' # Add some Japanese characters (which together mean 'frog') EXTENDED_ALPHABET += u'\u30ab\u30a8\u30eb' # Add some (simplified) Chinese characters (which together mean 'frog') EXTENDED_ALPHABET += u'\u9752\u86d9' # Initial, default value __ascii_only = False @staticmethod def set_ascii_only(ascii_only): StringValueGenerator.__ascii_only = ascii_only @staticmethod def get_alphabet(): if StringValueGenerator.__ascii_only: return StringValueGenerator.ALPHABET else: return StringValueGenerator.EXTENDED_ALPHABET def __init__(self): self.__nullpct = 0 def set_nullpct(self, nullpct): self.__nullpct = nullpct def generate_values(self, count, length = 14): for i in xrange(count): list = [random.choice(StringValueGenerator.get_alphabet()) for y in xrange(length)] if self.__nullpct and (random.randint(0, 100) < self.__nullpct): yield None else: yield u"".join(list) class VarbinaryValueGenerator: """This generates byte strings expressed as pairs of hex digits. """ HEXDIGIT = u"0123456789ABCDEF" def __init__(self): self.__nullpct = 0 def set_nullpct(self, nullpct): self.__nullpct = nullpct def generate_values(self, count, length = 17): for i in xrange(count): list = [random.choice(VarbinaryValueGenerator.HEXDIGIT) for y in xrange(length*2)] # length*2 hex digits gives whole bytes if self.__nullpct and (random.randint(0, 100) < self.__nullpct): yield None else: yield u"".join(list) class TimestampValueGenerator: """This generates timestamps in a reasonable range. """ #The MIN_MILLIS_SINCE_EPOCH is the lower bound of the generator, and its timestamp is #1843-03-31 11:57:18.000000. The MAX_MILLIS_SINCE_EPOCH is the upper bound of the generator, #and its timestamp is 2027-01-15 03:00:00.000000. Negative number is to generate timestamp #prior to the unix epoch. MIN_MILLIS_SINCE_EPOCH = -3000000000 MAX_MILLIS_SINCE_EPOCH = 1800000000 def __init__(self): self.__nullpct = 0 def set_nullpct(self, nullpct): self.__nullpct = nullpct def generate_values(self, count): for i in xrange(count): if self.__nullpct and (random.randint(0, 100) < self.__nullpct): yield None else: r = random.uniform(TimestampValueGenerator.MIN_MILLIS_SINCE_EPOCH, TimestampValueGenerator.MAX_MILLIS_SINCE_EPOCH) ts = datetime.datetime.fromtimestamp(r) #The format is YYYY-MM-DD HH:MM:SS.mmmmmm s = ts.isoformat(' ') #According to the python document, the datetime.isoformat() will not show #microsecond "mmmmmm" if datetime.microsecond is 0. So here we manually add #trailing zeros if datetime.microsecond is 0. #(https://docs.python.org/2/library/datetime.html) if ts.microsecond == 0: s += '.000000' #HSQL's resolution is millisecond while VoltDB's is microsecond. We rounded #the timestamp down to millisecond so that both databases store the same data. s = s[:-3]+'000' yield s class BaseGenerator: """This is the base class for all non-value generators (operator generator, column generator, etc.). 
""" def __init__(self, token): global fn_generator self.__token = token self.fn_gen = fn_generator self.__fn = None self.__label = None self.values = [] self.reserved_value = None self.prior_generator = None # For now, all generators use the same pattern to capture generator attributes, # even though most of them end up ignoring the attributes that don't affect them. # Some attributes are very general, like the label, while others like min and max apply very narrowly # (currently to numeric value generators). # The named attribute facility allows arbitrary named attributes to be specified as # "<name1=value1 name2=value2>" with no embedded spaces allowed except to separate attributes (as shown). # Generators are free to honor or ignore any such attributes. # For labeled tokens, like "_X[#Y...]", attributes are only processed on the first occurrence of each unique # token/label combination in a statement and ignored on other occurrences. # INDF = r"IS\s+(NOT\s+)?DISTINCT\s+FROM" # token (starting with '_') # | optional attribute section between []s # | | LABEL_PATTERN_GROUP = "label" # optional label for variables # | | | TYPE_PATTERN_GROUP = "type" # optional type for columns, values # | | | | MIN_VALUE_PATTERN_GROUP = "min" # optional min (only for numeric values) # | | | | | MAX_VALUE_PATTERN_GROUP = "max" # optional max (only for numeric values) # | | | | | | __EXPR_TEMPLATE = r"%s" r"(\[\s*" r"(#(?P<label>\w+)\s*)?" r"(?P<type>\w+|"+INDF+r"|[=<>!]{1,2})?\s*" r"(:(?P<min>(-?\d*\.?\d*)),(?P<max>(-?\d*\.?\d*))" \ r"(,(?P<latmin>(-?\d*\.?\d*))(,(?P<latmax>(-?\d*\.?\d*)))?)?)?(;(?P<numholes>(-?\d+)))?\s*" r"(null(?P<nullpct>(\d*)))?" r"\])?" # | | | | | # | | | | end of [] attribute section NULL_PCT_PATTERN_GROUP = "nullpct" # optional null percentage # | | | NUM_HOLES_PATTERN_GROUP = "numholes" # number of holes (for polygon values); or the count (for int values) # | | MAX_LAT_PATTERN_GROUP = "latmax" # optional latitude max (only for geo values) # | MIN_LAT_PATTERN_GROUP ="latmin" # optional latitude min (for geo values); or the step (for int values) # A simpler pattern with no group capture is used to find recurrences of (references to) definition # patterns elsewhere in the statement, identified by label. # These can either be token-type-specific, like "_variable[#number_col]" or generic equivalents # like "__[#number_col]" either of which would match a prior "_variable[#number_col int]". # Since the "__" syntax never introduces a definition, it is convenient for use as a forward # reference to a definition provided later on in the statement. # token (starting with '_') or just '__' to match/reuse ANY token # | '[' required # | | matching label, required # | | | attributes or other garbage optional/ignored # | | | | final ']' required # | | | | | __RECURRENCE_TEMPLATE = r"(%s|__)\[\s*#%s[^\]]*\]" # List of column names for Geo types, i.e., point and polygon (GEOGRAPHY_POINT and GEOGRAPHY), # which may need to be wrapped in AsText(...) __GEO_COLUMN_NAMES = ['PT1', 'PT2', 'PT3', 'POLY1', 'POLY2', 'POLY3'] # List of possible prefixes for those column names, i.e., either a table name alias with '.', # or nothing at all; the empty one (no table name prefix) must be last __GEO_COLUMN_PREFIXES = ['A.', 'B.', 'LHS.', ''] # List of Geo functions, which indicate that the Geo column is already appropriately # wrapped, so you don't need to add AsText(...) 
__GEO_FUNCTION_NAMES = ['AREA', 'ASTEXT', 'CAST', 'CENTROID', 'CONTAINS', 'COUNT', 'DISTANCE', 'DWITHIN', 'ISVALID', 'ISINVALIDREASON', 'LATITUDE', 'LONGITUDE', 'NUMINTERIORRINGS', 'NUMPOINTS'] # Similar list, of Geo functions with two arguments __GEO_FUNCS_W2_ARGS = ['CONTAINS', 'DISTANCE', 'DWITHIN'] @classmethod def _expr_builder(cls, tag): return re.compile(cls.__EXPR_TEMPLATE % (tag)) @classmethod def _recurrence_builder(cls, tag, label): ### print "DEBUG: recurrence template: " + (cls.__RECURRENCE_TEMPLATE % (tag, label)) return re.compile(cls.__RECURRENCE_TEMPLATE % (tag, label)) def generate_statements(self, statement): """statement is an sql statement pattern which still needs some field name resolution. globally substitute each of the generator's candidate parameters. """ for i in self.next_param(): yield statement.replace(self.__fn, i) @classmethod def wrap_astext_around_geo_columns_in_fragment(cls, statement_fragment): """ In the specified partial SQL statement, or fragment, wrap AsText(...) around Geo types (point and polygon, i.e., GEOGRAPHY_POINT and GEOGRAPHY), but only if it is not already wrapped in one of the Geo functions, e.g., AsText(PT1), LONGITUDE(PT1), AREA(POLY1), DISTANCE(PT2,POLY3), etc. """ result = statement_fragment statement_fragment_upper = statement_fragment.upper().replace(' ', '') for col in BaseGenerator.__GEO_COLUMN_NAMES: if col in statement_fragment_upper: found = False for tbl in BaseGenerator.__GEO_COLUMN_PREFIXES: # Do not sub for empty column prefix (i.e., table # name), if already handled a non-empty one if found and not tbl: break if tbl+col in statement_fragment_upper: found = True if not any(f+'('+tbl+col in statement_fragment_upper for f in BaseGenerator.__GEO_FUNCTION_NAMES) and \ not any(f+'('+t+c+','+tbl+col in statement_fragment_upper for f in BaseGenerator.__GEO_FUNCS_W2_ARGS for t in BaseGenerator.__GEO_COLUMN_PREFIXES for c in BaseGenerator.__GEO_COLUMN_NAMES): result = result.replace(tbl+col, 'AsText('+tbl+col+')') ### print "DEBUG: Modified fragment : ", result return result @classmethod def wrap_astext_around_geo_columns(cls, statement): """ Cannot compare Geo types (point and polygon, i.e., GEOGRAPHY_POINT and GEOGRAPHY) against PostGIS, so, in a SELECT statement, we have to get them in text form, instead; e.g., replace 'PT1' with AsText(PT1) or 'A.POLY1' with AsText(A.POLY1), but only in the part of a SELECT statement before 'FROM', or after 'ORDER BY' (or between the 'THEN' part of a CASE statement and a FROM that comes after it), and only if it is not already wrapped in one of the Geo functions, e.g., AsText(PT1), LONGITUDE(PT1), AREA(POLY1), DISTANCE(PT2,POLY3), etc. (Note: this works for the CASE statements currently used in SQLCoverage, but may not for all possible CASE statements.) """ result = statement statement_upper = statement.upper() if statement_upper.startswith('SELECT') and any(x in statement for x in BaseGenerator.__GEO_COLUMN_NAMES): # Normally, we will wrap AsText(...) around geo columns before FROM or after ORDER BY wrap_before_index = statement_upper.find(' FROM ') wrap_after_index = statement_upper.find(' ORDER BY ') wrap_between_index = -1 # Special case for handling a CASE statement if (' CASE ' in statement_upper and ' WHEN ' in statement_upper and ' THEN ' in statement_upper and ' END ' in statement_upper): then_index = statement_upper.find(' THEN ') # When FROM comes after CASE/THEN, wrap AsText(...) 
around # columns that come before CASE or between THEN and FROM if wrap_before_index > then_index: wrap_between_index = wrap_before_index wrap_before_index = statement_upper.find(' CASE ') if wrap_after_index > 0: before_text = result[0:wrap_after_index] after_text = result[wrap_after_index:] result = before_text + BaseGenerator.wrap_astext_around_geo_columns_in_fragment(after_text) if wrap_between_index > 0: before_text = result[0:then_index] between_text = result[then_index:wrap_between_index] after_text = result[wrap_between_index:] result = before_text + BaseGenerator.wrap_astext_around_geo_columns_in_fragment(between_text) + after_text if wrap_before_index > 0: before_text = result[0:wrap_before_index] after_text = result[wrap_before_index:] result = BaseGenerator.wrap_astext_around_geo_columns_in_fragment(before_text) + after_text return result @classmethod def prepare_generators(cls, statement, schema, generator_types): """prepare fields and generators for each generator pattern in statement. """ new_generators = [] field_map = {} # no table generator yet prior_generators = {} for ctor in generator_types: while True: another_gen = ctor() rewrite, field_name = another_gen.prepare_fields(statement) if rewrite: prior_generators = another_gen.configure_from_schema(schema, prior_generators) field_map[field_name] = another_gen ### print "DEBUG field_map[" + field_name + "] got " + another_gen.debug_gen_to_string() new_generators.append(another_gen) statement = rewrite else: break return statement, new_generators, field_map def configure_from_schema(self, schema, unchanged_prior_generators): """ The generator class (unlike ColumnGenerator/TableGenerator) may not be affected by schema """ return unchanged_prior_generators @classmethod def generate_statements_from_list(cls, stmt, generators, field_map): """A utility that generates multiple statement strings by substituting a set of values for each specially marked field in the input string, resulting in all the possible combinations. Each generator is responsible for recognizing its unique field mark and providing its set of substitutions. """ ###TODO: Use the currently ignored field_map or build a field-to-value map dynamically to ### divorce value combinatorics from statement re-write. if generators: # apply the next generator for generated_stmt in generators[0].generate_statements(stmt): # apply the remaining generators for complete_statement in BaseGenerator.generate_statements_from_list(generated_stmt, generators[1:], field_map): yield complete_statement else: # Saw the last generator, statement should be complete; now, make # sure Geo column types (point and polygon, i.e., GEOGRAPHY_POINT # and GEOGRAPHY) are not in a SELECT list (or ORDER BY) without # AsText, or some other function, wrapped around them yield BaseGenerator.wrap_astext_around_geo_columns(stmt) def prepare_fields(self, statement): """replace with a unique field name a definition with the generator's token and any (like-labeled) occurrences. Call prepare_params to initialize the generator with parameters from its definition. Return the modified statement, or None if there is no matching token in the statement. """ # match the token and capture all of its attributes definition = self._expr_builder(self.__token); match = definition.search(statement) # Stop when statement does not have any (more) of these tokens. 
if not match: return None, None # Process the label, the one universally applicable attribute self.__label = match.group(BaseGenerator.LABEL_PATTERN_GROUP) ### print "DEBUG: prepare_fields found " + self.__token + "[#" + ( self.__label or "" ) + "]" + " IN " + statement # Replace the definition with a generated unique field name self.__fn = self.fn_gen.next() rewrite = definition.sub(self.__fn, statement, 1) # Dispatch to let the specific generator class deal with any custom attributes self.prepare_params(match.groupdict()) # Anything with a label can recur, replace recurrences with the same field name. if self.__label: recurrence = self._recurrence_builder(self.__token, self.__label) rewrite = recurrence.sub(self.__fn, rewrite, 0) ### print "DEBUG: prepare_fields after " + self.__token + "[#" + ( self.__label or "" ) + "]" + " IN " + rewrite return rewrite, self.__fn def prepare_params(self, attribute_groups): """ abstract method implemented by all derived classes """ pass def next_param(self): for value in self.values: if self.prior_generator: if self.prior_generator.has_reserved(value): continue # To avoid self-join and other kinds of redundancy, don't reuse values. self.reserved_value = value yield value def has_reserved(self, name): if name == self.reserved_value: return True if not self.prior_generator: return False return self.prior_generator.has_reserved(name) def debug_gen_to_string(self): result = "generator: " + self.__token + " VALUES: " for val in self.values: result += val + ", " if self.reserved_value: result += "reserved: " + self.reserved_value return result class TableGenerator(BaseGenerator): """This replaces occurrences of token "_table" with a schema table name. For each statement, each of the tables from the current schema are bound to one of these _table generators in sequence to purposely avoid accidental repeats (self-joins). Occurrences of the same table name within a statement should be worked around via SQL aliases. """ def __init__(self): BaseGenerator.__init__(self, "_table") def configure_from_schema(self, schema, prior_generators): self.values = schema.get_tables() self.prior_generator = prior_generators.get("table") prior_generators["table"] = self # new table generator at the head of the chain return prior_generators def has_reserved(self, name): if ALLOW_SELF_JOIN: return False return super().has_reserved(name) class ColumnGenerator(BaseGenerator): """This replaces occurrences of token _variable with a column name. Within a statement, intended occurrences of the same column name must use the same '#label'. Attributes only matter on the first occurence of "_variable" for a given label. As a convenience, forward references can use the __[#label] syntax instead of _variable[#label] to defer locking in attribute settings until a later _variable occurrence. The column name is selected from the schema columns of any/all tables in the schema. As a result, inclusion of tables that define different column names in a single schema can result in test runs that mostly test error cases that reference undefined columns on particular tables. 
""" def __init__(self): BaseGenerator.__init__(self, "_variable") def prepare_params(self, attribute_groups): self.__supertype = attribute_groups[BaseGenerator.TYPE_PATTERN_GROUP] if not self.__supertype: self.__supertype = "" def configure_from_schema(self, schema, prior_generators): """ Get matching column values from schema """ self.values = schema.get_typed_columns(self.__supertype) self.prior_generator = prior_generators.get("variable") prior_generators["variable"] = self # new variable generator at the head of the chain return prior_generators class SymbolGenerator(BaseGenerator): """This replaces occurrences of token _symbol with a piece of text, such as a function name or a comparison operator. Within a statement, intended occurrences of the same symbol must use the same '#label'. Attributes only matter on the first occurrence of "_symbol" for a given label. As a convenience, forward references can use the __[#label] syntax instead of _symbol[#label] to defer locking in attribute settings until a later _symbol occurrence. """ def __init__(self): BaseGenerator.__init__(self, "_symbol") def prepare_params(self, attribute_groups): # The "TYPE_PATTERN_GROUP", which in ColumnGenerator describes the column type, # here actually refers to a symbol, which is typically a function name or a # comparison operator (including the "IS [NOT] DISTINCT FROM" operator). self.__symbol = attribute_groups[BaseGenerator.TYPE_PATTERN_GROUP] if not self.__symbol: self.__symbol = "" def configure_from_schema(self, schema, prior_generators): """ Get matching text values; does not actually use the schema. """ self.values.append(self.__symbol) self.prior_generator = prior_generators.get("symbol") prior_generators["symbol"] = self # new symbol generator at the head of the chain return prior_generators class ConstantGenerator(BaseGenerator): """This replaces a variable with an actual constant value. 
""" TYPES = {"int": IntValueGenerator, "byte": ByteValueGenerator, "int16": Int16ValueGenerator, "int32": Int32ValueGenerator, "int64": Int64ValueGenerator, "float": FloatValueGenerator, "string": StringValueGenerator, "varbinary": VarbinaryValueGenerator, "decimal": DecimalValueGenerator, "timestamp": TimestampValueGenerator, "point": PointValueGenerator, "polygon": PolygonValueGenerator} def __init__(self): BaseGenerator.__init__(self, "_value") self.__count = COUNT self.__type = None def prepare_params(self, attribute_groups): self.__type = attribute_groups[BaseGenerator.TYPE_PATTERN_GROUP] if not self.__type: print "Generator parse error -- invalid type:", self.__type assert self.__type min = attribute_groups[BaseGenerator.MIN_VALUE_PATTERN_GROUP] max = attribute_groups[BaseGenerator.MAX_VALUE_PATTERN_GROUP] latmin = attribute_groups[BaseGenerator.MIN_LAT_PATTERN_GROUP] latmax = attribute_groups[BaseGenerator.MAX_LAT_PATTERN_GROUP] numholes = attribute_groups[BaseGenerator.NUM_HOLES_PATTERN_GROUP] self.__value_generator = ConstantGenerator.TYPES[self.__type]() if min is not None and max is not None: if latmin is not None: if latmax is not None: self.__value_generator.set_min_max(min, max, latmin, latmax) else: self.__value_generator.set_min_max(min, max, latmin) else: self.__value_generator.set_min_max(min, max) if numholes is not None: self.__value_generator.set_count(numholes) nullpct = attribute_groups[BaseGenerator.NULL_PCT_PATTERN_GROUP] if nullpct: self.__value_generator.set_nullpct(int(nullpct)) def next_param(self): for i in self.__value_generator.generate_values(self.__count): if i == None: i = u"NULL" elif isinstance(i, basestring): # Points and polygon values do not want extra single-quotes around them if i.startswith('pointFromText(') or i.startswith('polygonFromText('): i = u"%s" % (i) # Varchar values do want single-quotes around them else: i = u"'%s'" % (i) elif isinstance(i, float): i = u"%.20e" % (i) yield unicode(i) class IdGenerator(BaseGenerator): """This replaces _id with a counter value unique to the entire run (at least unless/until reset with the 'initialize' class method). 
""" counter = 1 def __init__(self): BaseGenerator.__init__(self, "_id") def prepare_params(self, attribute_groups): pass def next_param(self): id = self.__class__.counter self.__class__.counter += 1 yield unicode(id) @classmethod def initialize(cls, start): cls.counter = start class LiteralGenerator: """This generates a piece of literal query text, usually as one of multiple choices for a MacroGenerator """ def __init__(self, literal): self.__literal = literal def generate_text(self): yield self.__literal class MacroGenerator: """This generates pieces of literal text chosen non-randomly in turn from a list of LiteralGenerator snippets that were added to the macro generator using the generator macro building syntax: {@name |= "one option"} {@name |= "another option"} """ def __init__(self): self.__choices = [] def add_choice(self, generator_list): self.__choices.append(generator_list) def generate_text(self): for generator_list in self.__choices: for statement in self.generate_text_from_list(generator_list): yield statement @classmethod def generate_text_from_list(cls, generator_list): if not generator_list: yield "" else: for fragment_head in generator_list[0].generate_text(): for fragment_tail in cls.generate_text_from_list(generator_list[1:]): yield fragment_head + fragment_tail class Schema: SUPERTYPES = { "byte": ("byte", "int", "numeric", ""), "int16": ("int16", "int", "numeric", ""), "int32": ("int32", "int", "numeric", ""), "int64": ("int64", "int", "numeric", ""), "float": ("float", "numeric", ""), "decimal": ("decimal", "numeric", ""), "string": ("string", "nonnumeric", ""), "varbinary": ("varbinary", "nonnumeric", ""), "timestamp": ("timestamp", "nonnumeric", ""), "point": ("point", "geo", "nonnumeric", ""), "polygon": ("polygon", "geo", "nonnumeric", ""), } TYPE_NAMES = { FastSerializer.VOLTTYPE_TINYINT: "byte", FastSerializer.VOLTTYPE_SMALLINT: "int16", FastSerializer.VOLTTYPE_INTEGER: "int32", FastSerializer.VOLTTYPE_BIGINT: "int64", FastSerializer.VOLTTYPE_FLOAT: "float", FastSerializer.VOLTTYPE_STRING: "string", FastSerializer.VOLTTYPE_VARBINARY: "varbinary", FastSerializer.VOLTTYPE_DECIMAL: "decimal", FastSerializer.VOLTTYPE_TIMESTAMP: "timestamp", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT: "point", FastSerializer.VOLTTYPE_GEOGRAPHY: "polygon", } def __init__(self, **kwargs): if 'filename' in kwargs: self.__init_from_file(kwargs['filename']) elif 'schema' in kwargs: self.__schema = kwargs['schema'] else: print "No schema provided" self.__col_by_type = {} self.__col_by_type[""] = {} self.__col_by_type["int"] = {} self.__col_by_type["geo"] = {} self.__col_by_type["numeric"] = {} self.__col_by_type["nonnumeric"] = {} # This does not refer to a column type, but to columns that are part of # the primary key, as specified by the "indexes" key, in the schema file self.__col_by_type["id"] = {} for code, supertype in Schema.TYPE_NAMES.iteritems(): self.__col_by_type[supertype] = {} for table, tabledict in self.__schema.iteritems(): for column in tabledict["columns"]: column_name = column[0]; type_name = Schema.TYPE_NAMES[column[1]] for supertype in Schema.SUPERTYPES[type_name]: # The column_name "keys" inserted here are the real data # -- the set of unique column names which by convention are usually # defined and typed identically on all of the tables in the schema. # The table value is just documentation for the curious. # It represents the last table that defined the column as # listed in the schema, so it's usually just the last table in the schema. 
self.__col_by_type[supertype][column_name] = table indexes = tabledict.get("indexes", None) if indexes: if isinstance(indexes, basestring): self.__col_by_type["id"][indexes] = table else: for index in indexes: self.__col_by_type["id"][index] = table def __init_from_file(self, filename): fd = open(filename, "r") self.__content = fd.read() fd.close() self.__schema = eval(self.__content.strip()) def get_tables(self): return self.__schema.keys() def get_typed_columns(self, supertype): return self.__col_by_type[supertype].keys() def debug_schema_to_string(self): result = "TABLES: " for table in self.get_tables(): result += table + ", " result += "COLUMNS: " for code, supertype in Schema.TYPE_NAMES.iteritems(): for column_name in self.get_typed_columns(supertype): result += supertype + " " + column_name + ", " return result class Template: def __init__(self, **kwargs): self.__lines = [] self.__dml = None self.__query = None if 'filename' in kwargs: self.__lines = self.__init_from_file(kwargs['filename']) elif 'lines' in kwargs: self.__lines = kwargs['lines'] else: print "No lines in template, no SQL will be generated" # Collect and filter out macro definitions self.scan_for_macros() LINE_COMMENT_PATTERN = re.compile(r'^\s*((--)|$)') # whitespace lines are comments, too INCLUDE_PATTERN = re.compile(r'^\s*<(\S+)>') def __init_from_file(self, filename): file_lines = [] if os.path.isfile(filename): fd = open(filename, "r") # If the file does not exist in the current directory, try the # adjacent 'include' directory else: fd = open('/../include/'.join(filename.rsplit('/', 1)), "r") for line in fd: if Template.LINE_COMMENT_PATTERN.search(line): # Skip classic SQL-style comments continue match = Template.INCLUDE_PATTERN.search(line) if match: include_file = match.group(1) include_lines = \ self.__init_from_file(os.path.join(os.path.dirname(filename), include_file)) file_lines.extend(include_lines) continue file_lines.append(line.strip()) fd.close() return file_lines def get_statements(self): return self.__lines MACRO_DEFINE_PATTERN = re.compile(r'{' r'(@\w+)' r'\s*=\s*' r'"(.*)"' r'\s*}' r'(\s*--.*)?$') GENERATOR_DEFINE_PATTERN = re.compile(r'{' r'(_\w+)' r'\s*\|=\s*' r'"(.*)"' r'\s*}' r'(\s*--.*)?$') def scan_for_macros(self): lines_out = [] self.__macros = {} self.__generators = {} previous_continued_line = '' for line in self.__lines: # Allow the use of '\' as a line-continuation character if previous_continued_line: line = previous_continued_line + line if line.endswith('\\'): previous_continued_line = line[:-1] + ' ' continue elif line.endswith('\\\n'): previous_continued_line = line[:-2] + ' ' continue else: previous_continued_line = '' if line.startswith('{'): match = Template.MACRO_DEFINE_PATTERN.search(line) if match: self.__macros[match.group(1)] = match.group(2) else: #Recognize and cache values for generator ("|=") macros. match = Template.GENERATOR_DEFINE_PATTERN.search(line) if match: generator_name = match.group(1) if generator_name not in self.__generators.keys(): self.__generators[generator_name] = MacroGenerator() option = self.apply_macros(match.group(2)) # Each option value gets recursively expanded here into a choice list. 
choice_list = self.__apply_generators(option) ### print "DEBUG:adding generator " + generator_name + " option " + option + " choice list size: ", len(choice_list) self.__generators[generator_name].add_choice(choice_list) else: print "WARNING: ignoring malformed definition: (" + line + ")" else: lines_out.append(line) self.__lines = lines_out MACRO_NAME_PATTERN = re.compile(r'@\w+') def apply_macros(self, line): pos = 0 while True: # Check for something to expand match = Template.MACRO_NAME_PATTERN.search(line[pos:]) if not match: ### print 'VERBOSE DEBUG no more macros for line "' + line + '"' return line key = match.group() # This could be a false positive. Check for exact key match sub = self.__macros.get(key, None) if sub == None: # nothing to see here. move along. pos += len(key) ### print 'VERBOSE DEBUG no macro defined for key "' + key + '"' continue pos += match.start() ### print 'VERBOSE DEBUG key "' + key + '" becomes "' + sub + '"' line = line[0:pos] + sub + line[pos+len(key):] GENERATOR_NAME_PATTERN = re.compile(r'_\w+') def __apply_generators(self, line): pos = 0 while True: # Check for something to expand match = Template.GENERATOR_NAME_PATTERN.search(line[pos:]) if not match: # The entire line represents a "choice" of one literal option. return [LiteralGenerator(line)] key = match.group() pos += match.start() # This could be a false positive. Check for exact key match choice = self.__generators.get(key, None) if choice: break # false alarm. nothing to see here. move along. pos += len(key) prefix = line[0:pos] # The prefix only has one fixed literal option. if prefix: result = [LiteralGenerator(prefix)] else: result = [] # Add the clause list or table as the next listed choice. result.append(choice) # Since options don't contain recursive unresolved references to generators # only the tail needs to be recursively processed and its resulting choice list # tacked on to the result. return result + self.__apply_generators(line[pos+len(key):]) def generate_statements_from_text(self, text): generator_list = self.__apply_generators(text) for statement in MacroGenerator.generate_text_from_list(generator_list): yield statement class SQLGenerator: def __init__(self, catalog, template, subversion_generation, ascii_only): StringValueGenerator.set_ascii_only(ascii_only) self.__subversion_generation = subversion_generation # Reset the counters IdGenerator.initialize(0) if isinstance(catalog, Schema): self.__schema = catalog else: self.__schema = Schema(filename=catalog) if isinstance(template, Template): self.__template = template else: self.__template = Template(filename=template) self.__statements = self.__template.get_statements() self.__min_statements_per_pattern = sys.maxint self.__max_statements_per_pattern = -1 self.__num_insert_statements = 0 self.__num_unresolved_statements = 0 GENERATOR_TYPES = (TableGenerator, ColumnGenerator, SymbolGenerator, ConstantGenerator, IdGenerator) UNRESOLVED_PUNCTUATION = re.compile(r'[][#@]') # Literally, ']', '[', '#', or '@'. UNRESOLVED_GENERATOR = re.compile(r'(^|\W)[_]') # The presence of an underbar apparently inside a quoted string after the LIKE keyword # is likely enough to be a false positive for an unresolved generator that it is # allowed to pass without the usual warning triggered by a leading underbar. 
LIKELY_FALSE_ALARMS = re.compile(r"LIKE '[^']*_.*'") def __generate_statement(self, text): text = self.__template.apply_macros(text) text = unicode(text) for statement in self.__template.generate_statements_from_text(text): ### print ('VERBOSE DEBUG: text and statement post-generate_statements_from_text: "' + text + '", "' + statement + '"') statement, generators, field_map = BaseGenerator.prepare_generators(statement, self.__schema, SQLGenerator.GENERATOR_TYPES) ### print ('VERBOSE DEBUG: prepared statement looks like: "' + statement + '"') if (SQLGenerator.UNRESOLVED_PUNCTUATION.search(statement) or (SQLGenerator.UNRESOLVED_GENERATOR.search(statement) and not SQLGenerator.LIKELY_FALSE_ALARMS.search(statement))): print ('WARNING: final statement contains suspicious unresolved symbol(s): "' + statement + '"') print ('with schema "' + self.__schema.debug_schema_to_string() + '"') self.__num_unresolved_statements += 1 for generated_stmt in BaseGenerator.generate_statements_from_list(statement, generators, field_map): yield generated_stmt def generate(self, summarize_successes = False): for s in self.__statements: results = 0 ### print 'DEBUG VERBOSELY SPOUTING INPUT STATEMENT: ' + s for i in self.__generate_statement(s): results += 1 ### print 'DEBUG VERBOSELY SPOUTING OUTPUT STATEMENT: ' + i yield i ### TODO: make generation of the subquery wrapping variant of the select statements optional by some global flag if self.__subversion_generation and re.match("(?i)\s*SELECT", i): results += 1 yield 'SELECT * FROM (' + i + ') subquery' upper_case_statement = i.upper().lstrip() if (upper_case_statement.startswith('INSERT') or upper_case_statement.startswith('UPSERT')): self.__num_insert_statements += 1 if results == 0: print 'Template "%s" failed to yield SQL statements' % s if summarize_successes: print '%d SQL statements generated by template: "%s"' % (results, s) self.__min_statements_per_pattern = min(self.__min_statements_per_pattern, results) self.__max_statements_per_pattern = max(self.__max_statements_per_pattern, results) def min_statements_per_pattern(self): if (self.__min_statements_per_pattern == sys.maxint): # initial value return -1 # indicates no patterns have been used to generate SQL statements else: return self.__min_statements_per_pattern def max_statements_per_pattern(self): return self.__max_statements_per_pattern def num_insert_statements(self): return self.__num_insert_statements def num_patterns(self): return len(self.__statements) def num_unresolved_statements(self): return self.__num_unresolved_statements if __name__ == "__main__": # run the SQLGenerator in a test mode that simply prints its results # given the schema file and statement file referenced on the command line. # The schema file should not contain any generic "@macros" only '_'-prefixed # generator tokens (with optional []-bracketed attributes and @-prefixed labels). parser = OptionParser() parser.add_option("-s", "--seed", dest="seed", help="seed for random number generator") (options, args) = parser.parse_args() if options.seed == None: seed = random.randint(0, 2**63) print "Random seed: %d" % seed else: seed = int(options.seed) print "Using supplied seed: " + str(seed) random.seed(seed) if len(args) < 2: usage() sys.exit(3) catalog = args[0] template = args[1] generator = SQLGenerator(catalog, template, True, False) for i in generator.generate(True): print 'STATEMENT: ' + i
0.00514
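The Template class above strips {@name = "value"} macro definitions out of the template and later substitutes each @name token wherever it occurs in a statement. A minimal standalone sketch of that substitution step, simplified to ignore the "|=" generator choices and using made-up template lines rather than the project's own classes:

# Simplified sketch of the Template.scan_for_macros()/apply_macros() idea above;
# not the project's code, just the same regex-driven substitution in miniature.
import re

MACRO_DEFINE = re.compile(r'{(@\w+)\s*=\s*"(.*)"\s*}')
MACRO_NAME = re.compile(r'@\w+')

def expand(lines):
    macros = {}
    out = []
    for line in lines:
        match = MACRO_DEFINE.search(line)
        if match:
            macros[match.group(1)] = match.group(2)
            continue
        # Substitute every defined @token; unknown tokens are left untouched.
        out.append(MACRO_NAME.sub(lambda m: macros.get(m.group(), m.group()), line))
    return out

print(expand(['{@cols = "id, name"}', 'SELECT @cols FROM t1']))
# -> ['SELECT id, name FROM t1']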
# -*- coding:utf-8 -*- # # # Copyright (C) 2013 Michael Telahun Makonnen <[email protected]>. # All Rights Reserved. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # from datetime import datetime from dateutil.relativedelta import relativedelta from openerp.osv import fields, orm class compute_alerts(orm.TransientModel): _name = 'hr.schedule.alert.compute' _description = 'Check Alerts' _columns = { 'date_start': fields.date( 'Start', required=True, ), 'date_end': fields.date( 'End', required=True, ), 'employee_ids': fields.many2many( 'hr.employee', 'hr_employee_alert_rel', 'generate_id', 'employee_id', 'Employees', ), } def generate_alerts(self, cr, uid, ids, context=None): alert_obj = self.pool.get('hr.schedule.alert') data = self.read(cr, uid, ids, context=context)[0] dStart = datetime.strptime(data['date_start'], '%Y-%m-%d').date() dEnd = datetime.strptime(data['date_end'], '%Y-%m-%d').date() dToday = datetime.strptime(fields.date.context_today( self, cr, uid, context=context), '%Y-%m-%d').date() if dToday < dEnd: dEnd = dToday dNext = dStart for employee_id in data['employee_ids']: while dNext <= dEnd: alert_obj.compute_alerts_by_employee( cr, uid, employee_id, dNext.strftime('%Y-%m-%d'), context=context ) dNext += relativedelta(days=+1) return { 'view_type': 'form', 'view_mode': 'tree,form', 'res_model': 'hr.schedule.alert', 'domain': [ ('employee_id', 'in', data['employee_ids']), '&', ('name', '>=', data['date_start'] + ' 00:00:00'), ('name', '<=', data['date_end'] + ' 23:59:59') ], 'type': 'ir.actions.act_window', 'target': 'current', 'nodestroy': True, 'context': context, }
0
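generate_alerts() above walks day by day from date_start to date_end with dateutil's relativedelta, recomputing alerts for each employee and date. A small self-contained sketch of that loop (the dates are made up for illustration):

# Standalone sketch of the day-by-day loop used in generate_alerts() above.
from datetime import datetime
from dateutil.relativedelta import relativedelta

d_start = datetime.strptime('2013-01-01', '%Y-%m-%d').date()
d_end = datetime.strptime('2013-01-05', '%Y-%m-%d').date()

d_next = d_start
while d_next <= d_end:
    # In the wizard this is where compute_alerts_by_employee() is invoked.
    print(d_next.strftime('%Y-%m-%d'))
    d_next += relativedelta(days=+1)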
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp import SUPERUSER_ID from openerp import tools from openerp.osv import fields, osv, expression from openerp.tools.safe_eval import safe_eval as eval from openerp.tools.misc import unquote as unquote class ir_rule(osv.osv): _name = 'ir.rule' _order = 'name' _MODES = ['read', 'write', 'create', 'unlink'] def _eval_context_for_combinations(self): """Returns a dictionary to use as evaluation context for ir.rule domains, when the goal is to obtain python lists that are easier to parse and combine, but not to actually execute them.""" return {'user': unquote('user'), 'time': unquote('time')} def _eval_context(self, cr, uid): """Returns a dictionary to use as evaluation context for ir.rule domains.""" return {'user': self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid), 'time':time} def _domain_force_get(self, cr, uid, ids, field_name, arg, context=None): res = {} eval_context = self._eval_context(cr, uid) for rule in self.browse(cr, uid, ids, context): if rule.domain_force: res[rule.id] = expression.normalize_domain(eval(rule.domain_force, eval_context)) else: res[rule.id] = [] return res def _get_value(self, cr, uid, ids, field_name, arg, context=None): res = {} for rule in self.browse(cr, uid, ids, context): if not rule.groups: res[rule.id] = True else: res[rule.id] = False return res def _check_model_obj(self, cr, uid, ids, context=None): return not any(self.pool[rule.model_id.model].is_transient() for rule in self.browse(cr, uid, ids, context)) def _check_model_name(self, cr, uid, ids, context=None): # Don't allow rules on rules records (this model). 
return not any(rule.model_id.model == self._name for rule in self.browse(cr, uid, ids, context)) _columns = { 'name': fields.char('Name', size=128, select=1), 'active': fields.boolean('Active', help="If you uncheck the active field, it will disable the record rule without deleting it (if you delete a native record rule, it may be re-created when you reload the module."), 'model_id': fields.many2one('ir.model', 'Object',select=1, required=True, ondelete="cascade"), 'global': fields.function(_get_value, string='Global', type='boolean', store=True, help="If no group is specified the rule is global and applied to everyone"), 'groups': fields.many2many('res.groups', 'rule_group_rel', 'rule_group_id', 'group_id', 'Groups'), 'domain_force': fields.text('Domain'), 'domain': fields.function(_domain_force_get, string='Domain', type='text'), 'perm_read': fields.boolean('Apply for Read'), 'perm_write': fields.boolean('Apply for Write'), 'perm_create': fields.boolean('Apply for Create'), 'perm_unlink': fields.boolean('Apply for Delete') } _order = 'model_id DESC' _defaults = { 'active': True, 'perm_read': True, 'perm_write': True, 'perm_create': True, 'perm_unlink': True, 'global': True, } _sql_constraints = [ ('no_access_rights', 'CHECK (perm_read!=False or perm_write!=False or perm_create!=False or perm_unlink!=False)', 'Rule must have at least one checked access right !'), ] _constraints = [ (_check_model_obj, 'Rules can not be applied on Transient models.', ['model_id']), (_check_model_name, 'Rules can not be applied on the Record Rules model.', ['model_id']), ] @tools.ormcache() def _compute_domain(self, cr, uid, model_name, mode="read"): if mode not in self._MODES: raise ValueError('Invalid mode: %r' % (mode,)) if uid == SUPERUSER_ID: return None cr.execute("""SELECT r.id FROM ir_rule r JOIN ir_model m ON (r.model_id = m.id) WHERE m.model = %s AND r.active is True AND r.perm_""" + mode + """ AND (r.id IN (SELECT rule_group_id FROM rule_group_rel g_rel JOIN res_groups_users_rel u_rel ON (g_rel.group_id = u_rel.gid) WHERE u_rel.uid = %s) OR r.global)""", (model_name, uid)) rule_ids = [x[0] for x in cr.fetchall()] if rule_ids: # browse user as super-admin root to avoid access errors! user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid) global_domains = [] # list of domains group_domains = {} # map: group -> list of domains for rule in self.browse(cr, SUPERUSER_ID, rule_ids): # read 'domain' as UID to have the correct eval context for the rule. rule_domain = self.read(cr, uid, rule.id, ['domain'])['domain'] dom = expression.normalize_domain(rule_domain) for group in rule.groups: if group in user.groups_id: group_domains.setdefault(group, []).append(dom) if not rule.groups: global_domains.append(dom) # combine global domains and group domains if group_domains: group_domain = expression.OR(map(expression.OR, group_domains.values())) else: group_domain = [] domain = expression.AND(global_domains + [group_domain]) return domain return [] def clear_cache(self, cr, uid): self._compute_domain.clear_cache(self) def domain_get(self, cr, uid, model_name, mode='read', context=None): dom = self._compute_domain(cr, uid, model_name, mode) if dom: # _where_calc is called as superuser. This means that rules can # involve objects on which the real uid has no acces rights. # This means also there is no implicit restriction (e.g. an object # references another object the user can't see). 
query = self.pool[model_name]._where_calc(cr, SUPERUSER_ID, dom, active_test=False) return query.where_clause, query.where_clause_params, query.tables return [], [], ['"' + self.pool[model_name]._table + '"'] def unlink(self, cr, uid, ids, context=None): res = super(ir_rule, self).unlink(cr, uid, ids, context=context) self.clear_cache(cr, uid) return res def create(self, cr, uid, vals, context=None): res = super(ir_rule, self).create(cr, uid, vals, context=context) self.clear_cache(cr, uid) return res def write(self, cr, uid, ids, vals, context=None): res = super(ir_rule, self).write(cr, uid, ids, vals, context=context) self.clear_cache(cr,uid) return res # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
0.002608
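_compute_domain() above ORs together the domains of the group-specific rules that apply to the user and then ANDs the result with every global rule. A simplified, self-contained sketch of that combination over normalized prefix-notation domains; the helpers mimic openerp.osv.expression.AND/OR in reduced form and the example domains are invented:

# Simplified stand-in for expression.AND/OR over already-normalized domains,
# only to illustrate how _compute_domain() combines rule domains above.
def combine(operator, domains):
    domains = [d for d in domains if d]
    if not domains:
        return []
    flat = []
    for d in domains:
        flat += d
    return [operator] * (len(domains) - 1) + flat

def OR(domains):
    return combine('|', domains)

def AND(domains):
    return combine('&', domains)

group_domains = [[('user_id', '=', 1)], [('company_id', '=', 1)]]   # any may match
global_domains = [[('active', '=', True)]]                          # all must match
print(AND(global_domains + [OR(group_domains)]))
# -> ['&', ('active', '=', True), '|', ('user_id', '=', 1), ('company_id', '=', 1)]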
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """RNN helpers for TensorFlow models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.rnn.python.ops import core_rnn as contrib_rnn from tensorflow.python.ops import array_ops from tensorflow.python.ops import rnn from tensorflow.python.ops import variable_scope as vs def stack_bidirectional_rnn(cells_fw, cells_bw, inputs, initial_states_fw=None, initial_states_bw=None, dtype=None, sequence_length=None, scope=None): """Creates a bidirectional recurrent neural network. Stacks several bidirectional rnn layers. The combined forward and backward layer outputs are used as input of the next layer. tf.bidirectional_rnn does not allow to share forward and backward information between layers. The input_size of the first forward and backward cells must match. The initial state for both directions is zero and no intermediate states are returned. As described in https://arxiv.org/abs/1303.5778 Args: cells_fw: List of instances of RNNCell, one per layer, to be used for forward direction. cells_bw: List of instances of RNNCell, one per layer, to be used for backward direction. inputs: A length T list of inputs, each a tensor of shape [batch_size, input_size], or a nested tuple of such elements. initial_states_fw: (optional) A list of the initial states (one per layer) for the forward RNN. Each tensor must has an appropriate type and shape `[batch_size, cell_fw.state_size]`. initial_states_bw: (optional) Same as for `initial_states_fw`, but using the corresponding properties of `cells_bw`. dtype: (optional) The data type for the initial state. Required if either of the initial states are not provided. sequence_length: (optional) An int32/int64 vector, size `[batch_size]`, containing the actual lengths for each of the sequences. scope: VariableScope for the created subgraph; defaults to None. Returns: A tuple (outputs, output_state_fw, output_state_bw) where: outputs is a length `T` list of outputs (one for each input), which are depth-concatenated forward and backward outputs. output_states_fw is the final states, one tensor per layer, of the forward rnn. output_states_bw is the final states, one tensor per layer, of the backward rnn. Raises: TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`. ValueError: If inputs is None, not a list or an empty list. 
""" if not cells_fw: raise ValueError("Must specify at least one fw cell for BidirectionalRNN.") if not cells_bw: raise ValueError("Must specify at least one bw cell for BidirectionalRNN.") if not isinstance(cells_fw, list): raise ValueError("cells_fw must be a list of RNNCells (one per layer).") if not isinstance(cells_bw, list): raise ValueError("cells_bw must be a list of RNNCells (one per layer).") if len(cells_fw) != len(cells_bw): raise ValueError("Forward and Backward cells must have the same depth.") if initial_states_fw is not None and (not isinstance(cells_fw, list) or len(cells_fw) != len(cells_fw)): raise ValueError( "initial_states_fw must be a list of state tensors (one per layer).") if initial_states_bw is not None and (not isinstance(cells_bw, list) or len(cells_bw) != len(cells_bw)): raise ValueError( "initial_states_bw must be a list of state tensors (one per layer).") states_fw = [] states_bw = [] prev_layer = inputs with vs.variable_scope(scope or "stack_bidirectional_rnn"): for i, (cell_fw, cell_bw) in enumerate(zip(cells_fw, cells_bw)): initial_state_fw = None initial_state_bw = None if initial_states_fw: initial_state_fw = initial_states_fw[i] if initial_states_bw: initial_state_bw = initial_states_bw[i] with vs.variable_scope("cell_%d" % i) as cell_scope: prev_layer, state_fw, state_bw = contrib_rnn.static_bidirectional_rnn( cell_fw, cell_bw, prev_layer, initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw, sequence_length=sequence_length, dtype=dtype, scope=cell_scope) states_fw.append(state_fw) states_bw.append(state_bw) return prev_layer, tuple(states_fw), tuple(states_bw) def stack_bidirectional_dynamic_rnn(cells_fw, cells_bw, inputs, initial_states_fw=None, initial_states_bw=None, dtype=None, sequence_length=None, parallel_iterations=None, scope=None): """Creates a dynamic bidirectional recurrent neural network. Stacks several bidirectional rnn layers. The combined forward and backward layer outputs are used as input of the next layer. tf.bidirectional_rnn does not allow to share forward and backward information between layers. The input_size of the first forward and backward cells must match. The initial state for both directions is zero and no intermediate states are returned. Args: cells_fw: List of instances of RNNCell, one per layer, to be used for forward direction. cells_bw: List of instances of RNNCell, one per layer, to be used for backward direction. inputs: The RNN inputs. this must be a tensor of shape: `[batch_size, max_time, ...]`, or a nested tuple of such elements. initial_states_fw: (optional) A list of the initial states (one per layer) for the forward RNN. Each tensor must has an appropriate type and shape `[batch_size, cell_fw.state_size]`. initial_states_bw: (optional) Same as for `initial_states_fw`, but using the corresponding properties of `cells_bw`. dtype: (optional) The data type for the initial state. Required if either of the initial states are not provided. sequence_length: (optional) An int32/int64 vector, size `[batch_size]`, containing the actual lengths for each of the sequences. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. scope: VariableScope for the created subgraph; defaults to None. 
Returns: A tuple (outputs, output_state_fw, output_state_bw) where: outputs: Output `Tensor` shaped: `batch_size, max_time, layers_output]`. Where layers_output are depth-concatenated forward and backward outputs. output_states_fw is the final states, one tensor per layer, of the forward rnn. output_states_bw is the final states, one tensor per layer, of the backward rnn. Raises: TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`. ValueError: If inputs is `None`. """ if not cells_fw: raise ValueError("Must specify at least one fw cell for BidirectionalRNN.") if not cells_bw: raise ValueError("Must specify at least one bw cell for BidirectionalRNN.") if not isinstance(cells_fw, list): raise ValueError("cells_fw must be a list of RNNCells (one per layer).") if not isinstance(cells_bw, list): raise ValueError("cells_bw must be a list of RNNCells (one per layer).") if len(cells_fw) != len(cells_bw): raise ValueError("Forward and Backward cells must have the same depth.") if initial_states_fw is not None and (not isinstance(cells_fw, list) or len(cells_fw) != len(cells_fw)): raise ValueError( "initial_states_fw must be a list of state tensors (one per layer).") if initial_states_bw is not None and (not isinstance(cells_bw, list) or len(cells_bw) != len(cells_bw)): raise ValueError( "initial_states_bw must be a list of state tensors (one per layer).") states_fw = [] states_bw = [] prev_layer = inputs with vs.variable_scope(scope or "stack_bidirectional_rnn"): for i, (cell_fw, cell_bw) in enumerate(zip(cells_fw, cells_bw)): initial_state_fw = None initial_state_bw = None if initial_states_fw: initial_state_fw = initial_states_fw[i] if initial_states_bw: initial_state_bw = initial_states_bw[i] with vs.variable_scope("cell_%d" % i): outputs, (state_fw, state_bw) = rnn.bidirectional_dynamic_rnn( cell_fw, cell_bw, prev_layer, initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw, sequence_length=sequence_length, parallel_iterations=parallel_iterations, dtype=dtype) # Concat the outputs to create the new input. prev_layer = array_ops.concat(outputs, 2) states_fw.append(state_fw) states_bw.append(state_bw) return prev_layer, tuple(states_fw), tuple(states_bw)
0.003876
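A usage sketch for the stacked dynamic variant above, assuming a TensorFlow 1.x environment where the function is exposed as tf.contrib.rnn.stack_bidirectional_dynamic_rnn; the layer count, cell sizes, and placeholder shape are arbitrary:

# Illustrative only; requires TensorFlow 1.x with tf.contrib available.
import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 50, 128])  # [batch, max_time, features]

cells_fw = [tf.contrib.rnn.LSTMCell(64) for _ in range(3)]
cells_bw = [tf.contrib.rnn.LSTMCell(64) for _ in range(3)]

outputs, states_fw, states_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
    cells_fw, cells_bw, inputs, dtype=tf.float32)

# outputs: [batch, max_time, 2 * 64] -- the last layer's forward and backward
# outputs concatenated on the feature axis; states_fw/states_bw hold one final
# state per layer, as documented above.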
# Copyright 2010-2013, Sikuli.org # Released under the MIT License. # modified RaiMan 2013 from org.sikuli.basics import Debug from org.sikuli.script import Region as JRegion from org.sikuli.script import ObserverCallBack from org.sikuli.script.Constants import * import sys import inspect DEBUG=False class Region(JRegion): # support for with: # override all global sikuli functions by this region's methods. def __enter__(self): exclude_list = [ 'ROI' ] if DEBUG: print "with: entering *****", self self._global_funcs = {} dict = sys.modules['__main__'].__dict__ for name in dir(self): if name in exclude_list: continue try: if not inspect.ismethod(getattr(self,name)): continue except: continue if dict.has_key(name): self._global_funcs[name] = dict[name] if DEBUG and name == 'checkWith': print "with: save %s ( %s )"%(name, str(dict[name])[1:]) dict[name] = eval("self."+name) if DEBUG and name == 'checkWith': print "with: is now: %s"%(str(dict[name])[1:]) return self def __exit__(self, type, value, traceback): if DEBUG: print "with: exiting ****", self dict = sys.modules['__main__'].__dict__ for name in self._global_funcs.keys(): dict[name] = self._global_funcs[name] if DEBUG and name == 'checkWith': print "with restore: %s"%(str(dict[name])[1:]) self._global_funcs = None ####################################################################### #---- SIKULI PUBLIC API ####################################################################### # Python wait() needs to be here because Java Object has a final method: wait(long timeout). # If we want to let Sikuli users use wait(int/long timeout), we need this Python method. def wait(self, target, timeout=None): if isinstance(target, int) or isinstance(target, long): target = float(target) if timeout == None: return JRegion.wait(self, target) else: return JRegion.wait(self, target, timeout) # the new Region.text() feature (Tesseract 3) returns utf8 def text(self): return JRegion.text(self).encode("utf8") # observe(): Special setup for Jython # assures, that in any case the same region object is used def onAppear(self, target, handler = None): if not handler: return self.onAppearJ(target, None) class AnonyObserver(ObserverCallBack): def appeared(self, event): handler(event) return self.onAppearJ(target, AnonyObserver()) def onVanish(self, target, handler = None): if not handler: return self.onVanishJ(target, None) class AnonyObserver(ObserverCallBack): def vanished(self, event): handler(event) return self.onVanishJ(target, AnonyObserver()) def onChange(self, arg1=0, arg2=None): if isinstance(arg1, int): min_size = arg1 handler = arg2 else: if (arg2 != None): raise Exception("onChange: Invalid parameters set") min_size = 0 handler = arg1 if not handler: return self.onChangeJ(min_size, None) class AnonyObserver(ObserverCallBack): def changed(self, event): handler(event) return self.onChangeJ(min_size, AnonyObserver()) def observe(self, time=FOREVER, background=False): return self.observeJ(time, background)
0.008134
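A brief usage sketch for the with-statement support defined above, assuming a Sikuli/Jython script environment; the coordinates and image name are placeholders:

# Illustrative only; runs inside a Sikuli script, not plain CPython.
reg = Region(10, 10, 400, 300)
with reg:
    # While inside the block, __enter__ rebinds the global Sikuli functions
    # (click, find, wait, ...) to this region's methods; __exit__ restores them.
    click("button.png")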
""" Views and functions for serving static files. These are only to be used during development, and SHOULD NOT be used in a production setting. """ import mimetypes import os import posixpath import re import stat from django.http import ( FileResponse, Http404, HttpResponse, HttpResponseNotModified, ) from django.template import Context, Engine, TemplateDoesNotExist, loader from django.utils._os import safe_join from django.utils.http import http_date, parse_http_date from django.utils.translation import gettext as _, gettext_lazy def serve(request, path, document_root=None, show_indexes=False): """ Serve static files below a given point in the directory structure. To use, put a URL pattern such as:: from django.views.static import serve url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'}) in your URLconf. You must provide the ``document_root`` param. You may also set ``show_indexes`` to ``True`` if you'd like to serve a basic index of the directory. This index view will use the template hardcoded below, but if you'd like to override it, you can create a template called ``static/directory_index.html``. """ path = posixpath.normpath(path).lstrip('/') fullpath = safe_join(document_root, path) if os.path.isdir(fullpath): if show_indexes: return directory_index(path, fullpath) raise Http404(_("Directory indexes are not allowed here.")) if not os.path.exists(fullpath): raise Http404(_('"%(path)s" does not exist') % {'path': fullpath}) # Respect the If-Modified-Since header. statobj = os.stat(fullpath) if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'), statobj.st_mtime, statobj.st_size): return HttpResponseNotModified() content_type, encoding = mimetypes.guess_type(fullpath) content_type = content_type or 'application/octet-stream' response = FileResponse(open(fullpath, 'rb'), content_type=content_type) response["Last-Modified"] = http_date(statobj.st_mtime) if stat.S_ISREG(statobj.st_mode): response["Content-Length"] = statobj.st_size if encoding: response["Content-Encoding"] = encoding return response DEFAULT_DIRECTORY_INDEX_TEMPLATE = """ {% load i18n %} <!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="Content-type" content="text/html; charset=utf-8" /> <meta http-equiv="Content-Language" content="en-us" /> <meta name="robots" content="NONE,NOARCHIVE" /> <title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title> </head> <body> <h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1> <ul> {% if directory != "/" %} <li><a href="../">../</a></li> {% endif %} {% for f in file_list %} <li><a href="{{ f|urlencode }}">{{ f }}</a></li> {% endfor %} </ul> </body> </html> """ template_translatable = gettext_lazy("Index of %(directory)s") def directory_index(path, fullpath): try: t = loader.select_template([ 'static/directory_index.html', 'static/directory_index', ]) except TemplateDoesNotExist: t = Engine(libraries={'i18n': 'django.templatetags.i18n'}).from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE) c = Context() else: c = {} files = [] for f in os.listdir(fullpath): if not f.startswith('.'): if os.path.isdir(os.path.join(fullpath, f)): f += '/' files.append(f) c.update({ 'directory': path + '/', 'file_list': files, }) return HttpResponse(t.render(c)) def was_modified_since(header=None, mtime=0, size=0): """ Was something modified since the user last downloaded it? header This is the value of the If-Modified-Since header. If this is None, I'll just return True. mtime This is the modification time of the item we're talking about. 
size This is the size of the item we're talking about. """ try: if header is None: raise ValueError matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header, re.IGNORECASE) header_mtime = parse_http_date(matches.group(1)) header_len = matches.group(3) if header_len and int(header_len) != size: raise ValueError if int(mtime) > header_mtime: raise ValueError except (AttributeError, ValueError, OverflowError): return True return False
0.000216
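The serve() docstring above already shows the raw URL pattern; a common development-only wiring looks roughly like the sketch below. The media prefix and settings names are the usual Django ones, assumed here rather than taken from this file:

# Development-only example; never expose this view in production.
from django.conf import settings
from django.conf.urls import url
from django.views.static import serve

urlpatterns = []
if settings.DEBUG:
    urlpatterns += [
        url(r'^media/(?P<path>.*)$', serve,
            {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
    ]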
# Copyright 1999-2009 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 from _emerge.BinpkgFetcher import BinpkgFetcher from _emerge.CompositeTask import CompositeTask from _emerge.BinpkgVerifier import BinpkgVerifier from portage import os class BinpkgPrefetcher(CompositeTask): __slots__ = ("pkg",) + \ ("pkg_path", "_bintree",) def _start(self): self._bintree = self.pkg.root_config.trees["bintree"] fetcher = BinpkgFetcher(background=self.background, logfile=self.scheduler.fetch.log_file, pkg=self.pkg, scheduler=self.scheduler) self.pkg_path = fetcher.pkg_path self._start_task(fetcher, self._fetcher_exit) def _fetcher_exit(self, fetcher): if self._default_exit(fetcher) != os.EX_OK: self.wait() return verifier = BinpkgVerifier(background=self.background, logfile=self.scheduler.fetch.log_file, pkg=self.pkg, scheduler=self.scheduler, _pkg_path=self.pkg_path) self._start_task(verifier, self._verifier_exit) def _verifier_exit(self, verifier): if self._default_exit(verifier) != os.EX_OK: self.wait() return self._bintree.inject(self.pkg.cpv, filename=self.pkg_path) self._current_task = None self.returncode = os.EX_OK self.wait()
0.025
#!/usr/bin/env python # -*- coding: utf-8 -*- import locale import socket import tarfile import urllib2 import zipfile from sklearn.cross_validation import StratifiedShuffleSplit, KFold from files import * from general import * from ui import * class Dataset(object): """Dataset base class. The specific dataset classes are inherited from this class, and only needed methods are reimplemented. """ def __init__(self, data_path='data', name='dataset'): """__init__ method. Parameters ---------- data_path : str Basepath where the dataset is stored. (Default value='data') """ # Folder name for dataset self.name = name # Path to the dataset self.local_path = os.path.join(data_path, self.name) # Create the dataset path if does not exist if not os.path.isdir(self.local_path): os.makedirs(self.local_path) # Evaluation setup folder self.evaluation_setup_folder = 'evaluation_setup' # Path to the folder containing evaluation setup files self.evaluation_setup_path = os.path.join(self.local_path, self.evaluation_setup_folder) # Meta data file, csv-format self.meta_filename = 'meta.txt' # Path to meta data file self.meta_file = os.path.join(self.local_path, self.meta_filename) # Hash file to detect removed or added files self.filelisthash_filename = 'filelist.hash' # Number of evaluation folds self.evaluation_folds = 1 # List containing dataset package items # Define this in the inherited class. # Format: # { # 'remote_package': download_url, # 'local_package': os.path.join(self.local_path, 'name_of_downloaded_package'), # 'local_audio_path': os.path.join(self.local_path, 'name_of_folder_containing_audio_files'), # } self.package_list = [] # List of audio files self.files = None # List of meta data dict self.meta_data = None # Training meta data for folds self.evaluation_data_train = {} # Testing meta data for folds self.evaluation_data_test = {} # Recognized audio extensions self.audio_extensions = {'wav', 'flac'} # Info fields for dataset self.authors = '' self.name_remote = '' self.url = '' self.audio_source = '' self.audio_type = '' self.recording_device_model = '' self.microphone_model = '' @property def audio_files(self): """Get all audio files in the dataset Parameters ---------- Nothing Returns ------- filelist : list File list with absolute paths """ if self.files is None: self.files = [] for item in self.package_list: path = item['local_audio_path'] if path: l = os.listdir(path) for f in l: file_name, file_extension = os.path.splitext(f) if file_extension[1:] in self.audio_extensions: self.files.append(os.path.abspath(os.path.join(path, f))) self.files.sort() return self.files @property def audio_file_count(self): """Get number of audio files in dataset Parameters ---------- Nothing Returns ------- filecount : int Number of audio files """ return len(self.audio_files) @property def meta(self): """Get meta data for dataset. If not already read from disk, data is read and returned. Parameters ---------- Nothing Returns ------- meta_data : list List containing meta data as dict. Raises ------- IOError meta file not found. 
""" if self.meta_data is None: self.meta_data = [] meta_id = 0 if os.path.isfile(self.meta_file): f = open(self.meta_file, 'rt') try: reader = csv.reader(f, delimiter='\t') for row in reader: if len(row) == 2: # Scene meta self.meta_data.append({'file': row[0], 'scene_label': row[1].rstrip()}) elif len(row) == 4: # Audio tagging meta self.meta_data.append( {'file': row[0], 'scene_label': row[1].rstrip(), 'tag_string': row[2].rstrip(), 'tags': row[3].split(';')}) elif len(row) == 6: # Event meta self.meta_data.append({'file': row[0], 'scene_label': row[1].rstrip(), 'event_onset': float(row[2]), 'event_offset': float(row[3]), 'event_label': row[4].rstrip(), 'event_type': row[5].rstrip(), 'id': meta_id }) meta_id += 1 finally: f.close() else: raise IOError("Meta file not found [%s]" % self.meta_file) return self.meta_data @property def meta_count(self): """Number of meta data items. Parameters ---------- Nothing Returns ------- meta_item_count : int Meta data item count """ return len(self.meta) @property def fold_count(self): """Number of fold in the evaluation setup. Parameters ---------- Nothing Returns ------- fold_count : int Number of folds """ return self.evaluation_folds @property def scene_labels(self): """List of unique scene labels in the meta data. Parameters ---------- Nothing Returns ------- labels : list List of scene labels in alphabetical order. """ labels = [] for item in self.meta: if 'scene_label' in item and item['scene_label'] not in labels: labels.append(item['scene_label']) labels.sort() return labels @property def scene_label_count(self): """Number of unique scene labels in the meta data. Parameters ---------- Nothing Returns ------- scene_label_count : int Number of unique scene labels. """ return len(self.scene_labels) @property def event_labels(self): """List of unique event labels in the meta data. Parameters ---------- Nothing Returns ------- labels : list List of event labels in alphabetical order. """ labels = [] for item in self.meta: if 'event_label' in item and item['event_label'].rstrip() not in labels: labels.append(item['event_label'].rstrip()) labels.sort() return labels @property def event_label_count(self): """Number of unique event labels in the meta data. Parameters ---------- Nothing Returns ------- event_label_count : int Number of unique event labels """ return len(self.event_labels) @property def audio_tags(self): """List of unique audio tags in the meta data. Parameters ---------- Nothing Returns ------- labels : list List of audio tags in alphabetical order. """ tags = [] for item in self.meta: if 'tags' in item: for tag in item['tags']: if tag and tag not in tags: tags.append(tag) tags.sort() return tags @property def audio_tag_count(self): """Number of unique audio tags in the meta data. 
Parameters ---------- Nothing Returns ------- audio_tag_count : int Number of unique audio tags """ return len(self.audio_tags) def __getitem__(self, i): """Getting meta data item Parameters ---------- i : int item id Returns ------- meta_data : dict Meta data item """ if i < len(self.meta): return self.meta[i] else: return None def __iter__(self): """Iterator for meta data items Parameters ---------- Nothing Returns ------- Nothing """ i = 0 meta = self[i] # yield window while it's valid while meta is not None: yield meta # get next item i += 1 meta = self[i] @staticmethod def print_bytes(num_bytes): """Output number of bytes according to locale and with IEC binary prefixes Parameters ---------- num_bytes : int > 0 [scalar] Bytes Returns ------- bytes : str Human readable string """ KiB = 1024 MiB = KiB * KiB GiB = KiB * MiB TiB = KiB * GiB PiB = KiB * TiB EiB = KiB * PiB ZiB = KiB * EiB YiB = KiB * ZiB locale.setlocale(locale.LC_ALL, '') output = locale.format("%d", num_bytes, grouping=True) + ' bytes' if num_bytes > YiB: output += ' (%.4g YiB)' % (num_bytes / YiB) elif num_bytes > ZiB: output += ' (%.4g ZiB)' % (num_bytes / ZiB) elif num_bytes > EiB: output += ' (%.4g EiB)' % (num_bytes / EiB) elif num_bytes > PiB: output += ' (%.4g PiB)' % (num_bytes / PiB) elif num_bytes > TiB: output += ' (%.4g TiB)' % (num_bytes / TiB) elif num_bytes > GiB: output += ' (%.4g GiB)' % (num_bytes / GiB) elif num_bytes > MiB: output += ' (%.4g MiB)' % (num_bytes / MiB) elif num_bytes > KiB: output += ' (%.4g KiB)' % (num_bytes / KiB) return output def download(self): """Download dataset over the internet to the local path Parameters ---------- Nothing Returns ------- Nothing Raises ------- IOError Download failed. """ section_header('Download dataset') for item in self.package_list: try: if item['remote_package'] and not os.path.isfile(item['local_package']): data = None req = urllib2.Request(item['remote_package'], data, {}) handle = urllib2.urlopen(req) if "Content-Length" in handle.headers.items(): size = int(handle.info()["Content-Length"]) else: size = None actualSize = 0 blocksize = 64 * 1024 tmp_file = os.path.join(self.local_path, 'tmp_file') fo = open(tmp_file, "wb") terminate = False while not terminate: block = handle.read(blocksize) actualSize += len(block) if size: progress(title_text=os.path.split(item['local_package'])[1], percentage=actualSize / float(size), note=self.print_bytes(actualSize)) else: progress(title_text=os.path.split(item['local_package'])[1], note=self.print_bytes(actualSize)) if len(block) == 0: break fo.write(block) fo.close() os.rename(tmp_file, item['local_package']) except (urllib2.URLError, socket.timeout), e: try: fo.close() except: raise IOError('Download failed [%s]' % (item['remote_package'])) foot() def extract(self): """Extract the dataset packages Parameters ---------- Nothing Returns ------- Nothing """ section_header('Extract dataset') for item_id, item in enumerate(self.package_list): if item['local_package']: if item['local_package'].endswith('.zip'): with zipfile.ZipFile(item['local_package'], "r") as z: # Trick to omit first level folder parts = [] for name in z.namelist(): if not name.endswith('/'): parts.append(name.split('/')[:-1]) prefix = os.path.commonprefix(parts) or '' if prefix: if len(prefix) > 1: prefix_ = list() prefix_.append(prefix[0]) prefix = prefix_ prefix = '/'.join(prefix) + '/' offset = len(prefix) # Start extraction members = z.infolist() file_count = 1 for i, member in enumerate(members): if len(member.filename) > offset: 
member.filename = member.filename[offset:] if not os.path.isfile(os.path.join(self.local_path, member.filename)): z.extract(member, self.local_path) progress( title_text='Extracting [' + str(item_id) + '/' + str(len(self.package_list)) + ']', percentage=(file_count / float(len(members))), note=member.filename) file_count += 1 elif item['local_package'].endswith('.tar.gz'): tar = tarfile.open(item['local_package'], "r:gz") for i, tar_info in enumerate(tar): if not os.path.isfile(os.path.join(self.local_path, tar_info.name)): tar.extract(tar_info, self.local_path) progress(title_text='Extracting [' + str(item_id) + '/' + str(len(self.package_list)) + ']', note=tar_info.name) tar.members = [] tar.close() foot() def on_after_extract(self): """Dataset meta data preparation, this will be overloaded in dataset specific classes Parameters ---------- Nothing Returns ------- Nothing """ pass def get_filelist(self): """List of files under local_path Parameters ---------- Nothing Returns ------- filelist: list File list """ filelist = [] for path, subdirs, files in os.walk(self.local_path): for name in files: filelist.append(os.path.join(path, name)) return filelist def check_filelist(self): """Generates hash from file list and check does it matches with one saved in filelist.hash. If some files have been deleted or added, checking will result False. Parameters ---------- Nothing Returns ------- result: bool Result """ if os.path.isfile(os.path.join(self.local_path, self.filelisthash_filename)): hash = load_text(os.path.join(self.local_path, self.filelisthash_filename))[0] if hash != get_parameter_hash(sorted(self.get_filelist())): return False else: return True else: return False def save_filelist_hash(self): """Generates file list hash, and saves it as filelist.hash under local_path. Parameters ---------- Nothing Returns ------- Nothing """ filelist = self.get_filelist() filelist_hash_not_found = True for file in filelist: if self.filelisthash_filename in file: filelist_hash_not_found = False if filelist_hash_not_found: filelist.append(os.path.join(self.local_path, self.filelisthash_filename)) save_text(os.path.join(self.local_path, self.filelisthash_filename), get_parameter_hash(sorted(filelist))) def fetch(self): """Download, extract and prepare the dataset. Parameters ---------- Nothing Returns ------- Nothing """ if not self.check_filelist(): self.download() self.extract() self.on_after_extract() self.save_filelist_hash() return self def train(self, fold=0): """List of training items. Parameters ---------- fold : int > 0 [scalar] Fold id, if zero all meta data is returned. (Default value=0) Returns ------- list : list of dicts List containing all meta data assigned to training set for given fold. 
""" if fold not in self.evaluation_data_train: self.evaluation_data_train[fold] = [] if fold > 0: with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'rt') as f: for row in csv.reader(f, delimiter='\t'): if len(row) == 2: # Scene meta self.evaluation_data_train[fold].append({ 'file': self.relative_to_absolute_path(row[0]), 'scene_label': row[1] }) elif len(row) == 4: # Audio tagging meta self.evaluation_data_train[fold].append({ 'file': self.relative_to_absolute_path(row[0]), 'scene_label': row[1], 'tag_string': row[2], 'tags': row[3].split(';') }) elif len(row) == 5: # Event meta self.evaluation_data_train[fold].append({ 'file': self.relative_to_absolute_path(row[0]), 'scene_label': row[1], 'event_onset': float(row[2]), 'event_offset': float(row[3]), 'event_label': row[4] }) else: data = [] for item in self.meta: if 'event_label' in item: data.append({'file': self.relative_to_absolute_path(item['file']), 'scene_label': item['scene_label'], 'event_onset': item['event_onset'], 'event_offset': item['event_offset'], 'event_label': item['event_label'], }) else: data.append({'file': self.relative_to_absolute_path(item['file']), 'scene_label': item['scene_label'] }) self.evaluation_data_train[0] = data return self.evaluation_data_train[fold] def test(self, fold=0): """List of testing items. Parameters ---------- fold : int > 0 [scalar] Fold id, if zero all meta data is returned. (Default value=0) Returns ------- list : list of dicts List containing all meta data assigned to testing set for given fold. """ if fold not in self.evaluation_data_test: self.evaluation_data_test[fold] = [] if fold > 0: with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'rt') as f: for row in csv.reader(f, delimiter='\t'): self.evaluation_data_test[fold].append({'file': self.relative_to_absolute_path(row[0])}) else: data = [] files = [] for item in self.meta: if self.relative_to_absolute_path(item['file']) not in files: data.append({'file': self.relative_to_absolute_path(item['file'])}) files.append(self.relative_to_absolute_path(item['file'])) self.evaluation_data_test[fold] = data return self.evaluation_data_test[fold] def folds(self, mode='folds'): """List of fold ids Parameters ---------- mode : str {'folds','full'} Fold setup type, possible values are 'folds' and 'full'. In 'full' mode fold number is set 0 and all data is used for training. (Default value=folds) Returns ------- list : list of integers Fold ids """ if mode == 'folds': return range(1, self.evaluation_folds + 1) elif mode == 'full': return [0] def file_meta(self, file): """Meta data for given file Parameters ---------- file : str File name Returns ------- list : list of dicts List containing all meta data related to given file. """ file = self.absolute_to_relative(file) file_meta = [] for item in self.meta: if item['file'] == file: file_meta.append(item) return file_meta def relative_to_absolute_path(self, path): """Converts relative path into absolute path. Parameters ---------- path : str Relative path Returns ------- path : str Absolute path """ return os.path.abspath(os.path.join(self.local_path, path)) def absolute_to_relative(self, path): """Converts absolute path into relative path. 
Parameters ---------- path : str Absolute path Returns ------- path : str Relative path """ if path.startswith(os.path.abspath(self.local_path)): return os.path.relpath(path, self.local_path) else: return path # ===================================================== # DCASE 2016 # ===================================================== class TUTAcousticScenes_2016_DevelopmentSet(Dataset): """TUT Acoustic scenes 2016 development dataset This dataset is used in DCASE2016 - Task 1, Acoustic scene classification """ def __init__(self, data_path='data'): Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-development') self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen' self.name_remote = 'TUT Acoustic Scenes 2016, development dataset' self.url = 'https://zenodo.org/record/45739' self.audio_source = 'Field recording' self.audio_type = 'Natural' self.recording_device_model = 'Roland Edirol R-09' self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone' self.evaluation_folds = 4 self.package_list = [ { 'remote_package': None, 'local_package': None, 'local_audio_path': os.path.join(self.local_path, 'audio'), }, { 'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.doc.zip', 'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.doc.zip'), 'local_audio_path': os.path.join(self.local_path, 'audio'), }, { 'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.meta.zip', 'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.meta.zip'), 'local_audio_path': os.path.join(self.local_path, 'audio'), }, { 'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.1.zip', 'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.1.zip'), 'local_audio_path': os.path.join(self.local_path, 'audio'), }, { 'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.2.zip', 'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.2.zip'), 'local_audio_path': os.path.join(self.local_path, 'audio'), }, { 'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.3.zip', 'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.3.zip'), 'local_audio_path': os.path.join(self.local_path, 'audio'), }, { 'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.4.zip', 'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.4.zip'), 'local_audio_path': os.path.join(self.local_path, 'audio'), }, { 'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.5.zip', 'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.5.zip'), 'local_audio_path': os.path.join(self.local_path, 'audio'), }, { 'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.6.zip', 'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.6.zip'), 'local_audio_path': os.path.join(self.local_path, 'audio'), }, { 'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.7.zip', 'local_package': os.path.join(self.local_path, 
'TUT-acoustic-scenes-2016-development.audio.7.zip'), 'local_audio_path': os.path.join(self.local_path, 'audio'), }, { 'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.8.zip', 'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.8.zip'), 'local_audio_path': os.path.join(self.local_path, 'audio'), } ] def on_after_extract(self): """After dataset packages are downloaded and extracted, meta-files are checked. Parameters ---------- nothing Returns ------- nothing """ if not os.path.isfile(self.meta_file): section_header('Generating meta file for dataset') meta_data = {} for fold in xrange(1, self.evaluation_folds): # Read train files in train_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt') f = open(train_filename, 'rt') reader = csv.reader(f, delimiter='\t') for row in reader: if row[0] not in meta_data: meta_data[row[0]] = row[1] f.close() # Read evaluation files in eval_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt') f = open(eval_filename, 'rt') reader = csv.reader(f, delimiter='\t') for row in reader: if row[0] not in meta_data: meta_data[row[0]] = row[1] f.close() f = open(self.meta_file, 'wt') try: writer = csv.writer(f, delimiter='\t') for file in meta_data: raw_path, raw_filename = os.path.split(file) relative_path = self.absolute_to_relative(raw_path) label = meta_data[file] writer.writerow((os.path.join(relative_path, raw_filename), label)) finally: f.close() foot() class TUTAcousticScenes_2016_EvaluationSet(Dataset): """TUT Acoustic scenes 2016 evaluation dataset This dataset is used in DCASE2016 - Task 1, Acoustic scene classification """ def __init__(self, data_path='data'): Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-evaluation') self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen' self.name_remote = 'TUT Acoustic Scenes 2016, evaluation dataset' self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/' self.audio_source = 'Field recording' self.audio_type = 'Natural' self.recording_device_model = 'Roland Edirol R-09' self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone' self.evaluation_folds = 1 self.package_list = [ { 'remote_package': None, 'local_package': None, 'local_audio_path': os.path.join(self.local_path, 'audio'), }, ] def on_after_extract(self): """After dataset packages are downloaded and extracted, meta-files are checked. 
Parameters ---------- nothing Returns ------- nothing """ eval_filename = os.path.join(self.evaluation_setup_path, 'evaluate.txt') if not os.path.isfile(self.meta_file) and os.path.isfile(eval_filename): section_header('Generating meta file for dataset') meta_data = {} f = open(eval_filename, 'rt') reader = csv.reader(f, delimiter='\t') for row in reader: if row[0] not in meta_data: meta_data[row[0]] = row[1] f.close() f = open(self.meta_file, 'wt') try: writer = csv.writer(f, delimiter='\t') for file in meta_data: raw_path, raw_filename = os.path.split(file) relative_path = self.absolute_to_relative(raw_path) label = meta_data[file] writer.writerow((os.path.join(relative_path, raw_filename), label)) finally: f.close() foot() def train(self, fold=0): raise IOError('Train setup not available.') # TUT Sound events 2016 development and evaluation sets class TUTSoundEvents_2016_DevelopmentSet(Dataset): """TUT Sound events 2016 development dataset This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio """ def __init__(self, data_path='data'): Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-development') self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen' self.name_remote = 'TUT Sound Events 2016, development dataset' self.url = 'https://zenodo.org/record/45759' self.audio_source = 'Field recording' self.audio_type = 'Natural' self.recording_device_model = 'Roland Edirol R-09' self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone' self.evaluation_folds = 4 self.package_list = [ { 'remote_package': None, 'local_package': None, 'local_audio_path': os.path.join(self.local_path, 'audio'), }, { 'remote_package': None, 'local_package': None, 'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'), }, { 'remote_package': None, 'local_package': None, 'local_audio_path': os.path.join(self.local_path, 'audio', 'home'), }, { 'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.doc.zip', 'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.doc.zip'), 'local_audio_path': os.path.join(self.local_path, 'audio'), }, { 'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.meta.zip', 'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.meta.zip'), 'local_audio_path': os.path.join(self.local_path, 'audio'), }, { 'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.audio.zip', 'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.audio.zip'), 'local_audio_path': os.path.join(self.local_path, 'audio'), }, ] def event_label_count(self, scene_label=None): return len(self.event_labels(scene_label=scene_label)) def event_labels(self, scene_label=None): labels = [] for item in self.meta: if scene_label is None or item['scene_label'] == scene_label: if 'event_label' in item and item['event_label'].rstrip() not in labels: labels.append(item['event_label'].rstrip()) labels.sort() return labels def on_after_extract(self): """After dataset packages are downloaded and extracted, meta-files are checked. 
Parameters ---------- nothing Returns ------- nothing """ if not os.path.isfile(self.meta_file): meta_file_handle = open(self.meta_file, 'wt') try: writer = csv.writer(meta_file_handle, delimiter='\t') for filename in self.audio_files: raw_path, raw_filename = os.path.split(filename) relative_path = self.absolute_to_relative(raw_path) scene_label = relative_path.replace('audio', '')[1:] base_filename, file_extension = os.path.splitext(raw_filename) annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'), base_filename + '.ann') if os.path.isfile(annotation_filename): annotation_file_handle = open(annotation_filename, 'rt') try: annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t') for annotation_file_row in annotation_file_reader: writer.writerow((os.path.join(relative_path, raw_filename), scene_label, float(annotation_file_row[0].replace(',', '.')), float(annotation_file_row[1].replace(',', '.')), annotation_file_row[2], 'm')) finally: annotation_file_handle.close() finally: meta_file_handle.close() def train(self, fold=0, scene_label=None): if fold not in self.evaluation_data_train: self.evaluation_data_train[fold] = {} for scene_label_ in self.scene_labels: if scene_label_ not in self.evaluation_data_train[fold]: self.evaluation_data_train[fold][scene_label_] = [] if fold > 0: with open( os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_train.txt'), 'rt') as f: for row in csv.reader(f, delimiter='\t'): if len(row) == 5: # Event meta self.evaluation_data_train[fold][scene_label_].append({ 'file': self.relative_to_absolute_path(row[0]), 'scene_label': row[1], 'event_onset': float(row[2]), 'event_offset': float(row[3]), 'event_label': row[4] }) else: data = [] for item in self.meta: if item['scene_label'] == scene_label_: if 'event_label' in item: data.append({'file': self.relative_to_absolute_path(item['file']), 'scene_label': item['scene_label'], 'event_onset': item['event_onset'], 'event_offset': item['event_offset'], 'event_label': item['event_label'], }) self.evaluation_data_train[0][scene_label_] = data if scene_label: return self.evaluation_data_train[fold][scene_label] else: data = [] for scene_label_ in self.scene_labels: for item in self.evaluation_data_train[fold][scene_label_]: data.append(item) return data def test(self, fold=0, scene_label=None): if fold not in self.evaluation_data_test: self.evaluation_data_test[fold] = {} for scene_label_ in self.scene_labels: if scene_label_ not in self.evaluation_data_test[fold]: self.evaluation_data_test[fold][scene_label_] = [] if fold > 0: with open( os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_test.txt'), 'rt') as f: for row in csv.reader(f, delimiter='\t'): self.evaluation_data_test[fold][scene_label_].append( {'file': self.relative_to_absolute_path(row[0])}) else: data = [] files = [] for item in self.meta: if scene_label_ in item: if self.relative_to_absolute_path(item['file']) not in files: data.append({'file': self.relative_to_absolute_path(item['file'])}) files.append(self.relative_to_absolute_path(item['file'])) self.evaluation_data_test[0][scene_label_] = data if scene_label: return self.evaluation_data_test[fold][scene_label] else: data = [] for scene_label_ in self.scene_labels: for item in self.evaluation_data_test[fold][scene_label_]: data.append(item) return data class TUTSoundEvents_2016_EvaluationSet(Dataset): """TUT Sound events 2016 evaluation dataset This dataset is used in DCASE2016 - Task 3, 
Sound event detection in real life audio """ def __init__(self, data_path='data'): Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-evaluation') self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen' self.name_remote = 'TUT Sound Events 2016, evaluation dataset' self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/' self.audio_source = 'Field recording' self.audio_type = 'Natural' self.recording_device_model = 'Roland Edirol R-09' self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone' self.evaluation_folds = 1 self.package_list = [ { 'remote_package': None, 'local_package': None, 'local_audio_path': os.path.join(self.local_path, 'audio'), }, { 'remote_package': None, 'local_package': None, 'local_audio_path': os.path.join(self.local_path, 'audio', 'home'), }, { 'remote_package': None, 'local_package': None, 'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'), }, ] @property def scene_labels(self): labels = ['home', 'residential_area'] labels.sort() return labels def event_label_count(self, scene_label=None): return len(self.event_labels(scene_label=scene_label)) def event_labels(self, scene_label=None): labels = [] for item in self.meta: if scene_label is None or item['scene_label'] == scene_label: if 'event_label' in item and item['event_label'] not in labels: labels.append(item['event_label']) labels.sort() return labels def on_after_extract(self): """After dataset packages are downloaded and extracted, meta-files are checked. Parameters ---------- nothing Returns ------- nothing """ if not os.path.isfile(self.meta_file) and os.path.isdir(os.path.join(self.local_path, 'meta')): meta_file_handle = open(self.meta_file, 'wt') try: writer = csv.writer(meta_file_handle, delimiter='\t') for filename in self.audio_files: raw_path, raw_filename = os.path.split(filename) relative_path = self.absolute_to_relative(raw_path) scene_label = relative_path.replace('audio', '')[1:] base_filename, file_extension = os.path.splitext(raw_filename) annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'), base_filename + '.ann') if os.path.isfile(annotation_filename): annotation_file_handle = open(annotation_filename, 'rt') try: annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t') for annotation_file_row in annotation_file_reader: writer.writerow((os.path.join(relative_path, raw_filename), scene_label, float(annotation_file_row[0].replace(',', '.')), float(annotation_file_row[1].replace(',', '.')), annotation_file_row[2], 'm')) finally: annotation_file_handle.close() finally: meta_file_handle.close() def train(self, fold=0, scene_label=None): raise IOError('Train setup not available.') def test(self, fold=0, scene_label=None): if fold not in self.evaluation_data_test: self.evaluation_data_test[fold] = {} for scene_label_ in self.scene_labels: if scene_label_ not in self.evaluation_data_test[fold]: self.evaluation_data_test[fold][scene_label_] = [] if fold > 0: with open(os.path.join(self.evaluation_setup_path, scene_label + '_fold' + str(fold) + '_test.txt'), 'rt') as f: for row in csv.reader(f, delimiter='\t'): self.evaluation_data_test[fold][scene_label_].append( {'file': self.relative_to_absolute_path(row[0])}) else: data = [] files = [] for item in self.audio_files: if scene_label_ in item: if self.relative_to_absolute_path(item) not in files: data.append({'file': self.relative_to_absolute_path(item)}) files.append(self.relative_to_absolute_path(item)) 
self.evaluation_data_test[0][scene_label_] = data if scene_label: return self.evaluation_data_test[fold][scene_label] else: data = [] for scene_label_ in self.scene_labels: for item in self.evaluation_data_test[fold][scene_label_]: data.append(item) return data # CHIME home class CHiMEHome_DomesticAudioTag_DevelopmentSet(Dataset): def __init__(self, data_path=None): Dataset.__init__(self, data_path=data_path, name='CHiMeHome-audiotag-development') self.authors = 'Peter Foster, Siddharth Sigtia, Sacha Krstulovic, Jon Barker, and Mark Plumbley' self.name_remote = 'The CHiME-Home dataset is a collection of annotated domestic environment audio recordings.' self.url = '' self.audio_source = 'Field recording' self.audio_type = 'Natural' self.recording_device_model = 'Unknown' self.microphone_model = 'Unknown' self.evaluation_folds = 10 self.package_list = [ { 'remote_package': 'https://archive.org/download/chime-home/chime_home.tar.gz', 'local_package': os.path.join(self.local_path, 'chime_home.tar.gz'), 'local_audio_path': os.path.join(self.local_path, 'chime_home', 'chunks'), }, ] @property def audio_files(self): """Get all audio files in the dataset, use only file from CHime-Home-refined set. Parameters ---------- nothing Returns ------- files : list audio files """ if self.files is None: refined_files = [] with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f: for row in csv.reader(f, delimiter=','): refined_files.append(row[1]) self.files = [] for file in self.package_list: path = file['local_audio_path'] if path: l = os.listdir(path) p = path.replace(self.local_path + os.path.sep, '') for f in l: fileName, fileExtension = os.path.splitext(f) if fileExtension[1:] in self.audio_extensions and fileName in refined_files: self.files.append(os.path.abspath(os.path.join(path, f))) self.files.sort() return self.files def read_chunk_meta(self, meta_filename): if os.path.isfile(meta_filename): meta_file_handle = open(meta_filename, 'rt') try: meta_file_reader = csv.reader(meta_file_handle, delimiter=',') data = {} for meta_file_row in meta_file_reader: data[meta_file_row[0]] = meta_file_row[1] finally: meta_file_handle.close() return data def tagcode_to_taglabel(self, tag): map = {'c': 'child speech', 'm': 'adult male speech', 'f': 'adult female speech', 'v': 'video game/tv', 'p': 'percussive sound', 'b': 'broadband noise', 'o': 'other', 'S': 'silence/background', 'U': 'unidentifiable' } if tag in map: return map[tag] else: return None def on_after_extract(self): """After dataset packages are downloaded and extracted, meta-files are checked. Legacy dataset meta files are converted to be compatible with current scheme. 
Parameters ---------- nothing Returns ------- nothing """ if not os.path.isfile(self.meta_file): section_header('Generating meta file for dataset') scene_label = 'home' f = open(self.meta_file, 'wt') try: writer = csv.writer(f, delimiter='\t') for file in self.audio_files: raw_path, raw_filename = os.path.split(file) relative_path = self.absolute_to_relative(raw_path) base_filename, file_extension = os.path.splitext(raw_filename) annotation_filename = os.path.join(raw_path, base_filename + '.csv') meta_data = self.read_chunk_meta(annotation_filename) tags = [] for i, tag in enumerate(meta_data['majorityvote']): if tag is 'b': print file if tag is not 'S' and tag is not 'U': tags.append(self.tagcode_to_taglabel(tag)) tags = ';'.join(tags) writer.writerow( (os.path.join(relative_path, raw_filename), scene_label, meta_data['majorityvote'], tags)) finally: f.close() foot() all_folds_found = True for fold in xrange(1, self.evaluation_folds): for target_tag in self.audio_tags: if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ', '_') + '_train.txt')): all_folds_found = False if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ', '_') + '_test.txt')): all_folds_found = False if not all_folds_found: if not os.path.isdir(self.evaluation_setup_path): os.makedirs(self.evaluation_setup_path) numpy.random.seed(475686) kf = KFold(n=len(self.audio_files), n_folds=self.evaluation_folds, shuffle=True) refined_files = [] with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f: for row in csv.reader(f, delimiter=','): refined_files.append( self.relative_to_absolute_path(os.path.join('chime_home', 'chunks', row[1] + '.wav'))) fold = 1 files = numpy.array(refined_files) for train_index, test_index in kf: train_files = files[train_index] test_files = files[test_index] with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in train_files: raw_path, raw_filename = os.path.split(file) relative_path = raw_path.replace(self.local_path + os.path.sep, '') item = self.file_meta(file)[0] writer.writerow( [os.path.join(relative_path, raw_filename), item['scene_label'], item['tag_string'], ';'.join(item['tags'])]) with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in test_files: raw_path, raw_filename = os.path.split(file) relative_path = raw_path.replace(self.local_path + os.path.sep, '') writer.writerow([os.path.join(relative_path, raw_filename)]) with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in test_files: raw_path, raw_filename = os.path.split(file) relative_path = raw_path.replace(self.local_path + os.path.sep, '') item = self.file_meta(file)[0] writer.writerow( [os.path.join(relative_path, raw_filename), item['scene_label'], item['tag_string'], ';'.join(item['tags'])]) fold += 1 # Legacy datasets # ===================================================== # DCASE 2013 # ===================================================== class DCASE2013_Scene_DevelopmentSet(Dataset): """DCASE 2013 Acoustic scene classification, development dataset """ def __init__(self, data_path='data'): Dataset.__init__(self, data_path=data_path, 
name='DCASE2013-scene-development') self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley' self.name_remote = 'IEEE AASP 2013 CASA Challenge - Public Dataset for Scene Classification Task' self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/' self.audio_source = 'Field recording' self.audio_type = 'Natural' self.recording_device_model = 'Unknown' self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone' self.evaluation_folds = 5 self.package_list = [ { 'remote_package': 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/29/scenes_stereo.zip?sequence=1', 'local_package': os.path.join(self.local_path, 'scenes_stereo.zip'), 'local_audio_path': os.path.join(self.local_path, 'scenes_stereo'), } ] def on_after_extract(self): # Make legacy dataset compatible with DCASE2016 dataset scheme if not os.path.isfile(self.meta_file): section_header('Generating meta file for dataset') f = open(self.meta_file, 'wt') try: writer = csv.writer(f, delimiter='\t') for file in self.audio_files: raw_path, raw_filename = os.path.split(file) relative_path = self.absolute_to_relative(raw_path) label = os.path.splitext(os.path.split(file)[1])[0][:-2] writer.writerow((os.path.join(relative_path, raw_filename), label)) finally: f.close() foot() all_folds_found = True for fold in xrange(1, self.evaluation_folds): if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')): all_folds_found = False if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')): all_folds_found = False if not all_folds_found: section_header('Generating evaluation setup files for dataset') if not os.path.isdir(self.evaluation_setup_path): os.makedirs(self.evaluation_setup_path) classes = [] files = [] for item in self.meta: classes.append(item['scene_label']) files.append(item['file']) files = numpy.array(files) sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0) fold = 1 for train_index, test_index in sss: # print("TRAIN:", train_index, "TEST:", test_index) train_files = files[train_index] with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in train_files: raw_path, raw_filename = os.path.split(file) label = self.file_meta(file)[0]['scene_label'] writer.writerow([os.path.join(raw_path, raw_filename), label]) test_files = files[test_index] with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in test_files: raw_path, raw_filename = os.path.split(file) writer.writerow([os.path.join(raw_path, raw_filename)]) with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in test_files: raw_path, raw_filename = os.path.split(file) label = self.file_meta(file)[0]['scene_label'] writer.writerow([os.path.join(raw_path, raw_filename), label]) fold += 1 foot() class DCASE2013_Scene_EvaluationSet(DCASE2013_Scene_DevelopmentSet): """DCASE 2013 Acoustic scene classification, evaluation dataset """ def __init__(self, data_path='data'): Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-challenge') self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley' self.name_remote = 'IEEE AASP 2013 CASA Challenge - Private Dataset for Scene 
Classification Task' self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/' self.audio_source = 'Field recording' self.audio_type = 'Natural' self.recording_device_model = 'Unknown' self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone' self.evaluation_folds = 5 self.package_list = [ { 'remote_package': 'https://archive.org/download/dcase2013_scene_classification_testset/scenes_stereo_testset.zip', 'local_package': os.path.join(self.local_path, 'scenes_stereo_testset.zip'), 'local_audio_path': os.path.join(self.local_path, 'scenes_stereo_testset'), } ] def on_after_extract(self): # Make legacy dataset compatible with DCASE2016 dataset scheme if not os.path.isfile(self.meta_file) or 1: section_header('Generating meta file for dataset') f = open(self.meta_file, 'wt') try: writer = csv.writer(f, delimiter='\t') for file in self.audio_files: raw_path, raw_filename = os.path.split(file) relative_path = self.absolute_to_relative(raw_path) label = os.path.splitext(os.path.split(file)[1])[0][:-2] writer.writerow((os.path.join(relative_path, raw_filename), label)) finally: f.close() foot() all_folds_found = True for fold in xrange(1, self.evaluation_folds): if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')): all_folds_found = False if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')): all_folds_found = False if not all_folds_found: section_header('Generating evaluation setup files for dataset') if not os.path.isdir(self.evaluation_setup_path): os.makedirs(self.evaluation_setup_path) classes = [] files = [] for item in self.meta: classes.append(item['scene_label']) files.append(item['file']) files = numpy.array(files) sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0) fold = 1 for train_index, test_index in sss: train_files = files[train_index] with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in train_files: raw_path, raw_filename = os.path.split(file) label = self.file_meta(file)[0]['scene_label'] writer.writerow([os.path.join(raw_path, raw_filename), label]) test_files = files[test_index] with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in test_files: raw_path, raw_filename = os.path.split(file) writer.writerow([os.path.join(raw_path, raw_filename)]) with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in test_files: raw_path, raw_filename = os.path.split(file) label = self.file_meta(file)[0]['scene_label'] writer.writerow([os.path.join(raw_path, raw_filename), label]) fold += 1 foot() # Sound events class DCASE2013_Event_DevelopmentSet(Dataset): """DCASE 2013 Sound event detection, development dataset """ def __init__(self, data_path='data'): Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-development') self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley' self.name_remote = 'IEEE AASP CASA Challenge - Public Dataset for Event Detection Task' self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/' self.audio_source = 'Field recording' self.audio_type = 'Natural' self.recording_device_model = 'Unknown' self.microphone_model = 'Soundman OKM II 
Klassik/studio A3 electret microphone' self.evaluation_folds = 5 self.package_list = [ { 'remote_package': 'https://archive.org/download/dcase2013_event_detection_development_OS/events_OS_development_v2.zip', 'local_package': os.path.join(self.local_path, 'events_OS_development_v2.zip'), 'local_audio_path': os.path.join(self.local_path, 'events_OS_development_v2'), }, # { # 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_annotation.zip?sequence=9', # 'local_package': os.path.join(self.local_path, 'singlesounds_annotation.zip'), # 'local_audio_path': None, # }, # { # 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_stereo.zip?sequence=7', # 'local_package': os.path.join(self.local_path, 'singlesounds_stereo.zip'), # 'local_audio_path': os.path.join(self.local_path, 'singlesounds_stereo'), # }, ] def on_after_extract(self): # Make legacy dataset compatible with DCASE2016 dataset scheme scene_label = 'office' if not os.path.isfile(self.meta_file): meta_file_handle = open(self.meta_file, 'wt') try: writer = csv.writer(meta_file_handle, delimiter='\t') for file in self.audio_files: raw_path, raw_filename = os.path.split(file) relative_path = self.absolute_to_relative(raw_path) base_filename, file_extension = os.path.splitext(raw_filename) if file.find('singlesounds_stereo') != -1: annotation_filename = os.path.join(self.local_path, 'Annotation1', base_filename + '_bdm.txt') label = base_filename[:-2] if os.path.isfile(annotation_filename): annotation_file_handle = open(annotation_filename, 'rt') try: annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t') for annotation_file_row in annotation_file_reader: writer.writerow((os.path.join(relative_path, raw_filename), scene_label, annotation_file_row[0], annotation_file_row[1], label, 'i')) finally: annotation_file_handle.close() elif file.find('events_OS_development_v2') != -1: annotation_filename = os.path.join(self.local_path, 'events_OS_development_v2', base_filename + '_v2.txt') if os.path.isfile(annotation_filename): annotation_file_handle = open(annotation_filename, 'rt') try: annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t') for annotation_file_row in annotation_file_reader: writer.writerow((os.path.join(relative_path, raw_filename), scene_label, annotation_file_row[0], annotation_file_row[1], annotation_file_row[2], 'm')) finally: annotation_file_handle.close() finally: meta_file_handle.close() all_folds_found = True for fold in xrange(1, self.evaluation_folds): if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')): all_folds_found = False if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')): all_folds_found = False if not all_folds_found: # Construct training and testing sets. Isolated sound are used for training and # polyphonic mixtures are used for testing. 
if not os.path.isdir(self.evaluation_setup_path): os.makedirs(self.evaluation_setup_path) files = [] for item in self.meta: if item['file'] not in files: files.append(item['file']) files = numpy.array(files) f = numpy.zeros(len(files)) sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0) fold = 1 for train_index, test_index in sss: # print("TRAIN:", train_index, "TEST:", test_index) train_files = files[train_index] with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in train_files: raw_path, raw_filename = os.path.split(file) relative_path = raw_path.replace(self.local_path + os.path.sep, '') for item in self.meta: if item['file'] == file: writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'], item['event_onset'], item['event_offset'], item['event_label']]) test_files = files[test_index] with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in test_files: raw_path, raw_filename = os.path.split(file) relative_path = raw_path.replace(self.local_path + os.path.sep, '') writer.writerow([os.path.join(relative_path, raw_filename)]) with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in test_files: raw_path, raw_filename = os.path.split(file) relative_path = raw_path.replace(self.local_path + os.path.sep, '') for item in self.meta: if item['file'] == file: writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'], item['event_onset'], item['event_offset'], item['event_label']]) fold += 1 class DCASE2013_Event_EvaluationSet(Dataset): """DCASE 2013 Sound event detection, evaluation dataset """ def __init__(self, data_path='data'): Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-challenge') self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley' self.name_remote = 'IEEE AASP CASA Challenge - Private Dataset for Event Detection Task' self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/' self.audio_source = 'Field recording' self.audio_type = 'Natural' self.recording_device_model = 'Unknown' self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone' self.evaluation_folds = 5 self.package_list = [ { 'remote_package': 'https://archive.org/download/dcase2013_event_detection_testset_OS/dcase2013_event_detection_testset_OS.zip', 'local_package': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS.zip'), 'local_audio_path': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS'), } ] def on_after_extract(self): # Make legacy dataset compatible with DCASE2016 dataset scheme scene_label = 'office' if not os.path.isfile(self.meta_file): meta_file_handle = open(self.meta_file, 'wt') try: writer = csv.writer(meta_file_handle, delimiter='\t') for file in self.audio_files: raw_path, raw_filename = os.path.split(file) relative_path = self.absolute_to_relative(raw_path) base_filename, file_extension = os.path.splitext(raw_filename) if file.find('dcase2013_event_detection_testset_OS') != -1: annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS', base_filename + '_v2.txt') if os.path.isfile(annotation_filename): annotation_file_handle = open(annotation_filename, 'rt') try: annotation_file_reader = 
csv.reader(annotation_file_handle, delimiter='\t') for annotation_file_row in annotation_file_reader: writer.writerow((os.path.join(relative_path, raw_filename), scene_label, annotation_file_row[0], annotation_file_row[1], annotation_file_row[2], 'm')) finally: annotation_file_handle.close() else: annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS', base_filename + '.txt') if os.path.isfile(annotation_filename): annotation_file_handle = open(annotation_filename, 'rt') try: annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t') for annotation_file_row in annotation_file_reader: writer.writerow((os.path.join(relative_path, raw_filename), scene_label, annotation_file_row[0], annotation_file_row[1], annotation_file_row[2], 'm')) finally: annotation_file_handle.close() finally: meta_file_handle.close() all_folds_found = True for fold in xrange(1, self.evaluation_folds): if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')): all_folds_found = False if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')): all_folds_found = False if not all_folds_found: # Construct training and testing sets. Isolated sound are used for training and # polyphonic mixtures are used for testing. if not os.path.isdir(self.evaluation_setup_path): os.makedirs(self.evaluation_setup_path) files = [] for item in self.meta: if item['file'] not in files: files.append(item['file']) files = numpy.array(files) f = numpy.zeros(len(files)) sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0) fold = 1 for train_index, test_index in sss: # print("TRAIN:", train_index, "TEST:", test_index) train_files = files[train_index] with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in train_files: raw_path, raw_filename = os.path.split(file) relative_path = raw_path.replace(self.local_path + os.path.sep, '') for item in self.meta: if item['file'] == file: writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'], item['event_onset'], item['event_offset'], item['event_label']]) test_files = files[test_index] with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in test_files: raw_path, raw_filename = os.path.split(file) relative_path = raw_path.replace(self.local_path + os.path.sep, '') writer.writerow([os.path.join(relative_path, raw_filename)]) with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f: writer = csv.writer(f, delimiter='\t') for file in test_files: raw_path, raw_filename = os.path.split(file) relative_path = raw_path.replace(self.local_path + os.path.sep, '') for item in self.meta: if item['file'] == file: writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'], item['event_onset'], item['event_offset'], item['event_label']]) fold += 1
0.003507
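The train() and test() methods of the development set above return per-fold item lists keyed by scene. A minimal driver sketch, assuming the base Dataset class has already downloaded and extracted the packages so the evaluation-setup files exist; the printing loop is only illustrative:

dataset = TUTSoundEvents_2016_DevelopmentSet(data_path='data')

for fold in range(1, dataset.evaluation_folds + 1):
    for scene_label in dataset.scene_labels:
        train_items = dataset.train(fold=fold, scene_label=scene_label)
        test_items = dataset.test(fold=fold, scene_label=scene_label)
        # Training items carry 'file', 'scene_label', 'event_onset',
        # 'event_offset' and 'event_label'; test items only carry 'file'.
        print(fold, scene_label, len(train_items), len(test_items))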
# coding: utf-8
from __future__ import unicode_literals

from .brightcove import BrightcoveNewIE
from ..utils import extract_attributes


class BandaiChannelIE(BrightcoveNewIE):
    IE_NAME = 'bandaichannel'
    _VALID_URL = r'https?://(?:www\.)?b-ch\.com/titles/(?P<id>\d+/\d+)'
    _TESTS = [{
        'url': 'https://www.b-ch.com/titles/514/001',
        'md5': 'a0f2d787baa5729bed71108257f613a4',
        'info_dict': {
            'id': '6128044564001',
            'ext': 'mp4',
            'title': 'メタルファイターMIKU 第1話',
            'timestamp': 1580354056,
            'uploader_id': '5797077852001',
            'upload_date': '20200130',
            'duration': 1387.733,
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        attrs = extract_attributes(self._search_regex(
            r'(<video-js[^>]+\bid="bcplayer"[^>]*>)', webpage, 'player'))
        bc = self._download_json(
            'https://pbifcd.b-ch.com/v1/playbackinfo/ST/70/' + attrs['data-info'],
            video_id, headers={'X-API-KEY': attrs['data-auth'].strip()})['bc']
        return self._parse_brightcove_metadata(bc, bc['id'])
0.00076
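The extractor relies on _VALID_URL capturing a two-part identifier (title id and episode). The pattern can be checked on its own with the standard re module, using the URL from _TESTS:

import re

# Same pattern as BandaiChannelIE._VALID_URL above.
valid_url = r'https?://(?:www\.)?b-ch\.com/titles/(?P<id>\d+/\d+)'

match = re.match(valid_url, 'https://www.b-ch.com/titles/514/001')
print(match.group('id'))  # -> '514/001', the value _match_id() hands to _real_extract()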
from pydevd_constants import * import pydevd_tracing import sys import pydev_log _original_excepthook = None _handle_exceptions = None NOTIFY_ALWAYS="NOTIFY_ALWAYS" NOTIFY_ON_TERMINATE="NOTIFY_ON_TERMINATE" if USE_LIB_COPY: import _pydev_threading as threading else: import threading threadingCurrentThread = threading.currentThread from pydevd_comm import GetGlobalDebugger class ExceptionBreakpoint: def __init__(self, qname, notify_always, notify_on_terminate): exctype = get_class(qname) self.qname = qname if exctype is not None: self.name = exctype.__name__ else: self.name = None self.notify_on_terminate = int(notify_on_terminate) == 1 self.notify_always = int(notify_always) > 0 self.notify_on_first_raise_only = int(notify_always) == 2 self.type = exctype self.notify = {NOTIFY_ALWAYS: self.notify_always, NOTIFY_ON_TERMINATE: self.notify_on_terminate} def __str__(self): return self.qname class LineBreakpoint: def __init__(self, type, flag, condition, func_name, expression): self.type = type self.condition = condition self.func_name = func_name self.expression = expression def get_break_dict(self, breakpoints, file): if DictContains(breakpoints, file): breakDict = breakpoints[file] else: breakDict = {} breakpoints[file] = breakDict return breakDict def trace(self, file, line, func_name): if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0: pydev_log.debug('Added breakpoint:%s - line:%s - func_name:%s\n' % (file, line, func_name)) sys.stderr.flush() def add(self, breakpoints, file, line, func_name): self.trace(file, line, func_name) breakDict = self.get_break_dict(breakpoints, file) breakDict[line] = self def get_exception_full_qname(exctype): if not exctype: return None return str(exctype.__module__) + '.' + exctype.__name__ def get_exception_name(exctype): if not exctype: return None return exctype.__name__ def get_exception_breakpoint(exctype, exceptions, notify_class): name = get_exception_full_qname(exctype) exc = None if exceptions is not None: for k, e in exceptions.items(): if e.notify[notify_class]: if name == k: return e if (e.type is not None and issubclass(exctype, e.type)): if exc is None or issubclass(e.type, exc.type): exc = e return exc #======================================================================================================================= # excepthook #======================================================================================================================= def excepthook(exctype, value, tb): global _handle_exceptions if _handle_exceptions is not None: exception_breakpoint = get_exception_breakpoint(exctype, _handle_exceptions, NOTIFY_ON_TERMINATE) else: exception_breakpoint = None if exception_breakpoint is None: return _original_excepthook(exctype, value, tb) #Always call the original excepthook before going on to call the debugger post mortem to show it. _original_excepthook(exctype, value, tb) if tb is None: #sometimes it can be None, e.g. 
with GTK return frames = [] traceback = tb while tb: frames.append(tb.tb_frame) tb = tb.tb_next thread = threadingCurrentThread() frames_byid = dict([(id(frame),frame) for frame in frames]) frame = frames[-1] thread.additionalInfo.exception = (exctype, value, tb) thread.additionalInfo.pydev_force_stop_at_exception = (frame, frames_byid) thread.additionalInfo.message = exception_breakpoint.qname #sys.exc_info = lambda : (exctype, value, traceback) debugger = GetGlobalDebugger() debugger.force_post_mortem_stop += 1 pydevd_tracing.SetTrace(None) #no tracing from here debugger.handle_post_mortem_stop(thread.additionalInfo, thread) #======================================================================================================================= # set_pm_excepthook #======================================================================================================================= def set_pm_excepthook(handle_exceptions_arg=None): ''' Should be called to register the excepthook to be used. It's only useful for uncaucht exceptions. I.e.: exceptions that go up to the excepthook. Can receive a parameter to stop only on some exceptions. E.g.: register_excepthook((IndexError, ValueError)) or register_excepthook(IndexError) if passed without a parameter, will break on any exception @param handle_exceptions: exception or tuple(exceptions) The exceptions that should be handled. ''' global _handle_exceptions global _original_excepthook if sys.excepthook != excepthook: #Only keep the original if it's not our own excepthook (if called many times). _original_excepthook = sys.excepthook _handle_exceptions = handle_exceptions_arg sys.excepthook = excepthook def restore_pm_excepthook(): global _original_excepthook if _original_excepthook: sys.excepthook = _original_excepthook _original_excepthook = None def update_exception_hook(dbg): if dbg.exception_set: set_pm_excepthook(dict(dbg.exception_set)) else: restore_pm_excepthook() def get_class( kls ): if IS_PY24 and "BaseException" == kls: kls = "Exception" parts = kls.split('.') module = ".".join(parts[:-1]) if module == "": if IS_PY3K: module = "builtins" else: module = "__builtin__" try: m = __import__( module ) for comp in parts[-1:]: if m is None: return None m = getattr(m, comp, None) return m except ImportError: return None
0.007019
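set_pm_excepthook() and restore_pm_excepthook() implement a save/chain/restore pattern around sys.excepthook. A stripped-down, self-contained sketch of the same pattern; the hook body here is a placeholder, not pydevd's post-mortem logic:

import sys

_saved_excepthook = None

def _custom_excepthook(exctype, value, tb):
    # Let the saved hook print the traceback first, then do the extra work.
    _saved_excepthook(exctype, value, tb)
    print('custom hook saw %s' % exctype.__name__)

def install_hook():
    global _saved_excepthook
    # Only remember the original if it is not already our own hook,
    # so repeated installs cannot overwrite it.
    if sys.excepthook is not _custom_excepthook:
        _saved_excepthook = sys.excepthook
    sys.excepthook = _custom_excepthook

def restore_hook():
    global _saved_excepthook
    if _saved_excepthook is not None:
        sys.excepthook = _saved_excepthook
        _saved_excepthook = None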
# Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path import webob from nova.api.openstack import common from nova.api.openstack.compute.schemas import create_backup from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.api import validation from nova import compute from nova import exception ALIAS = "os-create-backup" authorize = extensions.os_compute_authorizer(ALIAS) class CreateBackupController(wsgi.Controller): def __init__(self, *args, **kwargs): super(CreateBackupController, self).__init__(*args, **kwargs) self.compute_api = compute.API(skip_policy_check=True) @extensions.expected_errors((400, 403, 404, 409)) @wsgi.action('createBackup') @validation.schema(create_backup.create_backup) def _create_backup(self, req, id, body): """Backup a server instance. Images now have an `image_type` associated with them, which can be 'snapshot' or the backup type, like 'daily' or 'weekly'. If the image_type is backup-like, then the rotation factor can be included and that will cause the oldest backups that exceed the rotation factor to be deleted. """ context = req.environ["nova.context"] authorize(context) entity = body["createBackup"] image_name = entity["name"] backup_type = entity["backup_type"] rotation = int(entity["rotation"]) props = {} metadata = entity.get('metadata', {}) common.check_img_metadata_properties_quota(context, metadata) props.update(metadata) instance = common.get_instance(self.compute_api, context, id) try: image = self.compute_api.backup(context, instance, image_name, backup_type, rotation, extra_properties=props) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'createBackup', id) except exception.InvalidRequest as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) resp = webob.Response(status_int=202) # build location of newly-created image entity if rotation is not zero if rotation > 0: image_id = str(image['id']) image_ref = os.path.join(req.application_url, 'images', image_id) resp.headers['Location'] = image_ref return resp class CreateBackup(extensions.V21APIExtensionBase): """Create a backup of a server.""" name = "CreateBackup" alias = ALIAS version = 1 def get_controller_extensions(self): controller = CreateBackupController() extension = extensions.ControllerExtension(self, 'servers', controller) return [extension] def get_resources(self): return []
0.000579
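The docstring above describes rotation as keeping at most N backups of a given type and deleting the oldest ones beyond that. The pruning rule in isolation, on hypothetical timestamped records (nova performs the real rotation inside compute_api.backup):

from datetime import datetime

# Hypothetical backup records: (image name, creation time).
backups = [
    ('weekly-01', datetime(2015, 1, 4)),
    ('weekly-02', datetime(2015, 1, 11)),
    ('weekly-03', datetime(2015, 1, 18)),
    ('weekly-04', datetime(2015, 1, 25)),
]
rotation = 3

# Keep the newest `rotation` images, delete everything older.
backups.sort(key=lambda item: item[1], reverse=True)
keep, to_delete = backups[:rotation], backups[rotation:]
print([name for name, _ in to_delete])  # -> ['weekly-01']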
#! /usr/bin/env python # Copyright (c) 2009-2014, The Linux Foundation. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of The Linux Foundation nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Build the kernel for all targets using the Android build environment. from collections import namedtuple import glob from optparse import OptionParser import os import re import shutil import subprocess import sys import threading import Queue version = 'build-all.py, version 1.99' build_dir = '../all-kernels' make_command = ["vmlinux", "modules", "dtbs"] all_options = {} compile64 = os.environ.get('CROSS_COMPILE64') def error(msg): sys.stderr.write("error: %s\n" % msg) def fail(msg): """Fail with a user-printed message""" error(msg) sys.exit(1) if not os.environ.get('CROSS_COMPILE'): fail("CROSS_COMPILE must be set in the environment") def check_kernel(): """Ensure that PWD is a kernel directory""" if (not os.path.isfile('MAINTAINERS') or not os.path.isfile('arch/arm/mach-msm/Kconfig')): fail("This doesn't seem to be an MSM kernel dir") def check_build(): """Ensure that the build directory is present.""" if not os.path.isdir(build_dir): try: os.makedirs(build_dir) except OSError as exc: if exc.errno == errno.EEXIST: pass else: raise def build_threads(): """Determine the number of build threads requested by the user""" if all_options.load_average: return all_options.load_average return all_options.jobs or 1 failed_targets = [] BuildResult = namedtuple('BuildResult', ['status', 'messages']) class BuildSequence(namedtuple('BuildSequence', ['log_name', 'short_name', 'steps'])): def set_width(self, width): self.width = width def __enter__(self): self.log = open(self.log_name, 'w') def __exit__(self, type, value, traceback): self.log.close() def run(self): self.status = None messages = ["Building: " + self.short_name] def printer(line): text = "[%-*s] %s" % (self.width, self.short_name, line) messages.append(text) self.log.write(text) self.log.write('\n') for step in self.steps: st = step.run(printer) if st: self.status = BuildResult(self.short_name, messages) break if not self.status: self.status = BuildResult(None, messages) class 
BuildTracker: """Manages all of the steps necessary to perform a build. The build consists of one or more sequences of steps. The different sequences can be processed independently, while the steps within a sequence must be done in order.""" def __init__(self): self.sequence = [] self.lock = threading.Lock() def add_sequence(self, log_name, short_name, steps): self.sequence.append(BuildSequence(log_name, short_name, steps)) def longest_name(self): longest = 0 for seq in self.sequence: longest = max(longest, len(seq.short_name)) return longest def __repr__(self): return "BuildTracker(%s)" % self.sequence def run_child(self, seq): seq.set_width(self.longest) tok = self.build_tokens.get() with self.lock: print "Building:", seq.short_name with seq: seq.run() self.results.put(seq.status) self.build_tokens.put(tok) def run(self): self.longest = self.longest_name() self.results = Queue.Queue() children = [] errors = [] self.build_tokens = Queue.Queue() nthreads = build_threads() print "Building with", nthreads, "threads" for i in range(nthreads): self.build_tokens.put(True) for seq in self.sequence: child = threading.Thread(target=self.run_child, args=[seq]) children.append(child) child.start() for child in children: stats = self.results.get() if all_options.verbose: with self.lock: for line in stats.messages: print line sys.stdout.flush() if stats.status: errors.append(stats.status) for child in children: child.join() if errors: fail("\n ".join(["Failed targets:"] + errors)) class PrintStep: """A step that just prints a message""" def __init__(self, message): self.message = message def run(self, outp): outp(self.message) class MkdirStep: """A step that makes a directory""" def __init__(self, direc): self.direc = direc def run(self, outp): outp("mkdir %s" % self.direc) os.mkdir(self.direc) class RmtreeStep: def __init__(self, direc): self.direc = direc def run(self, outp): outp("rmtree %s" % self.direc) shutil.rmtree(self.direc, ignore_errors=True) class CopyfileStep: def __init__(self, src, dest): self.src = src self.dest = dest def run(self, outp): outp("cp %s %s" % (self.src, self.dest)) shutil.copyfile(self.src, self.dest) class ExecStep: def __init__(self, cmd, **kwargs): self.cmd = cmd self.kwargs = kwargs def run(self, outp): outp("exec: %s" % (" ".join(self.cmd),)) with open('/dev/null', 'r') as devnull: proc = subprocess.Popen(self.cmd, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **self.kwargs) stdout = proc.stdout while True: line = stdout.readline() if not line: break line = line.rstrip('\n') outp(line) result = proc.wait() if result != 0: return ('error', result) else: return None class Builder(): def __init__(self, name, defconfig): self.name = name self.defconfig = defconfig self.confname = self.defconfig.split('/')[-1] # Determine if this is a 64-bit target based on the location # of the defconfig. 
self.make_env = os.environ.copy() if "/arm64/" in defconfig: if compile64: self.make_env['CROSS_COMPILE'] = compile64 else: fail("Attempting to build 64-bit, without setting CROSS_COMPILE64") self.make_env['ARCH'] = 'arm64' else: self.make_env['ARCH'] = 'arm' self.make_env['KCONFIG_NOTIMESTAMP'] = 'true' self.log_name = "%s/log-%s.log" % (build_dir, self.name) def build(self): steps = [] dest_dir = os.path.join(build_dir, self.name) log_name = "%s/log-%s.log" % (build_dir, self.name) steps.append(PrintStep('Building %s in %s log %s' % (self.name, dest_dir, log_name))) if not os.path.isdir(dest_dir): steps.append(MkdirStep(dest_dir)) defconfig = self.defconfig dotconfig = '%s/.config' % dest_dir savedefconfig = '%s/defconfig' % dest_dir staging_dir = 'install_staging' modi_dir = '%s' % staging_dir hdri_dir = '%s/usr' % staging_dir steps.append(RmtreeStep(os.path.join(dest_dir, staging_dir))) with open('/dev/null', 'r') as devnull: subprocess.check_call(['make', 'O=%s' % dest_dir, 'SELINUX_DEFCONFIG=selinux_defconfig', 'SELINUX_LOG_DEFCONFIG=selinux_log_defconfig', 'TIMA_DEFCONFIG=tima_defconfig', self.confname], env=self.make_env, stdin=devnull) if not all_options.updateconfigs: # Build targets can be dependent upon the completion of # previous build targets, so build them one at a time. cmd_line = ['make', 'INSTALL_HDR_PATH=%s' % hdri_dir, 'INSTALL_MOD_PATH=%s' % modi_dir, 'O=%s' % dest_dir] build_targets = [] for c in make_command: if re.match(r'^-{1,2}\w', c): cmd_line.append(c) else: build_targets.append(c) for t in build_targets: steps.append(ExecStep(cmd_line + [t], env=self.make_env)) # Copy the defconfig back. if all_options.configs or all_options.updateconfigs: steps.append(ExecStep(['make', 'O=%s' % dest_dir, 'savedefconfig'], env=self.make_env)) steps.append(CopyfileStep(savedefconfig, defconfig)) return steps def update_config(file, str): print 'Updating %s with \'%s\'\n' % (file, str) with open(file, 'a') as defconfig: defconfig.write(str + '\n') def scan_configs(): """Get the full list of defconfigs appropriate for this tree.""" names = [] arch_pats = ( r'[fm]sm[0-9]*_defconfig', r'apq*_defconfig', r'qsd*_defconfig', r'mdm*_defconfig', r'mpq*_defconfig', ) arch64_pats = ( r'msm_defconfig', ) for p in arch_pats: for n in glob.glob('arch/arm/configs/' + p): name = os.path.basename(n)[:-10] names.append(Builder(name, n)) if 'CROSS_COMPILE64' in os.environ: for p in arch64_pats: for n in glob.glob('arch/arm64/configs/' + p): name = os.path.basename(n)[:-10] + "-64" names.append(Builder(name, n)) return names def build_many(targets): print "Building %d target(s)" % len(targets) # If we are requesting multiple builds, divide down the job number # to construct the make_command, giving it a floor of 2, so there # is still some parallelism. if all_options.jobs and all_options.jobs > 1: j = max(all_options.jobs / len(targets), 2) make_command.append("-j" + str(j)) tracker = BuildTracker() for target in targets: if all_options.updateconfigs: update_config(target.defconfig, all_options.updateconfigs) steps = target.build() tracker.add_sequence(target.log_name, target.name, steps) tracker.run() def main(): global make_command check_kernel() check_build() configs = scan_configs() usage = (""" %prog [options] all -- Build all targets %prog [options] target target ... 
-- List specific targets %prog [options] perf -- Build all perf targets %prog [options] noperf -- Build all non-perf targets""") parser = OptionParser(usage=usage, version=version) parser.add_option('--configs', action='store_true', dest='configs', help="Copy configs back into tree") parser.add_option('--list', action='store_true', dest='list', help='List available targets') parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='Output to stdout in addition to log file') parser.add_option('--oldconfig', action='store_true', dest='oldconfig', help='Only process "make oldconfig"') parser.add_option('--updateconfigs', dest='updateconfigs', help="Update defconfigs with provided option setting, " "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'") parser.add_option('-j', '--jobs', type='int', dest="jobs", help="Number of simultaneous jobs") parser.add_option('-l', '--load-average', type='int', dest='load_average', help="Don't start multiple jobs unless load is below LOAD_AVERAGE") parser.add_option('-k', '--keep-going', action='store_true', dest='keep_going', default=False, help="Keep building other targets if a target fails") parser.add_option('-m', '--make-target', action='append', help='Build the indicated make target (default: %s)' % ' '.join(make_command)) (options, args) = parser.parse_args() global all_options all_options = options if options.list: print "Available targets:" for target in configs: print " %s" % target.name sys.exit(0) if options.oldconfig: make_command = ["oldconfig"] elif options.make_target: make_command = options.make_target if args == ['all']: build_many(configs) elif args == ['perf']: targets = [] for t in configs: if "perf" in t.name: targets.append(t) build_many(targets) elif args == ['noperf']: targets = [] for t in configs: if "perf" not in t.name: targets.append(t) build_many(targets) elif len(args) > 0: all_configs = {} for t in configs: all_configs[t.name] = t targets = [] for t in args: if t not in all_configs: parser.error("Target '%s' not one of %s" % (t, all_configs.keys())) targets.append(all_configs[t]) build_many(targets) else: parser.error("Must specify a target to build, or 'all'") if __name__ == "__main__": main()
0.004551
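build_many() divides the requested -j value across the targets so parallel builds do not oversubscribe the machine, with a floor of 2 jobs per target. The same arithmetic in isolation (integer division, as in the Python 2 script):

def jobs_per_target(total_jobs, n_targets):
    # Same rule as build_many(): split -j across targets, never below 2.
    return max(total_jobs // n_targets, 2)

print(jobs_per_target(16, 4))  # -> 4
print(jobs_per_target(4, 8))   # -> 2, the floor keeps some parallelism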
import vdf implemented_bots = set([ 'npc_dota_hero_axe', 'npc_dota_hero_bane', 'npc_dota_hero_bounty_hunter', 'npc_dota_hero_bloodseeker', 'npc_dota_hero_bristleback', 'npc_dota_hero_chaos_knight', 'npc_dota_hero_crystal_maiden', 'npc_dota_hero_dazzle', 'npc_dota_hero_death_prophet', 'npc_dota_hero_dragon_knight', 'npc_dota_hero_drow_ranger', 'npc_dota_hero_earthshaker', 'npc_dota_hero_jakiro', 'npc_dota_hero_juggernaut', 'npc_dota_hero_kunkka', 'npc_dota_hero_lich', 'npc_dota_hero_lina', 'npc_dota_hero_lion', 'npc_dota_hero_luna', 'npc_dota_hero_necrolyte', 'npc_dota_hero_omniknight', 'npc_dota_hero_oracle', 'npc_dota_hero_phantom_assassin', 'npc_dota_hero_pudge', 'npc_dota_hero_razor', 'npc_dota_hero_sand_king', 'npc_dota_hero_nevermore', 'npc_dota_hero_skywrath_mage', 'npc_dota_hero_sniper', 'npc_dota_hero_sven', 'npc_dota_hero_tidehunter', 'npc_dota_hero_tiny', 'npc_dota_hero_vengefulspirit', 'npc_dota_hero_viper', 'npc_dota_hero_warlock', 'npc_dota_hero_windrunner', 'npc_dota_hero_witch_doctor', 'npc_dota_hero_skeleton_king', 'npc_dota_hero_zuus', ]) heroes = vdf.load(open(r'D:\games\steamapps\common\dota 2 beta\game\dota\scripts\npc\npc_heroes.txt')) with open('hero_bot_data.lua', 'w') as output: # Write module exporting stuff #1 output.write('_G._savedEnv = getfenv()\n') output.write('module("hero_bot_data", package.seeall)\n') output.write('\n') # Collect all hero types hero_types = set() hero_type_ids = {} for name, data in heroes['DOTAHeroes'].iteritems(): if isinstance(data, dict) and 'Bot' in data: this_hero_type = data['Bot']['HeroType'].split('|') for hero_type in this_hero_type: hero_types.add(hero_type.strip()) idx = 1 for hero_type in hero_types: hero_type_ids[hero_type] = idx output.write('%s = %d\n' % (hero_type, idx)) idx *= 2 output.write('\n') # Fill LaningInfo and HeroType output.write('heroes = {\n') supported_list = [] not_supported_list = [] for name, data in heroes['DOTAHeroes'].iteritems(): if isinstance(data, dict) and data.get('CMEnabled', '0') == '1': human_name = data['url'].replace('_', ' ') if 'Bot' not in data: not_supported_list.append(human_name) continue laning_info = [] try: for key, value in data['Bot']['LaningInfo'].iteritems(): laning_info.append('[\'%s\'] = %s' % (key, value)) this_hero_type = 0 this_hero_type_raw = data['Bot']['HeroType'].split('|') for hero_type in this_hero_type_raw: this_hero_type |= hero_type_ids[hero_type.strip()] if ('Loadout' not in data['Bot']) or (name not in implemented_bots): not_supported_list.append(human_name) else: output.write(' [\'%s\'] = {[\'HeroType\'] = %s, [\'LaningInfo\'] = {%s}},\n' % (name, this_hero_type, ', '.join(laning_info))) supported_list.append(human_name) except KeyError as ex: not_supported_list.append(human_name) output.write('}\n\n') # Write module exporting stuff #2 output.write('for k,v in pairs(hero_bot_data) do _G._savedEnv[k] = v end\n') supported_list.sort() print 'Fully operational:' for hero in supported_list: print ' - %s' % hero not_supported_list.sort() print '\nNot supported:' for hero in not_supported_list: print ' - %s' % hero
0.002321
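The generator above assigns each HeroType a power-of-two id (idx *= 2) and ORs the ids together per hero, so one integer encodes a set of types. The bit-flag pattern in isolation, with illustrative type names:

# Flag table built the same way as hero_type_ids above.
flags = {}
idx = 1
for hero_type in ['SUPPORT', 'PUSHER', 'SEMI_CARRY']:
    flags[hero_type] = idx
    idx *= 2  # 1, 2, 4, ...

combined = flags['SUPPORT'] | flags['SEMI_CARRY']  # -> 5

# Membership is a bitwise AND, mirroring how the Lua side can test HeroType.
print(bool(combined & flags['SEMI_CARRY']))  # True
print(bool(combined & flags['PUSHER']))      # False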
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

NETWORK = 'network'
SUBNET = 'subnet'
PORT = 'port'
SECURITY_GROUP = 'security_group'
L2POPULATION = 'l2population'
DVR = 'dvr'

CREATE = 'create'
DELETE = 'delete'
UPDATE = 'update'

AGENT = 'q-agent-notifier'
PLUGIN = 'q-plugin'
L3PLUGIN = 'q-l3-plugin'
DHCP = 'q-dhcp-notifer'
FIREWALL_PLUGIN = 'q-firewall-plugin'
METERING_PLUGIN = 'q-metering-plugin'
LOADBALANCER_PLUGIN = 'n-lbaas-plugin'

L3_AGENT = 'l3_agent'
DHCP_AGENT = 'dhcp_agent'
METERING_AGENT = 'metering_agent'
LOADBALANCER_AGENT = 'n-lbaas_agent'


def get_topic_name(prefix, table, operation, host=None):
    """Create a topic name.

    The topic name needs to be synced between the agent and the plugin.
    The plugin will send a fanout message to all of the listening agents
    so that the agents in turn can perform their updates accordingly.

    :param prefix: Common prefix for the plugin/agent message queues.
    :param table: The table in question (NETWORK, SUBNET, PORT).
    :param operation: The operation that invokes notification (CREATE,
                      DELETE, UPDATE)
    :param host: Add host to the topic
    :returns: The topic name.
    """
    if host:
        return '%s-%s-%s.%s' % (prefix, table, operation, host)
    return '%s-%s-%s' % (prefix, table, operation)
0
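get_topic_name() only joins prefix, resource and operation, with an optional host suffix. A usage sketch, assuming the module above is importable as neutron.common.topics (the import path is an assumption):

from neutron.common import topics  # assumed location of the module above

print(topics.get_topic_name(topics.AGENT, topics.PORT, topics.UPDATE))
# -> 'q-agent-notifier-port-update'

print(topics.get_topic_name(topics.AGENT, topics.PORT, topics.UPDATE, host='compute-1'))
# -> 'q-agent-notifier-port-update.compute-1'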
import sim

# seed
sim.random.seed(13)

# environment
env = sim.simpy.Environment()

# writer
# packet_w = Writer("packet_", start="# id src init_time waited_time freq processed_time\n")

# default values
sim.tg_default_size = lambda x: 5000
sim.tg_default_dist = lambda x: 1
sim.ONU_consumption = lambda x: 15
sim.PN_consumption = lambda x: 25
sim.Ant_consumption = lambda x: 7
sim.DBA_IPACT_default_bandwidth = 5000

# constants

# topology
antenas = 3
onus = 2
pns = 2
splts = 1
max_freqs = 10
matrix = [
    [0,3,10000],
    [1,3,9000],
    [2,4,13000],
    [3,5,500],
    [4,7,25000],
    [5,7,23000],
    [7,6,8000]
]

# nodes
nodes = sim.create_topology(env, antenas, onus, pns, splts, matrix, max_freqs)

# rules
nodes[5].end()  # node 5 starts offline
nodes[0].end()  # antenna 0 starts offline
nodes[1].end()  # antenna 1 starts offline
print(nodes[3], "enabled:", nodes[3].enabled)
nodes[3].end()  # onu 0 starts offline
print(nodes[3], "enabled:", nodes[3].enabled)

print("Begin.")
env.run(until=10)
print("End.")

# consumption
for n in nodes:
    if(isinstance(n, sim.Splitter)):
        continue
    else:
        print(str(n), "had consumption of:", n.consumption())
0.021441
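The script drives the whole simulation through env.run(until=10) on a SimPy environment. A minimal SimPy-only sketch of that run pattern, independent of the custom sim module used above:

import simpy

def heartbeat(env, period):
    # Toy process: wake up every `period` time units until the run ends.
    while True:
        print('tick at t=%s' % env.now)
        yield env.timeout(period)

env = simpy.Environment()
env.process(heartbeat(env, period=2))
env.run(until=10)  # same stop condition as the script above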
"""Base implementation of event loop. The event loop can be broken up into a multiplexer (the part responsible for notifying us of IO events) and the event loop proper, which wraps a multiplexer with functionality for scheduling callbacks, immediately or at a given time in the future. Whenever a public API takes a callback, subsequent positional arguments will be passed to the callback if/when it is called. This avoids the proliferation of trivial lambdas implementing closures. Keyword arguments for the callback are not supported; this is a conscious design decision, leaving the door open for keyword arguments to modify the meaning of the API call itself. """ import collections import concurrent.futures import heapq import logging import socket import subprocess import time import os import sys from . import events from . import futures from . import tasks from .log import logger __all__ = ['BaseEventLoop', 'Server'] # Argument for default thread pool executor creation. _MAX_WORKERS = 5 class _StopError(BaseException): """Raised to stop the event loop.""" def _raise_stop_error(*args): raise _StopError class Server(events.AbstractServer): def __init__(self, loop, sockets): self.loop = loop self.sockets = sockets self.active_count = 0 self.waiters = [] def attach(self, transport): assert self.sockets is not None self.active_count += 1 def detach(self, transport): assert self.active_count > 0 self.active_count -= 1 if self.active_count == 0 and self.sockets is None: self._wakeup() def close(self): sockets = self.sockets if sockets is not None: self.sockets = None for sock in sockets: self.loop._stop_serving(sock) if self.active_count == 0: self._wakeup() def _wakeup(self): waiters = self.waiters self.waiters = None for waiter in waiters: if not waiter.done(): waiter.set_result(waiter) @tasks.coroutine def wait_closed(self): if self.sockets is None or self.waiters is None: return waiter = futures.Future(loop=self.loop) self.waiters.append(waiter) yield from waiter class BaseEventLoop(events.AbstractEventLoop): def __init__(self): self._ready = collections.deque() self._scheduled = [] self._default_executor = None self._internal_fds = 0 self._running = False def _make_socket_transport(self, sock, protocol, waiter=None, *, extra=None, server=None): """Create socket transport.""" raise NotImplementedError def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter, *, server_side=False, server_hostname=None, extra=None, server=None): """Create SSL transport.""" raise NotImplementedError def _make_datagram_transport(self, sock, protocol, address=None, extra=None): """Create datagram transport.""" raise NotImplementedError def _make_read_pipe_transport(self, pipe, protocol, waiter=None, extra=None): """Create read pipe transport.""" raise NotImplementedError def _make_write_pipe_transport(self, pipe, protocol, waiter=None, extra=None): """Create write pipe transport.""" raise NotImplementedError @tasks.coroutine def _make_subprocess_transport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, extra=None, **kwargs): """Create subprocess transport.""" raise NotImplementedError def _read_from_self(self): """XXX""" raise NotImplementedError def _write_to_self(self): """XXX""" raise NotImplementedError def _process_events(self, event_list): """Process selector events.""" raise NotImplementedError def run_forever(self): """Run until stop() is called.""" if self._running: raise RuntimeError('Event loop is running.') self._running = True try: while True: try: self._run_once() except 
_StopError: break finally: self._running = False def run_until_complete(self, future): """Run until the Future is done. If the argument is a coroutine, it is wrapped in a Task. XXX TBD: It would be disastrous to call run_until_complete() with the same coroutine twice -- it would wrap it in two different Tasks and that can't be good. Return the Future's result, or raise its exception. """ future = tasks.async(future, loop=self) future.add_done_callback(_raise_stop_error) self.run_forever() future.remove_done_callback(_raise_stop_error) if not future.done(): raise RuntimeError('Event loop stopped before Future completed.') return future.result() def stop(self): """Stop running the event loop. Every callback scheduled before stop() is called will run. Callback scheduled after stop() is called won't. However, those callbacks will run if run() is called again later. """ self.call_soon(_raise_stop_error) def close(self): """Close the event loop. This clears the queues and shuts down the executor, but does not wait for the executor to finish. """ self._ready.clear() self._scheduled.clear() executor = self._default_executor if executor is not None: self._default_executor = None executor.shutdown(wait=False) def is_running(self): """Returns running status of event loop.""" return self._running def time(self): """Return the time according to the event loop's clock.""" return time.monotonic() def call_later(self, delay, callback, *args): """Arrange for a callback to be called at a given time. Return a Handle: an opaque object with a cancel() method that can be used to cancel the call. The delay can be an int or float, expressed in seconds. It is always a relative time. Each callback will be called exactly once. If two callbacks are scheduled for exactly the same time, it undefined which will be called first. Any positional arguments after the callback will be passed to the callback when it is called. """ return self.call_at(self.time() + delay, callback, *args) def call_at(self, when, callback, *args): """Like call_later(), but uses an absolute time.""" timer = events.TimerHandle(when, callback, args) heapq.heappush(self._scheduled, timer) return timer def call_soon(self, callback, *args): """Arrange for a callback to be called as soon as possible. This operates as a FIFO queue, callbacks are called in the order in which they are registered. Each callback will be called exactly once. Any positional arguments after the callback will be passed to the callback when it is called. 
""" handle = events.make_handle(callback, args) self._ready.append(handle) return handle def call_soon_threadsafe(self, callback, *args): """XXX""" handle = self.call_soon(callback, *args) self._write_to_self() return handle def run_in_executor(self, executor, callback, *args): if isinstance(callback, events.Handle): assert not args assert not isinstance(callback, events.TimerHandle) if callback._cancelled: f = futures.Future(loop=self) f.set_result(None) return f callback, args = callback._callback, callback._args if executor is None: executor = self._default_executor if executor is None: executor = concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS) self._default_executor = executor return futures.wrap_future(executor.submit(callback, *args), loop=self) def set_default_executor(self, executor): self._default_executor = executor def getaddrinfo(self, host, port, *, family=0, type=0, proto=0, flags=0): return self.run_in_executor(None, socket.getaddrinfo, host, port, family, type, proto, flags) def getnameinfo(self, sockaddr, flags=0): return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags) @tasks.coroutine def create_connection(self, protocol_factory, host=None, port=None, *, ssl=None, family=0, proto=0, flags=0, sock=None, local_addr=None, server_hostname=None): """XXX""" if server_hostname is not None and not ssl: raise ValueError('server_hostname is only meaningful with ssl') if server_hostname is None and ssl: # Use host as default for server_hostname. It is an error # if host is empty or not set, e.g. when an # already-connected socket was passed or when only a port # is given. To avoid this error, you can pass # server_hostname='' -- this will bypass the hostname # check. (This also means that if host is a numeric # IP/IPv6 address, we will attempt to verify that exact # address; this will probably fail, but it is possible to # create a certificate for a specific IP address, so we # don't judge it here.) if not host: raise ValueError('You must set server_hostname ' 'when using ssl without a host') server_hostname = host if host is not None or port is not None: if sock is not None: raise ValueError( 'host/port and sock can not be specified at the same time') f1 = self.getaddrinfo( host, port, family=family, type=socket.SOCK_STREAM, proto=proto, flags=flags) fs = [f1] if local_addr is not None: f2 = self.getaddrinfo( *local_addr, family=family, type=socket.SOCK_STREAM, proto=proto, flags=flags) fs.append(f2) else: f2 = None yield from tasks.wait(fs, loop=self) infos = f1.result() if not infos: raise OSError('getaddrinfo() returned empty list') if f2 is not None: laddr_infos = f2.result() if not laddr_infos: raise OSError('getaddrinfo() returned empty list') exceptions = [] for family, type, proto, cname, address in infos: try: sock = socket.socket(family=family, type=type, proto=proto) sock.setblocking(False) if f2 is not None: for _, _, _, _, laddr in laddr_infos: try: sock.bind(laddr) break except OSError as exc: exc = OSError( exc.errno, 'error while ' 'attempting to bind on address ' '{!r}: {}'.format( laddr, exc.strerror.lower())) exceptions.append(exc) else: sock.close() sock = None continue yield from self.sock_connect(sock, address) except OSError as exc: if sock is not None: sock.close() exceptions.append(exc) else: break else: if len(exceptions) == 1: raise exceptions[0] else: # If they all have the same str(), raise one. 
model = str(exceptions[0]) if all(str(exc) == model for exc in exceptions): raise exceptions[0] # Raise a combined exception so the user can see all # the various error messages. raise OSError('Multiple exceptions: {}'.format( ', '.join(str(exc) for exc in exceptions))) elif sock is None: raise ValueError( 'host and port was not specified and no sock specified') sock.setblocking(False) protocol = protocol_factory() waiter = futures.Future(loop=self) if ssl: sslcontext = None if isinstance(ssl, bool) else ssl transport = self._make_ssl_transport( sock, protocol, sslcontext, waiter, server_side=False, server_hostname=server_hostname) else: transport = self._make_socket_transport(sock, protocol, waiter) yield from waiter return transport, protocol @tasks.coroutine def create_datagram_endpoint(self, protocol_factory, local_addr=None, remote_addr=None, *, family=0, proto=0, flags=0): """Create datagram connection.""" if not (local_addr or remote_addr): if family == 0: raise ValueError('unexpected address family') addr_pairs_info = (((family, proto), (None, None)),) else: # join addresss by (family, protocol) addr_infos = collections.OrderedDict() for idx, addr in ((0, local_addr), (1, remote_addr)): if addr is not None: assert isinstance(addr, tuple) and len(addr) == 2, ( '2-tuple is expected') infos = yield from self.getaddrinfo( *addr, family=family, type=socket.SOCK_DGRAM, proto=proto, flags=flags) if not infos: raise OSError('getaddrinfo() returned empty list') for fam, _, pro, _, address in infos: key = (fam, pro) if key not in addr_infos: addr_infos[key] = [None, None] addr_infos[key][idx] = address # each addr has to have info for each (family, proto) pair addr_pairs_info = [ (key, addr_pair) for key, addr_pair in addr_infos.items() if not ((local_addr and addr_pair[0] is None) or (remote_addr and addr_pair[1] is None))] if not addr_pairs_info: raise ValueError('can not get address information') exceptions = [] for ((family, proto), (local_address, remote_address)) in addr_pairs_info: sock = None r_addr = None try: sock = socket.socket( family=family, type=socket.SOCK_DGRAM, proto=proto) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setblocking(False) if local_addr: sock.bind(local_address) if remote_addr: yield from self.sock_connect(sock, remote_address) r_addr = remote_address except OSError as exc: if sock is not None: sock.close() exceptions.append(exc) else: break else: raise exceptions[0] protocol = protocol_factory() transport = self._make_datagram_transport(sock, protocol, r_addr) return transport, protocol @tasks.coroutine def create_server(self, protocol_factory, host=None, port=None, *, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, sock=None, backlog=100, ssl=None, reuse_address=None): """XXX""" if isinstance(ssl, bool): raise TypeError('ssl argument must be an SSLContext or None') if host is not None or port is not None: if sock is not None: raise ValueError( 'host/port and sock can not be specified at the same time') AF_INET6 = getattr(socket, 'AF_INET6', 0) if reuse_address is None: reuse_address = os.name == 'posix' and sys.platform != 'cygwin' sockets = [] if host == '': host = None infos = yield from self.getaddrinfo( host, port, family=family, type=socket.SOCK_STREAM, proto=0, flags=flags) if not infos: raise OSError('getaddrinfo() returned empty list') completed = False try: for res in infos: af, socktype, proto, canonname, sa = res try: sock = socket.socket(af, socktype, proto) except socket.error: # Assume it's a bad family/type/protocol 
combination. continue sockets.append(sock) if reuse_address: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) # Disable IPv4/IPv6 dual stack support (enabled by # default on Linux) which makes a single socket # listen on both address families. if af == AF_INET6 and hasattr(socket, 'IPPROTO_IPV6'): sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, True) try: sock.bind(sa) except OSError as err: raise OSError(err.errno, 'error while attempting ' 'to bind on address %r: %s' % (sa, err.strerror.lower())) completed = True finally: if not completed: for sock in sockets: sock.close() else: if sock is None: raise ValueError( 'host and port was not specified and no sock specified') sockets = [sock] server = Server(self, sockets) for sock in sockets: sock.listen(backlog) sock.setblocking(False) self._start_serving(protocol_factory, sock, ssl, server) return server @tasks.coroutine def connect_read_pipe(self, protocol_factory, pipe): protocol = protocol_factory() waiter = futures.Future(loop=self) transport = self._make_read_pipe_transport(pipe, protocol, waiter) yield from waiter return transport, protocol @tasks.coroutine def connect_write_pipe(self, protocol_factory, pipe): protocol = protocol_factory() waiter = futures.Future(loop=self) transport = self._make_write_pipe_transport(pipe, protocol, waiter) yield from waiter return transport, protocol @tasks.coroutine def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=False, shell=True, bufsize=0, **kwargs): assert not universal_newlines, "universal_newlines must be False" assert shell, "shell must be True" assert isinstance(cmd, str), cmd protocol = protocol_factory() transport = yield from self._make_subprocess_transport( protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs) return transport, protocol @tasks.coroutine def subprocess_exec(self, protocol_factory, *args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=False, shell=False, bufsize=0, **kwargs): assert not universal_newlines, "universal_newlines must be False" assert not shell, "shell must be False" protocol = protocol_factory() transport = yield from self._make_subprocess_transport( protocol, args, False, stdin, stdout, stderr, bufsize, **kwargs) return transport, protocol def _add_callback(self, handle): """Add a Handle to ready or scheduled.""" assert isinstance(handle, events.Handle), 'A Handle is required here' if handle._cancelled: return if isinstance(handle, events.TimerHandle): heapq.heappush(self._scheduled, handle) else: self._ready.append(handle) def _add_callback_signalsafe(self, handle): """Like _add_callback() but called from a signal handler.""" self._add_callback(handle) self._write_to_self() def _run_once(self): """Run one full iteration of the event loop. This calls all currently ready callbacks, polls for I/O, schedules the resulting callbacks, and finally schedules 'call_later' callbacks. """ # Remove delayed calls that were cancelled from head of queue. while self._scheduled and self._scheduled[0]._cancelled: heapq.heappop(self._scheduled) timeout = None if self._ready: timeout = 0 elif self._scheduled: # Compute the desired timeout. when = self._scheduled[0]._when deadline = max(0, when - self.time()) if timeout is None: timeout = deadline else: timeout = min(timeout, deadline) # TODO: Instrumentation only in debug mode? 
t0 = self.time() event_list = self._selector.select(timeout) t1 = self.time() argstr = '' if timeout is None else '{:.3f}'.format(timeout) if t1-t0 >= 1: level = logging.INFO else: level = logging.DEBUG logger.log(level, 'poll%s took %.3f seconds', argstr, t1-t0) self._process_events(event_list) # Handle 'later' callbacks that are ready. now = self.time() while self._scheduled: handle = self._scheduled[0] if handle._when > now: break handle = heapq.heappop(self._scheduled) self._ready.append(handle) # This is the only place where callbacks are actually *called*. # All other places just add them to ready. # Note: We run all currently scheduled callbacks, but not any # callbacks scheduled by callbacks run this time around -- # they will be run the next time (after another I/O poll). # Use an idiom that is threadsafe without using locks. ntodo = len(self._ready) for i in range(ntodo): handle = self._ready.popleft() if not handle._cancelled: handle._run() handle = None # Needed to break cycles when an exception occurs.
0.000041
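The event-loop sample above only documents its scheduling primitives (call_soon, call_later, stop) in docstrings. A minimal usage sketch, assuming the modern stdlib asyncio API that this tulip-era BaseEventLoop later grew into:

import asyncio

def hello(name):
    # positional arguments after the callback are forwarded to it, as described above
    print("hello,", name)

loop = asyncio.new_event_loop()
loop.call_soon(hello, "world")    # FIFO callback, run on the next loop iteration
loop.call_later(0.05, loop.stop)  # relative-time scheduling; stop() ends run_forever()
loop.run_forever()
loop.close()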
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from oslo_log import log as logging import paste.urlmap import six if six.PY3: from urllib import request as urllib2 else: import urllib2 from nova.api.openstack import wsgi LOG = logging.getLogger(__name__) _quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' _option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*' r'(?:=\s*([^;]+|%s))?\s*' % (_quoted_string_re, _quoted_string_re)) def unquote_header_value(value): """Unquotes a header value. This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. """ if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] return value def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] :param value: a string with a list header. :return: :class:`list` """ result = [] for item in urllib2.parse_http_list(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result def parse_options_header(value): """Parse a ``Content-Type`` like header into a tuple with the content type and the options: >>> parse_options_header('Content-Type: text/html; mimetype=text/html') ('Content-Type:', {'mimetype': 'text/html'}) :param value: the header to parse. :return: (str, options) """ def _tokenize(string): for match in _option_header_piece_re.finditer(string): key, value = match.groups() key = unquote_header_value(key) if value is not None: value = unquote_header_value(value) yield key, value if not value: return '', {} parts = _tokenize(';' + value) name = next(parts)[0] extra = dict(parts) return name, extra class Accept(object): def __init__(self, value): self._content_types = [parse_options_header(v) for v in parse_list_header(value)] def best_match(self, supported_content_types): # FIXME: Should we have a more sophisticated matching algorithm that # takes into account the version as well? 
best_quality = -1 best_content_type = None best_params = {} best_match = '*/*' for content_type in supported_content_types: for content_mask, params in self._content_types: try: quality = float(params.get('q', 1)) except ValueError: continue if quality < best_quality: continue elif best_quality == quality: if best_match.count('*') <= content_mask.count('*'): continue if self._match_mask(content_mask, content_type): best_quality = quality best_content_type = content_type best_params = params best_match = content_mask return best_content_type, best_params def _match_mask(self, mask, content_type): if '*' not in mask: return content_type == mask if mask == '*/*': return True mask_major = mask[:-2] content_type_major = content_type.split('/', 1)[0] return content_type_major == mask_major def urlmap_factory(loader, global_conf, **local_conf): if 'not_found_app' in local_conf: not_found_app = local_conf.pop('not_found_app') else: not_found_app = global_conf.get('not_found_app') if not_found_app: not_found_app = loader.get_app(not_found_app, global_conf=global_conf) urlmap = URLMap(not_found_app=not_found_app) for path, app_name in local_conf.items(): path = paste.urlmap.parse_path_expression(path) app = loader.get_app(app_name, global_conf=global_conf) urlmap[path] = app return urlmap class URLMap(paste.urlmap.URLMap): def _match(self, host, port, path_info): """Find longest match for a given URL path.""" for (domain, app_url), app in self.applications: if domain and domain != host and domain != host + ':' + port: continue if (path_info == app_url or path_info.startswith(app_url + '/')): return app, app_url return None, None def _set_script_name(self, app, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url return app(environ, start_response) return wrap def _munge_path(self, app, path_info, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url environ['PATH_INFO'] = path_info[len(app_url):] return app(environ, start_response) return wrap def _path_strategy(self, host, port, path_info): """Check path suffix for MIME type and path prefix for API version.""" mime_type = app = app_url = None parts = path_info.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in wsgi.get_supported_content_types(): mime_type = possible_type parts = path_info.split('/') if len(parts) > 1: possible_app, possible_app_url = self._match(host, port, path_info) # Don't use prefix if it ends up matching default if possible_app and possible_app_url: app_url = possible_app_url app = self._munge_path(possible_app, path_info, app_url) return mime_type, app, app_url def _content_type_strategy(self, host, port, environ): """Check Content-Type header for API version.""" app = None params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1] if 'version' in params: app, app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return app def _accept_strategy(self, host, port, environ, supported_content_types): """Check Accept header for best matching MIME type and API version.""" accept = Accept(environ.get('HTTP_ACCEPT', '')) app = None # Find the best match in the Accept header mime_type, params = accept.best_match(supported_content_types) if 'version' in params: app, app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return mime_type, app def __call__(self, environ, start_response): host = environ.get('HTTP_HOST', 
environ.get('SERVER_NAME')).lower() if ':' in host: host, port = host.split(':', 1) else: if environ['wsgi.url_scheme'] == 'http': port = '80' else: port = '443' path_info = environ['PATH_INFO'] path_info = self.normalize_url(path_info, False)[1] # The MIME type for the response is determined in one of two ways: # 1) URL path suffix (eg /servers/detail.json) # 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2) # The API version is determined in one of three ways: # 1) URL path prefix (eg /v1.1/tenant/servers/detail) # 2) Content-Type header (eg application/json;version=1.1) # 3) Accept header (eg application/json;q=0.8;version=1.1) supported_content_types = list(wsgi.get_supported_content_types()) mime_type, app, app_url = self._path_strategy(host, port, path_info) # Accept application/atom+xml for the index query of each API # version mount point as well as the root index if (app_url and app_url + '/' == path_info) or path_info == '/': supported_content_types.append('application/atom+xml') if not app: app = self._content_type_strategy(host, port, environ) if not mime_type or not app: possible_mime_type, possible_app = self._accept_strategy( host, port, environ, supported_content_types) if possible_mime_type and not mime_type: mime_type = possible_mime_type if possible_app and not app: app = possible_app if not mime_type: mime_type = 'application/json' if not app: # Didn't match a particular version, probably matches default app, app_url = self._match(host, port, path_info) if app: app = self._munge_path(app, path_info, app_url) if app: environ['nova.best_content_type'] = mime_type return app(environ, start_response) LOG.debug('Could not find application for %s', environ['PATH_INFO']) environ['paste.urlmap_object'] = self return self.not_found_application(environ, start_response)
0.000096
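The urlmap sample defines parse_options_header and the Accept matcher without showing a call site. A hedged sketch of typical use; the import path is assumed from the file's location (nova/api/openstack/urlmap.py) and the commented values follow from the parsing code itself:

from nova.api.openstack.urlmap import Accept, parse_options_header  # path assumed, not confirmed

name, opts = parse_options_header('application/json; q=0.8; version=1.1')
# name == 'application/json', opts == {'q': '0.8', 'version': '1.1'}

accept = Accept('application/json;q=0.8, application/xml;q=0.2')
mime_type, params = accept.best_match(['application/json', 'application/xml'])
# mime_type == 'application/json', the supported type with the highest q value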
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for gmm_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib.factorization.python.ops import gmm_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed as random_seed_lib from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging as logging class GmmOpsTest(test.TestCase): def setUp(self): self.num_examples = 1000 self.iterations = 40 self.seed = 4 random_seed_lib.set_random_seed(self.seed) np.random.seed(self.seed * 2) self.data, self.true_assignments = self.make_data(self.num_examples) # Generate more complicated data. self.centers = [[1, 1], [-1, 0.5], [2, 1]] self.more_data, self.more_true_assignments = self.make_data_from_centers( self.num_examples, self.centers) @staticmethod def make_data(num_vectors): """Generates 2-dimensional data centered on (2,2), (-1,-1). Args: num_vectors: number of training examples. Returns: A tuple containing the data as a numpy array and the cluster ids. """ vectors = [] classes = [] for _ in xrange(num_vectors): if np.random.random() > 0.5: vectors.append([np.random.normal(2.0, 0.6), np.random.normal(2.0, 0.9)]) classes.append(0) else: vectors.append( [np.random.normal(-1.0, 0.4), np.random.normal(-1.0, 0.5)]) classes.append(1) return np.asarray(vectors), classes @staticmethod def make_data_from_centers(num_vectors, centers): """Generates 2-dimensional data with random centers. Args: num_vectors: number of training examples. centers: a list of random 2-dimensional centers. Returns: A tuple containing the data as a numpy array and the cluster ids. 
""" vectors = [] classes = [] for _ in xrange(num_vectors): current_class = np.random.random_integers(0, len(centers) - 1) vectors.append([ np.random.normal(centers[current_class][0], np.random.random_sample()), np.random.normal(centers[current_class][1], np.random.random_sample()) ]) classes.append(current_class) return np.asarray(vectors), len(centers) def test_covariance(self): start_time = time.time() data = self.data.T np_cov = np.cov(data) logging.info('Numpy took %f', time.time() - start_time) start_time = time.time() with self.test_session() as sess: op = gmm_ops._covariance( constant_op.constant( data.T, dtype=dtypes.float32), False) op_diag = gmm_ops._covariance( constant_op.constant( data.T, dtype=dtypes.float32), True) variables.global_variables_initializer().run() tf_cov = sess.run(op) np.testing.assert_array_almost_equal(np_cov, tf_cov) logging.info('Tensorflow took %f', time.time() - start_time) tf_cov = sess.run(op_diag) np.testing.assert_array_almost_equal( np.diag(np_cov), np.ravel(tf_cov), decimal=5) def test_simple_cluster(self): """Tests that the clusters are correct.""" num_classes = 2 graph = ops.Graph() with graph.as_default() as g: g.seed = 5 with self.test_session() as sess: data = constant_op.constant(self.data, dtype=dtypes.float32) _, assignments, _, training_op, init_op, _ = gmm_ops.gmm( data, 'random', num_classes, random_seed=self.seed) variables.global_variables_initializer().run() sess.run(init_op) for _ in xrange(self.iterations): sess.run(training_op) assignments = sess.run(assignments) accuracy = np.mean( np.asarray(self.true_assignments) == np.squeeze(assignments)) logging.info('Accuracy: %f', accuracy) self.assertGreater(accuracy, 0.98) def testParams(self): """Tests that the params work as intended.""" num_classes = 2 with self.test_session() as sess: # Experiment 1. Update weights only. data = constant_op.constant(self.data, dtype=dtypes.float32) gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes, [[3.0, 3.0], [0.0, 0.0]], 'w') training_ops = gmm_tool.training_ops() variables.global_variables_initializer().run() sess.run(gmm_tool.init_ops()) for _ in xrange(self.iterations): sess.run(training_ops) # Only the probability to each class is updated. alphas = sess.run(gmm_tool.alphas()) self.assertGreater(alphas[1], 0.6) means = sess.run(gmm_tool.clusters()) np.testing.assert_almost_equal( np.expand_dims([[3.0, 3.0], [0.0, 0.0]], 1), means) covs = sess.run(gmm_tool.covariances()) np.testing.assert_almost_equal(covs[0], covs[1]) # Experiment 2. Update means and covariances. gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes, [[3.0, 3.0], [0.0, 0.0]], 'mc') training_ops = gmm_tool.training_ops() variables.global_variables_initializer().run() sess.run(gmm_tool.init_ops()) for _ in xrange(self.iterations): sess.run(training_ops) alphas = sess.run(gmm_tool.alphas()) self.assertAlmostEqual(alphas[0], alphas[1]) means = sess.run(gmm_tool.clusters()) np.testing.assert_almost_equal( np.expand_dims([[2.0, 2.0], [-1.0, -1.0]], 1), means, decimal=1) covs = sess.run(gmm_tool.covariances()) np.testing.assert_almost_equal( [[0.371111, -0.0050774], [-0.0050774, 0.8651744]], covs[0], decimal=4) np.testing.assert_almost_equal( [[0.146976, 0.0259463], [0.0259463, 0.2543971]], covs[1], decimal=4) # Experiment 3. Update covariances only. 
gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes, [[-1.0, -1.0], [1.0, 1.0]], 'c') training_ops = gmm_tool.training_ops() variables.global_variables_initializer().run() sess.run(gmm_tool.init_ops()) for _ in xrange(self.iterations): sess.run(training_ops) alphas = sess.run(gmm_tool.alphas()) self.assertAlmostEqual(alphas[0], alphas[1]) means = sess.run(gmm_tool.clusters()) np.testing.assert_almost_equal( np.expand_dims([[-1.0, -1.0], [1.0, 1.0]], 1), means) covs = sess.run(gmm_tool.covariances()) np.testing.assert_almost_equal( [[0.1299582, 0.0435872], [0.0435872, 0.2558578]], covs[0], decimal=5) np.testing.assert_almost_equal( [[3.195385, 2.6989155], [2.6989155, 3.3881593]], covs[1], decimal=5) if __name__ == '__main__': test.main()
0.008825
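The GMM test above validates gmm_ops._covariance against numpy's reference covariance on two synthetic Gaussian clusters. A numpy-only sketch of that reference computation, with cluster parameters copied from make_data and everything else illustrative:

import numpy as np

rng = np.random.RandomState(4)
# roughly the two clusters from make_data(): centres (2, 2) and (-1, -1)
a = rng.normal(loc=[2.0, 2.0], scale=[0.6, 0.9], size=(500, 2))
b = rng.normal(loc=[-1.0, -1.0], scale=[0.4, 0.5], size=(500, 2))
data = np.vstack([a, b])

full_cov = np.cov(data.T)              # 2x2 reference matrix the test compares against
diag_cov = np.diag(np.diag(full_cov))  # diagonal-only variant, matching the op_diag case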
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
#    names of its contributors may be used to endorse or promote products
#    derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#

import unittest

from autothreadharness.harness_case import HarnessCase


class REED_5_5_5(HarnessCase):
    role = HarnessCase.ROLE_REED
    case = '5 5 5'
    golden_devices_required = 16

    def on_dialog(self, dialog, title):
        pass


if __name__ == '__main__':
    unittest.main()
0
# coding=utf-8 import time import hashlib import tornado.web import tornado.locale from bson.objectid import ObjectId from . import BaseHandler from .utils import username_validator, email_validator class SignupHandler(BaseHandler): def get(self): if self.current_user: self.redirect(self.get_argument('next', '/')) self.render('account/signup.html') def post(self): self.recaptcha_validate() username = self.get_argument('username', None) email = self.get_argument('email', '').lower() password = self.get_argument('password', None) password2 = self.get_argument('password2', None) if not (username and email and password and password2): self.flash('Please fill the required field') if password != password2: self.flash("Password doesn't match") if username and not username_validator.match(username): self.flash('Username is invalid') if email and not email_validator.match(email): self.flash('Not a valid email address') if username and \ self.db.members.find_one({'name_lower': username.lower()}): self.flash('This username is already registered') if email and self.db.members.find_one({'email': email}): self.flash('This email is already registered') if self.messages: self.render('account/signup.html') return password = hashlib.sha1(password + username.lower()).hexdigest() role = 1 if not self.db.members.count(): role = 5 self.db.members.insert({ 'name': username, 'name_lower': username.lower(), 'password': password, 'email': email, 'website': '', 'description': '', 'created': time.time(), 'language': self.settings['default_locale'], 'role': role, # TODO:send mail. 'like': [], # topics 'follow': [], # users 'favorite': [] # nodes }) self.set_secure_cookie('user', password, expires_days=30) self.redirect(self.get_argument('next', '/')) class SigninHandler(BaseHandler): def get(self): if self.current_user: self.redirect(self.get_argument('next', '/')) self.render('account/signin.html') def post(self): username = self.get_argument('username', '').lower() password = self.get_argument('password', None) if not (username and password): self.flash('Please fill the required field') password = hashlib.sha1(password + username).hexdigest() member = self.db.members.find_one({'name_lower': username, 'password': password}) if not member: self.flash('Invalid account or password') self.render('account/signin.html') return self.set_secure_cookie('user', password, expires_days=30) self.redirect(self.get_argument('next', '/')) class SignoutHandler(BaseHandler): def get(self): user_name = self.get_argument('user', None) if user_name != self.current_user['name']: raise tornado.web.HTTPError(403) self.clear_cookie('user') self.redirect(self.get_argument('next', '/')) class SettingsHandler(BaseHandler): @tornado.web.authenticated def get(self): self.render('account/settings.html', locales=self.application.locales) @tornado.web.authenticated def post(self): website = self.get_argument('website', '') description = self.get_argument('description', '') language = self.get_argument('language') if len(description) > 1500: self.flash("The description is too lang") self.db.members.update({'_id': self.current_user['_id']}, {'$set': { 'website': website, 'description': description, 'language': language }}) self.flash('Saved successfully', type='success') self.redirect('/account/settings') class ChangePasswordHandler(BaseHandler): @tornado.web.authenticated def post(self): old_password = self.get_argument('old_password', None) new_password = self.get_argument('new_password', None) if not (old_password and new_password): 
self.flash('Please fill the required field') key = old_password + self.current_user['name'].lower() password = hashlib.sha1(key).hexdigest() if password != self.current_user['password']: self.flash('Invalid password') if self.messages: self.redirect('/account/settings') return key = new_password + self.current_user['name'].lower() password = str(hashlib.sha1(key).hexdigest()) self.db.members.update({'_id': self.current_user['_id']}, {'$set': {'password': password}}) self.set_secure_cookie('user', password, expires_days=30) self.flash('Saved successfully', type='success') self.redirect('/account/settings') class NotificationsHandler(BaseHandler): @tornado.web.authenticated def get(self): p = int(self.get_argument('p', 1)) notis = self.db.notifications.find({ 'to': self.current_user['name_lower'] }, sort=[('created', -1)]) notis_count = notis.count() per_page = self.settings['notifications_per_page'] notis = notis[(p - 1) * per_page:p * per_page] self.render('account/notifications.html', notis=notis, notis_count=notis_count, p=p) class NotificationsClearHandler(BaseHandler): @tornado.web.authenticated def get(self): self.db.notifications.remove({'to': self.current_user['name_lower']}) self.redirect('/') class NotificationsRemoveHandler(BaseHandler): @tornado.web.authenticated def get(self, id): self.db.notifications.remove({'_id': ObjectId(id)}) self.redirect(self.get_argument('next', '/account/notifications')) handlers = [ (r'/account/signup', SignupHandler), (r'/account/signin', SigninHandler), (r'/account/signout', SignoutHandler), (r'/account/settings', SettingsHandler), (r'/account/password', ChangePasswordHandler), (r'/account/notifications', NotificationsHandler), (r'/account/notifications/clear', NotificationsClearHandler), (r'/account/notifications/(\w+)/remove', NotificationsRemoveHandler), ]
0
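The account handlers above derive both the stored password and the session cookie from sha1(password + lowercased username). A small sketch of that scheme in isolation, assuming Python 3 (hence the explicit encode that the Python 2 era code omits); the helper name is made up for illustration:

import hashlib

def legacy_password_hash(password, username):
    # mirrors the handlers above: sha1 over the password concatenated with the lowercased username
    return hashlib.sha1((password + username.lower()).encode('utf-8')).hexdigest()

stored = legacy_password_hash('s3cret', 'Alice')
assert stored == legacy_password_hash('s3cret', 'alice')  # case-insensitive on the username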
# -*- coding: utf-8 -*- # # XBlock documentation build configuration file, created by # sphinx-quickstart on Fri Dec 28 11:23:23 2012. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.todo', 'sphinxcontrib.napoleon', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'XBlock' copyright = u'2012\N{en dash}2013, edX.org' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.3' # The full version, including alpha/beta/rc tags. release = '0.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # When auto-doc'ing a class, write the class' docstring and the __init__ docstring # into the class docs. autoclass_content = "both" # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'pyramid' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'XBlockdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { #The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', #The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', #Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'XBlock.tex', u'XBlock Documentation', u'edX.org', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. 
List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'xblock', u'XBlock Documentation', [u'edX.org'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'XBlock', u'XBlock Documentation', u'edX.org', 'XBlock', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote'
0.006587
# -*- test-case-name: twisted._threads.test.test_threadworker -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Implementation of an L{IWorker} based on native threads and queues. """ from __future__ import absolute_import, division, print_function from zope.interface import implementer from ._ithreads import IExclusiveWorker from ._convenience import Quit _stop = object() @implementer(IExclusiveWorker) class ThreadWorker(object): """ An L{IExclusiveWorker} implemented based on a single thread and a queue. This worker ensures exclusivity (i.e. it is an L{IExclusiveWorker} and not an L{IWorker}) by performing all of the work passed to C{do} on the I{same} thread. """ def __init__(self, startThread, queue): """ Create a L{ThreadWorker} with a function to start a thread and a queue to use to communicate with that thread. @param startThread: a callable that takes a callable to run in another thread. @type startThread: callable taking a 0-argument callable and returning nothing. @param queue: A L{Queue} to use to give tasks to the thread created by C{startThread}. @param queue: L{Queue} """ self._q = queue self._hasQuit = Quit() def work(): for task in iter(queue.get, _stop): task() startThread(work) def do(self, task): """ Perform the given task on the thread owned by this L{ThreadWorker}. @param task: the function to call on a thread. """ self._hasQuit.check() self._q.put(task) def quit(self): """ Reject all future work and stop the thread started by C{__init__}. """ # Reject all future work. Set this _before_ enqueueing _stop, so # that no work is ever enqueued _after_ _stop. self._hasQuit.set() self._q.put(_stop) @implementer(IExclusiveWorker) class LockWorker(object): """ An L{IWorker} implemented based on a mutual-exclusion lock. """ def __init__(self, lock, local): """ @param lock: A mutual-exclusion lock, with C{acquire} and C{release} methods. @type lock: L{threading.Lock} @param local: Local storage. @type local: L{threading.local} """ self._quit = Quit() self._lock = lock self._local = local def do(self, work): """ Do the given work on this thread, with the mutex acquired. If this is called re-entrantly, return and wait for the outer invocation to do the work. @param work: the work to do with the lock held. """ lock = self._lock local = self._local self._quit.check() working = getattr(local, "working", None) if working is None: working = local.working = [] working.append(work) lock.acquire() try: while working: working.pop(0)() finally: lock.release() local.working = None else: working.append(work) def quit(self): """ Quit this L{LockWorker}. """ self._quit.set() self._lock = None
0.00237
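ThreadWorker takes a thread-starting callable and a queue, but the sample never constructs one. A hedged wiring sketch using the constructor signature defined above; the import path is taken from the module's test-case-name comment and is illustrative:

from queue import Queue
from threading import Thread

from twisted._threads._threadworker import ThreadWorker  # private module, path assumed

def start_thread(target):
    # satisfies the startThread contract: run a 0-argument callable in another thread
    Thread(target=target, daemon=True).start()

worker = ThreadWorker(start_thread, Queue())
worker.do(lambda: print("runs on the dedicated worker thread"))
worker.quit()  # enqueues the _stop sentinel and rejects further work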
#!/usr/bin/python # -*- coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: aci_tenant_span_src_group short_description: Manage SPAN source groups (span:SrcGrp) description: - Manage SPAN source groups on Cisco ACI fabrics. notes: - The C(tenant) used must exist before using this module in your playbook. The M(aci_tenant) module can be used for this. seealso: - module: aci_tenant - name: APIC Management Information Model reference description: More information about the internal APIC class B(span:SrcGrp). link: https://developer.cisco.com/docs/apic-mim-ref/ author: - Jacob McGill (@jmcgill298) version_added: '2.4' options: admin_state: description: - Enable or disable the span sources. - The APIC defaults to C(yes) when unset during creation. type: bool description: description: - The description for Span source group. type: str aliases: [ descr ] dst_group: description: - The Span destination group to associate with the source group. type: str src_group: description: - The name of the Span source group. type: str aliases: [ name ] state: description: - Use C(present) or C(absent) for adding or removing. - Use C(query) for listing an object or multiple objects. type: str choices: [ absent, present, query ] default: present tenant: description: - The name of the Tenant. type: str aliases: [ tenant_name ] extends_documentation_fragment: aci ''' EXAMPLES = r''' - aci_tenant_span_src_group: host: apic username: admin password: SomeSecretPassword tenant: production src_group: "{{ src_group }}" dst_group: "{{ dst_group }}" admin_state: "{{ admin_state }}" description: "{{ description }}" delegate_to: localhost ''' RETURN = r''' current: description: The existing configuration from the APIC after the module has finished returned: success type: list sample: [ { "fvTenant": { "attributes": { "descr": "Production environment", "dn": "uni/tn-production", "name": "production", "nameAlias": "", "ownerKey": "", "ownerTag": "" } } } ] error: description: The error information as returned from the APIC returned: failure type: dict sample: { "code": "122", "text": "unknown managed object class foo" } raw: description: The raw output returned by the APIC REST API (xml or json) returned: parse error type: str sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>' sent: description: The actual/minimal configuration pushed to the APIC returned: info type: list sample: { "fvTenant": { "attributes": { "descr": "Production environment" } } } previous: description: The original configuration from the APIC before the module has started returned: info type: list sample: [ { "fvTenant": { "attributes": { "descr": "Production", "dn": "uni/tn-production", "name": "production", "nameAlias": "", "ownerKey": "", "ownerTag": "" } } } ] proposed: description: The assembled configuration from the user-provided parameters returned: info type: dict sample: { "fvTenant": { "attributes": { "descr": "Production environment", "name": "production" } } } filter_string: description: The filter string used for the request returned: failure or debug type: str sample: ?rsp-prop-include=config-only method: description: The HTTP method used for the request to the APIC returned: 
failure or debug type: str sample: POST response: description: The HTTP response from the APIC returned: failure or debug type: str sample: OK (30 bytes) status: description: The HTTP status from the APIC returned: failure or debug type: int sample: 200 url: description: The HTTP url used for the request to the APIC returned: failure or debug type: str sample: https://10.11.12.13/api/mo/uni/tn-production.json ''' from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec from ansible.module_utils.basic import AnsibleModule def main(): argument_spec = aci_argument_spec() argument_spec.update( admin_state=dict(type='raw'), # Turn into a boolean in v2.9 description=dict(type='str', aliases=['descr']), dst_group=dict(type='str'), src_group=dict(type='str', required=False, aliases=['name']), # Not required for querying all objects state=dict(type='str', default='present', choices=['absent', 'present', 'query']), tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all objects ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['state', 'absent', ['src_group', 'tenant']], ['state', 'present', ['src_group', 'tenant']], ], ) aci = ACIModule(module) admin_state = aci.boolean(module.params['admin_state'], 'enabled', 'disabled') description = module.params['description'] dst_group = module.params['dst_group'] src_group = module.params['src_group'] state = module.params['state'] tenant = module.params['tenant'] aci.construct_url( root_class=dict( aci_class='fvTenant', aci_rn='tn-{0}'.format(tenant), module_object=tenant, target_filter={'name': tenant}, ), subclass_1=dict( aci_class='spanSrcGrp', aci_rn='srcgrp-{0}'.format(src_group), module_object=src_group, target_filter={'name': src_group}, ), child_classes=['spanSpanLbl'], ) aci.get_existing() if state == 'present': aci.payload( aci_class='spanSrcGrp', class_config=dict( adminSt=admin_state, descr=description, name=src_group, ), child_configs=[{'spanSpanLbl': {'attributes': {'name': dst_group}}}], ) aci.get_diff(aci_class='spanSrcGrp') aci.post_config() elif state == 'absent': aci.delete_config() aci.exit_json() if __name__ == "__main__": main()
0.001549
# diff.py # Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors # # This module is part of GitPython and is released under # the BSD License: http://www.opensource.org/licenses/bsd-license.php import re from objects.blob import Blob from objects.util import mode_str_to_int from exc import GitCommandError from gitdb.util import hex_to_bin __all__ = ('Diffable', 'DiffIndex', 'Diff') class Diffable(object): """Common interface for all object that can be diffed against another object of compatible type. :note: Subclasses require a repo member as it is the case for Object instances, for practical reasons we do not derive from Object.""" __slots__ = tuple() # standin indicating you want to diff against the index class Index(object): pass def _process_diff_args(self, args): """ :return: possibly altered version of the given args list. Method is called right before git command execution. Subclasses can use it to alter the behaviour of the superclass""" return args def diff(self, other=Index, paths=None, create_patch=False, **kwargs): """Creates diffs between two items being trees, trees and index or an index and the working tree. :param other: Is the item to compare us with. If None, we will be compared to the working tree. If Treeish, it will be compared against the respective tree If Index ( type ), it will be compared against the index. It defaults to Index to assure the method will not by-default fail on bare repositories. :param paths: is a list of paths or a single path to limit the diff to. It will only include at least one of the givne path or paths. :param create_patch: If True, the returned Diff contains a detailed patch that if applied makes the self to other. Patches are somwhat costly as blobs have to be read and diffed. :param kwargs: Additional arguments passed to git-diff, such as R=True to swap both sides of the diff. :return: git.DiffIndex :note: Rename detection will only work if create_patch is True. On a bare repository, 'other' needs to be provided as Index or as as Tree/Commit, or a git command error will occour""" args = list() args.append( "--abbrev=40" ) # we need full shas args.append( "--full-index" ) # get full index paths, not only filenames if create_patch: args.append("-p") args.append("-M") # check for renames else: args.append("--raw") if paths is not None and not isinstance(paths, (tuple,list)): paths = [ paths ] if other is not None and other is not self.Index: args.insert(0, other) if other is self.Index: args.insert(0, "--cached") args.insert(0,self) # paths is list here or None if paths: args.append("--") args.extend(paths) # END paths handling kwargs['as_process'] = True proc = self.repo.git.diff(*self._process_diff_args(args), **kwargs) diff_method = Diff._index_from_raw_format if create_patch: diff_method = Diff._index_from_patch_format index = diff_method(self.repo, proc.stdout) status = proc.wait() return index class DiffIndex(list): """Implements an Index for diffs, allowing a list of Diffs to be queried by the diff properties. 
The class improves the diff handling convenience""" # change type invariant identifying possible ways a blob can have changed # A = Added # D = Deleted # R = Renamed # M = modified change_type = ("A", "D", "R", "M") def iter_change_type(self, change_type): """ :return: iterator yieling Diff instances that match the given change_type :param change_type: Member of DiffIndex.change_type, namely: * 'A' for added paths * 'D' for deleted paths * 'R' for renamed paths * 'M' for paths with modified data""" if change_type not in self.change_type: raise ValueError( "Invalid change type: %s" % change_type ) for diff in self: if change_type == "A" and diff.new_file: yield diff elif change_type == "D" and diff.deleted_file: yield diff elif change_type == "R" and diff.renamed: yield diff elif change_type == "M" and diff.a_blob and diff.b_blob and diff.a_blob != diff.b_blob: yield diff # END for each diff class Diff(object): """A Diff contains diff information between two Trees. It contains two sides a and b of the diff, members are prefixed with "a" and "b" respectively to inidcate that. Diffs keep information about the changed blob objects, the file mode, renames, deletions and new files. There are a few cases where None has to be expected as member variable value: ``New File``:: a_mode is None a_blob is None ``Deleted File``:: b_mode is None b_blob is None ``Working Tree Blobs`` When comparing to working trees, the working tree blob will have a null hexsha as a corresponding object does not yet exist. The mode will be null as well. But the path will be available though. If it is listed in a diff the working tree version of the file must be different to the version in the index or tree, and hence has been modified.""" # precompiled regex re_header = re.compile(r""" #^diff[ ]--git [ ]a/(?P<a_path>\S+)[ ]b/(?P<b_path>\S+)\n (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n ^rename[ ]from[ ](?P<rename_from>\S+)\n ^rename[ ]to[ ](?P<rename_to>\S+)(?:\n|$))? (?:^old[ ]mode[ ](?P<old_mode>\d+)\n ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))? (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))? (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))? (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+) \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))? 
""", re.VERBOSE | re.MULTILINE) # can be used for comparisons NULL_HEX_SHA = "0"*40 NULL_BIN_SHA = "\0"*20 __slots__ = ("a_blob", "b_blob", "a_mode", "b_mode", "new_file", "deleted_file", "rename_from", "rename_to", "diff") def __init__(self, repo, a_path, b_path, a_blob_id, b_blob_id, a_mode, b_mode, new_file, deleted_file, rename_from, rename_to, diff): self.a_mode = a_mode self.b_mode = b_mode if self.a_mode: self.a_mode = mode_str_to_int(self.a_mode) if self.b_mode: self.b_mode = mode_str_to_int(self.b_mode) if a_blob_id is None: self.a_blob = None else: self.a_blob = Blob(repo, hex_to_bin(a_blob_id), mode=self.a_mode, path=a_path) if b_blob_id is None: self.b_blob = None else: self.b_blob = Blob(repo, hex_to_bin(b_blob_id), mode=self.b_mode, path=b_path) self.new_file = new_file self.deleted_file = deleted_file # be clear and use None instead of empty strings self.rename_from = rename_from or None self.rename_to = rename_to or None self.diff = diff def __eq__(self, other): for name in self.__slots__: if getattr(self, name) != getattr(other, name): return False # END for each name return True def __ne__(self, other): return not ( self == other ) def __hash__(self): return hash(tuple(getattr(self,n) for n in self.__slots__)) def __str__(self): h = "%s" if self.a_blob: h %= self.a_blob.path elif self.b_blob: h %= self.b_blob.path msg = '' l = None # temp line ll = 0 # line length for b,n in zip((self.a_blob, self.b_blob), ('lhs', 'rhs')): if b: l = "\n%s: %o | %s" % (n, b.mode, b.sha) else: l = "\n%s: None" % n # END if blob is not None ll = max(len(l), ll) msg += l # END for each blob # add headline h += '\n' + '='*ll if self.deleted_file: msg += '\nfile deleted in rhs' if self.new_file: msg += '\nfile added in rhs' if self.rename_from: msg += '\nfile renamed from %r' % self.rename_from if self.rename_to: msg += '\nfile renamed to %r' % self.rename_to if self.diff: msg += '\n---' msg += self.diff msg += '\n---' # END diff info return h + msg @property def renamed(self): """:returns: True if the blob of our diff has been renamed""" return self.rename_from != self.rename_to @classmethod def _index_from_patch_format(cls, repo, stream): """Create a new DiffIndex from the given text which must be in patch format :param repo: is the repository we are operating on - it is required :param stream: result of 'git diff' as a stream (supporting file protocol) :return: git.DiffIndex """ # for now, we have to bake the stream text = stream.read() index = DiffIndex() diff_header = cls.re_header.match for diff in ('\n' + text).split('\ndiff --git')[1:]: header = diff_header(diff) a_path, b_path, similarity_index, rename_from, rename_to, \ old_mode, new_mode, new_file_mode, deleted_file_mode, \ a_blob_id, b_blob_id, b_mode = header.groups() new_file, deleted_file = bool(new_file_mode), bool(deleted_file_mode) index.append(Diff(repo, a_path, b_path, a_blob_id, b_blob_id, old_mode or deleted_file_mode, new_mode or new_file_mode or b_mode, new_file, deleted_file, rename_from, rename_to, diff[header.end():])) return index @classmethod def _index_from_raw_format(cls, repo, stream): """Create a new DiffIndex from the given stream which must be in raw format. 
:note: This format is inherently incapable of detecting renames, hence we only modify, delete and add files :return: git.DiffIndex""" # handles # :100644 100644 6870991011cc8d9853a7a8a6f02061512c6a8190 37c5e30c879213e9ae83b21e9d11e55fc20c54b7 M .gitignore index = DiffIndex() for line in stream: if not line.startswith(":"): continue # END its not a valid diff line old_mode, new_mode, a_blob_id, b_blob_id, change_type, path = line[1:].split(None, 5) path = path.strip() a_path = path b_path = path deleted_file = False new_file = False # NOTE: We cannot conclude from the existance of a blob to change type # as diffs with the working do not have blobs yet if change_type == 'D': b_blob_id = None deleted_file = True elif change_type == 'A': a_blob_id = None new_file = True # END add/remove handling diff = Diff(repo, a_path, b_path, a_blob_id, b_blob_id, old_mode, new_mode, new_file, deleted_file, None, None, '') index.append(diff) # END for each line return index
0.040734
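The Diffable.diff docstring above describes comparing trees, the index and the working tree, and DiffIndex.iter_change_type describes filtering the result by change type. A minimal usage sketch of that API follows; the repository path and commit selection are illustrative, not taken from the source.

# Sketch: diff the previous commit against HEAD and walk the result by
# change type, using only the API documented in diff.py above.
from git import Repo

repo = Repo("/path/to/repo")                     # illustrative path
parent = repo.head.commit.parents[0]             # previous commit
diff_index = parent.diff(repo.head.commit,       # Treeish vs. Treeish
                         create_patch=True)      # needed for rename detection

for diff in diff_index.iter_change_type('M'):    # 'A', 'D', 'R' or 'M'
    # a_blob / b_blob carry the path and mode of each side of the change;
    # diff.diff holds the patch text because create_patch was requested.
    print(diff.a_blob.path)

# Other comparison targets, per the docstring:
# repo.head.commit.diff()      -> against the index (other defaults to Index)
# repo.head.commit.diff(None)  -> against the working tree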
# -*- coding: utf-8 -*- ## ## ## This file is part of Indico. ## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN). ## ## Indico is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 3 of the ## License, or (at your option) any later version. ## ## Indico is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Indico;if not, see <http://www.gnu.org/licenses/>. import sys import traceback from MaKaC.common.fossilize import Fossilizable, fossilizes from MaKaC.fossils.error import ICausedErrorFossil, INoReportErrorFossil, IWarningFossil,\ IResultWithWarningFossil, IResultWithHighlightFossil class CausedError(Exception, Fossilizable): """ A normal error, triggered on the server side """ fossilizes(ICausedErrorFossil) def __init__(self, code, message, inner=None, type=None): self.code = code self.message = message self.inner = inner self.type = type def getMessage(self): return self.message def getCode(self): return self.code def getInner(self): return self.inner def getType(self): return self.type def __str__(self): if not self.inner: return "%s : %s (no inner exception)" % (self.code, self.message) else: if type(self.inner) is list: inner = "\r\n".join(self.inner) else: inner = self.inner return "%s : %s\r\n\r\nInner Exception:\r\n%s" % (self.code, self.message, inner) class NoReportError(CausedError): """ An error that doesn't get reported (no log entry, no warning e-mail, no error report form) """ fossilizes(INoReportErrorFossil) def __init__(self, message, inner=None, title=None, explanation=None): CausedError.__init__(self, "", message, inner, "noReport") self._title = title self._explanation = explanation def getTitle(self): """ A title for the error (optional) """ return self._title def getExplanation(self): return self._explanation class CSRFError(NoReportError): def __init__(self): NoReportError.__init__(self, _('Oops, looks like there was a problem with your current session. 
Please refresh the page and try again.')) self.code = 'ERR-CSRF' class RequestError(CausedError): pass class ProcessError(CausedError): def __init__(self, code, message): CausedError.__init__(self, code, message, inner = traceback.format_exception(*sys.exc_info())) class ServiceError(CausedError): def __init__(self, code='', message='', inner = None): CausedError.__init__(self, code, message, inner) class PermissionError(CausedError): pass class HTMLSecurityError(CausedError): pass class ServiceAccessError(NoReportError): pass class TimingNoReportError(NoReportError): pass class Warning(Fossilizable): fossilizes(IWarningFossil) def __init__(self, title, content): self._title = title self._content = content def getTitle(self): return self._title def getProblems(self): return self._content class ResultWithWarning(Fossilizable): fossilizes(IResultWithWarningFossil) def __init__(self, result, warning): self._result = result self._warning = warning def getResult(self): return self._result def getWarning(self): return self._warning def hasWarning(self): return True class ResultWithHighlight(Fossilizable): fossilizes(IResultWithHighlightFossil) def __init__(self, result, highlight): self._result = result self._highlight = highlight def getResult(self): return self._result def getHighlight(self): return self._highlight def hasHighlight(self): return True
0.007503
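A small sketch, grounded only in the classes defined above, showing how ProcessError captures the active traceback as its inner exception and how __str__ folds it into the reportable message, while NoReportError marks errors that should not be logged or e-mailed.

# Sketch based only on the error classes above.
try:
    1 / 0
except ZeroDivisionError:
    err = ProcessError("ERR-P1", "Division failed")
    text = str(err)   # "ERR-P1 : Division failed" plus the inner traceback lines

# Errors that should stay out of logs and error reports use NoReportError;
# title and explanation are optional extra context for the client side.
quiet = NoReportError("Nothing to report", title="Heads up")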
# -*- coding: utf-8 -*- # Copyright (c) 2002 - 2013 Detlev Offenbach <[email protected]> # """ Module implementing the debug base class. """ import sys import traceback import bdb import os import types import atexit import inspect from DebugProtocol import * gRecursionLimit = 64 def printerr(s): """ Module function used for debugging the debug client. @param s data to be printed """ import sys sys.__stderr__.write('%s\n' % unicode(s)) sys.__stderr__.flush() def setRecursionLimit(limit): """ Module function to set the recursion limit. @param limit recursion limit (integer) """ global gRecursionLimit gRecursionLimit = limit class DebugBase(bdb.Bdb): """ Class implementing base class of the debugger. Provides simple wrapper methods around bdb for the 'owning' client to call to step etc. """ def __init__(self, dbgClient): """ Constructor @param dbgClient the owning client """ bdb.Bdb.__init__(self) self._dbgClient = dbgClient self._mainThread = 1 self.breaks = self._dbgClient.breakpoints self.__event = "" self.__isBroken = "" self.cFrame = None # current frame we are at self.currentFrame = None self.currentFrameLocals = None # frame that we are stepping in, can be different than currentFrame self.stepFrame = None # provide a hook to perform a hard breakpoint # Use it like this: # if hasattr(sys, 'breakpoint): sys.breakpoint() sys.breakpoint = self.set_trace # initialize parent bdb.Bdb.reset(self) self.__recursionDepth = -1 self.setRecursionDepth(inspect.currentframe()) def getCurrentFrame(self): """ Public method to return the current frame. @return the current frame """ return self.currentFrame def getCurrentFrameLocals(self): """ Public method to return the locals dictionary of the current frame. @return locals dictionary of the current frame """ return self.currentFrameLocals def step(self, traceMode): """ Public method to perform a step operation in this thread. @param traceMode If it is non-zero, then the step is a step into, otherwise it is a step over. """ self.stepFrame = self.currentFrame if traceMode: self.currentFrame = None self.set_step() else: self.set_next(self.currentFrame) def stepOut(self): """ Public method to perform a step out of the current call. """ self.stepFrame = self.currentFrame self.set_return(self.currentFrame) def go(self, special): """ Public method to resume the thread. It resumes the thread stopping only at breakpoints or exceptions. @param special flag indicating a special continue operation """ self.currentFrame = None self.set_continue(special) def setRecursionDepth(self, frame): """ Public method to determine the current recursion depth. @param frame The current stack frame. """ self.__recursionDepth = 0 while frame is not None: self.__recursionDepth += 1 frame = frame.f_back def profile(self, frame, event, arg): """ Public method used to trace some stuff independent of the debugger trace function. @param frame The current stack frame. @param event The trace event (string) @param arg The arguments """ if event == 'return': self.cFrame = frame.f_back self.__recursionDepth -= 1 elif event == 'call': self.cFrame = frame self.__recursionDepth += 1 if self.__recursionDepth > gRecursionLimit: raise RuntimeError('maximum recursion depth exceeded\n' '(offending frame is two down the stack)') def trace_dispatch(self, frame, event, arg): """ Reimplemented from bdb.py to do some special things. This specialty is to check the connection to the debug server for new events (i.e. new breakpoints) while we are going through the code. @param frame The current stack frame. 
@param event The trace event (string) @param arg The arguments @return local trace function """ if self.quitting: return # None # give the client a chance to push through new break points. self._dbgClient.eventPoll() self.__event == event self.__isBroken = False if event == 'line': return self.dispatch_line(frame) if event == 'call': return self.dispatch_call(frame, arg) if event == 'return': return self.dispatch_return(frame, arg) if event == 'exception': return self.dispatch_exception(frame, arg) if event == 'c_call': return self.trace_dispatch if event == 'c_exception': return self.trace_dispatch if event == 'c_return': return self.trace_dispatch print 'DebugBase.trace_dispatch: unknown debugging event:', `event` return self.trace_dispatch def dispatch_line(self, frame): """ Reimplemented from bdb.py to do some special things. This speciality is to check the connection to the debug server for new events (i.e. new breakpoints) while we are going through the code. @param frame The current stack frame. @return local trace function """ if self.stop_here(frame) or self.break_here(frame): self.user_line(frame) if self.quitting: raise bdb.BdbQuit return self.trace_dispatch def dispatch_return(self, frame, arg): """ Reimplemented from bdb.py to handle passive mode cleanly. @param frame The current stack frame. @param arg The arguments @return local trace function """ if self.stop_here(frame) or frame == self.returnframe: self.user_return(frame, arg) if self.quitting and not self._dbgClient.passive: raise bdb.BdbQuit return self.trace_dispatch def dispatch_exception(self, frame, arg): """ Reimplemented from bdb.py to always call user_exception. @param frame The current stack frame. @param arg The arguments @return local trace function """ if not self.__skip_it(frame): self.user_exception(frame, arg) if self.quitting: raise bdb.BdbQuit return self.trace_dispatch def set_trace(self, frame = None): """ Overridden method of bdb.py to do some special setup. @param frame frame to start debugging from """ bdb.Bdb.set_trace(self, frame) sys.setprofile(self.profile) def set_continue(self, special): """ Reimplemented from bdb.py to always get informed of exceptions. @param special flag indicating a special continue operation """ # Modified version of the one found in bdb.py # Here we only set a new stop frame if it is a normal continue. if not special: self.stopframe = self.botframe self.returnframe = None self.quitting = 0 def set_quit(self): """ Public method to quit. It wraps call to bdb to clear the current frame properly. """ self.currentFrame = None sys.setprofile(None) bdb.Bdb.set_quit(self) def fix_frame_filename(self, frame): """ Public method used to fixup the filename for a given frame. The logic employed here is that if a module was loaded from a .pyc file, then the correct .py to operate with should be in the same path as the .pyc. The reason this logic is needed is that when a .pyc file is generated, the filename embedded and thus what is readable in the code object of the frame object is the fully qualified filepath when the pyc is generated. If files are moved from machine to machine this can break debugging as the .pyc will refer to the .py on the original machine. Another case might be sharing code over a network... This logic deals with that. 
@param frame the frame object """ # get module name from __file__ if frame.f_globals.has_key('__file__') and \ frame.f_globals['__file__'] and \ frame.f_globals['__file__'] == frame.f_code.co_filename: root, ext = os.path.splitext(frame.f_globals['__file__']) if ext == '.pyc' or ext == '.py' or ext == '.pyo': fixedName = root + '.py' if os.path.exists(fixedName): return fixedName return frame.f_code.co_filename def set_watch(self, cond, temporary=0): """ Public method to set a watch expression. @param cond expression of the watch expression (string) @param temporary flag indicating a temporary watch expression (boolean) """ bp = bdb.Breakpoint("Watch", 0, temporary, cond) if cond.endswith('??created??') or cond.endswith('??changed??'): bp.condition, bp.special = cond.split() else: bp.condition = cond bp.special = "" bp.values = {} if not self.breaks.has_key("Watch"): self.breaks["Watch"] = 1 else: self.breaks["Watch"] += 1 def clear_watch(self, cond): """ Public method to clear a watch expression. @param cond expression of the watch expression to be cleared (string) """ try: possibles = bdb.Breakpoint.bplist["Watch", 0] for i in range(0, len(possibles)): b = possibles[i] if b.cond == cond: b.deleteMe() self.breaks["Watch"] -= 1 if self.breaks["Watch"] == 0: del self.breaks["Watch"] break except KeyError: pass def get_watch(self, cond): """ Public method to get a watch expression. @param cond expression of the watch expression to be cleared (string) """ possibles = bdb.Breakpoint.bplist["Watch", 0] for i in range(0, len(possibles)): b = possibles[i] if b.cond == cond: return b def __do_clearWatch(self, cond): """ Private method called to clear a temporary watch expression. @param cond expression of the watch expression to be cleared (string) """ self.clear_watch(cond) self._dbgClient.write('%s%s\n' % (ResponseClearWatch, cond)) def __effective(self, frame): """ Private method to determine, if a watch expression is effective. @param frame the current execution frame @return tuple of watch expression and a flag to indicate, that a temporary watch expression may be deleted (bdb.Breakpoint, boolean) """ possibles = bdb.Breakpoint.bplist["Watch", 0] for i in range(0, len(possibles)): b = possibles[i] if b.enabled == 0: continue if not b.cond: # watch expression without expression shouldn't occur, just ignore it continue try: val = eval(b.condition, frame.f_globals, frame.f_locals) if b.special: if b.special == '??created??': if b.values[frame][0] == 0: b.values[frame][0] = 1 b.values[frame][1] = val return (b, 1) else: continue b.values[frame][0] = 1 if b.special == '??changed??': if b.values[frame][1] != val: b.values[frame][1] = val if b.values[frame][2] > 0: b.values[frame][2] -= 1 continue else: return (b, 1) else: continue continue if val: if b.ignore > 0: b.ignore -= 1 continue else: return (b, 1) except: if b.special: try: b.values[frame][0] = 0 except KeyError: b.values[frame] = [0, None, b.ignore] continue return (None, None) def break_here(self, frame): """ Reimplemented from bdb.py to fix the filename from the frame. See fix_frame_filename for more info. @param frame the frame object @return flag indicating the break status (boolean) """ filename = self.canonic(self.fix_frame_filename(frame)) if not self.breaks.has_key(filename) and not self.breaks.has_key("Watch"): return 0 if self.breaks.has_key(filename): lineno = frame.f_lineno if lineno in self.breaks[filename]: # flag says ok to delete temp. 
bp (bp, flag) = bdb.effective(filename, lineno, frame) if bp: self.currentbp = bp.number if (flag and bp.temporary): self.__do_clear(filename, lineno) return 1 if self.breaks.has_key("Watch"): # flag says ok to delete temp. bp (bp, flag) = self.__effective(frame) if bp: self.currentbp = bp.number if (flag and bp.temporary): self.__do_clearWatch(bp.cond) return 1 return 0 def break_anywhere(self, frame): """ Reimplemented from bdb.py to do some special things. These speciality is to fix the filename from the frame (see fix_frame_filename for more info). @param frame the frame object @return flag indicating the break status (boolean) """ return self.breaks.has_key( self.canonic(self.fix_frame_filename(frame))) or \ (self.breaks.has_key("Watch") and self.breaks["Watch"]) def get_break(self, filename, lineno): """ Reimplemented from bdb.py to get the first breakpoint of a particular line. Because eric4 supports only one breakpoint per line, this overwritten method will return this one and only breakpoint. @param filename the filename of the bp to retrieve (string) @param ineno the linenumber of the bp to retrieve (integer) @return breakpoint or None, if there is no bp """ filename = self.canonic(filename) return self.breaks.has_key(filename) and \ lineno in self.breaks[filename] and \ bdb.Breakpoint.bplist[filename, lineno][0] or None def __do_clear(self, filename, lineno): """ Private method called to clear a temporary breakpoint. @param filename name of the file the bp belongs to @param lineno linenumber of the bp """ self.clear_break(filename, lineno) self._dbgClient.write('%s%s,%d\n' % (ResponseClearBreak, filename, lineno)) def getStack(self): """ Public method to get the stack. @return list of lists with file name (string), line number (integer) and function name (string) """ fr = self.cFrame stack = [] while fr is not None: fname = self._dbgClient.absPath(self.fix_frame_filename(fr)) fline = fr.f_lineno ffunc = fr.f_code.co_name if ffunc == '?': ffunc = '' stack.append([fname, fline, ffunc]) if fr == self._dbgClient.mainFrame: fr = None else: fr = fr.f_back return stack def user_line(self, frame): """ Reimplemented to handle the program about to execute a particular line. @param frame the frame object """ line = frame.f_lineno # We never stop on line 0. if line == 0: return fn = self._dbgClient.absPath(self.fix_frame_filename(frame)) # See if we are skipping at the start of a newly loaded program. if self._dbgClient.mainFrame is None: if fn != self._dbgClient.getRunning(): return self._dbgClient.mainFrame = frame self.currentFrame = frame self.currentFrameLocals = frame.f_locals # remember the locals because it is reinitialized when accessed fr = frame stack = [] while fr is not None: # Reset the trace function so we can be sure # to trace all functions up the stack... This gets around # problems where an exception/breakpoint has occurred # but we had disabled tracing along the way via a None # return from dispatch_call fr.f_trace = self.trace_dispatch fname = self._dbgClient.absPath(self.fix_frame_filename(fr)) fline = fr.f_lineno ffunc = fr.f_code.co_name if ffunc == '?': ffunc = '' stack.append([fname, fline, ffunc]) if fr == self._dbgClient.mainFrame: fr = None else: fr = fr.f_back self.__isBroken = True self._dbgClient.write('%s%s\n' % (ResponseLine, unicode(stack))) self._dbgClient.eventLoop() def user_exception(self,frame,(exctype,excval,exctb),unhandled=0): """ Reimplemented to report an exception to the debug server. 
@param frame the frame object @param exctype the type of the exception @param excval data about the exception @param exctb traceback for the exception @param unhandled flag indicating an uncaught exception """ if exctype in [SystemExit, bdb.BdbQuit]: atexit._run_exitfuncs() if excval is None: excval = 0 elif isinstance(excval, (unicode, str)): self._dbgClient.write(excval) excval = 1 if isinstance(excval, int): self._dbgClient.progTerminated(excval) else: self._dbgClient.progTerminated(excval.code) return elif exctype in [SyntaxError, IndentationError]: try: message, (filename, linenr, charnr, text) = excval except ValueError: exclist = [] else: exclist = [message, [filename, linenr, charnr]] self._dbgClient.write("%s%s\n" % (ResponseSyntax, unicode(exclist))) else: if type(exctype) in [types.ClassType, # Python up to 2.4 types.TypeType]: # Python 2.5+ exctype = exctype.__name__ if excval is None: excval = '' if unhandled: exctypetxt = "unhandled %s" % unicode(exctype) else: exctypetxt = unicode(exctype) try: exclist = [exctypetxt, unicode(excval).encode(self._dbgClient.getCoding())] except TypeError: exclist = [exctypetxt, str(excval)] if exctb: frlist = self.__extract_stack(exctb) frlist.reverse() self.currentFrame = frlist[0] self.currentFrameLocals = frlist[0].f_locals # remember the locals because it is reinitialized when accessed for fr in frlist: filename = self._dbgClient.absPath(self.fix_frame_filename(fr)) linenr = fr.f_lineno if os.path.basename(filename).startswith("DebugClient") or \ os.path.basename(filename) == "bdb.py": break exclist.append([filename, linenr]) self._dbgClient.write("%s%s\n" % (ResponseException, unicode(exclist))) if exctb is None: return self._dbgClient.eventLoop() def __extract_stack(self, exctb): """ Private member to return a list of stack frames. @param exctb exception traceback @return list of stack frames """ tb = exctb stack = [] while tb is not None: stack.append(tb.tb_frame) tb = tb.tb_next tb = None return stack def user_return(self,frame,retval): """ Reimplemented to report program termination to the debug server. @param frame the frame object @param retval the return value of the program """ # The program has finished if we have just left the first frame. if frame == self._dbgClient.mainFrame and \ self._mainThread: atexit._run_exitfuncs() self._dbgClient.progTerminated(retval) elif frame is not self.stepFrame: self.stepFrame = None self.user_line(frame) def stop_here(self,frame): """ Reimplemented to filter out debugger files. Tracing is turned off for files that are part of the debugger that are called from the application being debugged. @param frame the frame object @return flag indicating whether the debugger should stop here """ if self.__skip_it(frame): return 0 return bdb.Bdb.stop_here(self,frame) def __skip_it(self, frame): """ Private method to filter out debugger files. Tracing is turned off for files that are part of the debugger that are called from the application being debugged. @param frame the frame object @return flag indicating whether the debugger should skip this frame """ fn = self.fix_frame_filename(frame) # Eliminate things like <string> and <stdin>. if fn[0] == '<': return 1 #XXX - think of a better way to do this. It's only a convience for #debugging the debugger - when the debugger code is in the current #directory. 
if os.path.basename(fn) in [\ 'AsyncFile.py', 'AsyncIO.py', 'DebugConfig.py', 'DCTestResult.py', 'DebugBase.py', 'DebugClientBase.py', 'DebugClientCapabilities.py', 'DebugClient.py', 'DebugClientThreads.py', 'DebugProtocol.py', 'DebugThread.py', 'FlexCompleter.py', 'PyProfile.py'] or \ os.path.dirname(fn).endswith("coverage"): return 1 if self._dbgClient.shouldSkip(fn): return 1 return 0 def isBroken(self): """ Public method to return the broken state of the debugger. @return flag indicating the broken state (boolean) """ return self.__isBroken def getEvent(self): """ Public method to return the last debugger event. @return last debugger event (string) """ return self.__event
0.006054
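DebugBase.profile above tracks recursion depth with a profile hook installed via sys.setprofile, independently of the trace function, and aborts once a configurable limit is exceeded. A standalone sketch of that technique follows; the names and the limit value are illustrative, not the eric4 class itself.

# Sketch of the recursion-depth technique used by DebugBase.profile:
# count 'call'/'return' events in a profile hook and raise past a limit.
import sys

RECURSION_LIMIT = 64
_depth = [0]                       # mutable cell, works on Python 2 and 3

def _profile(frame, event, arg):
    if event == 'call':
        _depth[0] += 1
        if _depth[0] > RECURSION_LIMIT:
            # CPython unsets the profile hook when it raises, so this
            # error fires once and then propagates to the profiled code.
            raise RuntimeError('maximum recursion depth exceeded')
    elif event == 'return':
        _depth[0] -= 1

def deep(n):
    return 0 if n == 0 else 1 + deep(n - 1)

sys.setprofile(_profile)
try:
    deep(10)                       # well below the limit: runs normally
    try:
        deep(200)                  # trips the limit
    except RuntimeError as exc:
        print(exc)
finally:
    sys.setprofile(None)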
#!/usr/bin/env python # -*- coding: utf-8 -*- """ get_base_library_packages.py This pulls data from the Python documentation listing https://docs.python.org/2/py-modindex.html I had to do "View Source" on that page to find something that uniquely identified the things on the page I wanted and then I could use BeautifulSoup to get those things. """ import requests # for HTTP requests and much more import sys # to get command line arguments from bs4 import BeautifulSoup # to parse XML / html default_outfile_name = "base_library_packages.txt" def get_base_packages(outfile_name=default_outfile_name): response = requests.get("https://docs.python.org/2/py-modindex.html") soup = BeautifulSoup(response.text) modules = soup.find_all("tt", attrs={"class":"xref"}) with open(outfile_name, "w") as outfile: for m in modules: if '.' not in m.text: # Ignore submodules by skipping anything # that has a '.' in the name outfile.write(m.text) outfile.write("\n") if __name__ == "__main__": if len(sys.argv) > 1: print "Writing module list to file: ", sys.argv[1] get_base_packages(sys.argv[1]) else: print "Writing module list to file: ", default_outfile_name get_base_packages()
0.001509
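Not part of the original script: on Python 3.10 and later the same top-level standard-library listing is available directly from the interpreter, which avoids the HTTP request and the HTML parsing. A short sketch, writing to the same default filename:

# Scrape-free alternative for Python 3.10+: sys.stdlib_module_names is a
# frozenset of top-level standard-library module names.
import sys

names = sorted(sys.stdlib_module_names)
with open("base_library_packages.txt", "w") as outfile:
    outfile.write("\n".join(names) + "\n")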
from datetime import datetime, timedelta from io import StringIO import warnings import numpy as np import pytest from pandas import ( Categorical, DataFrame, Series, date_range, option_context, period_range, ) import pandas._testing as tm import pandas.io.formats.format as fmt class TestDataFrameReprInfoEtc: def test_repr_empty(self): # empty repr(DataFrame()) # empty with index frame = DataFrame(index=np.arange(1000)) repr(frame) def test_repr_mixed(self, float_string_frame): buf = StringIO() # mixed repr(float_string_frame) float_string_frame.info(verbose=False, buf=buf) @pytest.mark.slow def test_repr_mixed_big(self): # big mixed biggie = DataFrame( {"A": np.random.randn(200), "B": tm.makeStringIndex(200)}, index=range(200) ) biggie.loc[:20, "A"] = np.nan biggie.loc[:20, "B"] = np.nan repr(biggie) def test_repr(self, float_frame): buf = StringIO() # small one repr(float_frame) float_frame.info(verbose=False, buf=buf) # even smaller float_frame.reindex(columns=["A"]).info(verbose=False, buf=buf) float_frame.reindex(columns=["A", "B"]).info(verbose=False, buf=buf) # exhausting cases in DataFrame.info # columns but no index no_index = DataFrame(columns=[0, 1, 3]) repr(no_index) # no columns or index DataFrame().info(buf=buf) df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"]) assert "\t" not in repr(df) assert "\r" not in repr(df) assert "a\n" not in repr(df) def test_repr_dimensions(self): df = DataFrame([[1, 2], [3, 4]]) with option_context("display.show_dimensions", True): assert "2 rows x 2 columns" in repr(df) with option_context("display.show_dimensions", False): assert "2 rows x 2 columns" not in repr(df) with option_context("display.show_dimensions", "truncate"): assert "2 rows x 2 columns" not in repr(df) @pytest.mark.slow def test_repr_big(self): # big one biggie = DataFrame(np.zeros((200, 4)), columns=range(4), index=range(200)) repr(biggie) def test_repr_unsortable(self, float_frame): # columns are not sortable warn_filters = warnings.filters warnings.filterwarnings("ignore", category=FutureWarning, module=".*format") unsortable = DataFrame( { "foo": [1] * 50, datetime.today(): [1] * 50, "bar": ["bar"] * 50, datetime.today() + timedelta(1): ["bar"] * 50, }, index=np.arange(50), ) repr(unsortable) fmt.set_option("display.precision", 3, "display.column_space", 10) repr(float_frame) fmt.set_option("display.max_rows", 10, "display.max_columns", 2) repr(float_frame) fmt.set_option("display.max_rows", 1000, "display.max_columns", 1000) repr(float_frame) tm.reset_display_options() warnings.filters = warn_filters def test_repr_unicode(self): uval = "\u03c3\u03c3\u03c3\u03c3" df = DataFrame({"A": [uval, uval]}) result = repr(df) ex_top = " A" assert result.split("\n")[0].rstrip() == ex_top df = DataFrame({"A": [uval, uval]}) result = repr(df) assert result.split("\n")[0].rstrip() == ex_top def test_unicode_string_with_unicode(self): df = DataFrame({"A": ["\u05d0"]}) str(df) def test_repr_unicode_columns(self): df = DataFrame({"\u05d0": [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]}) repr(df.columns) # should not raise UnicodeDecodeError def test_str_to_bytes_raises(self): # GH 26447 df = DataFrame({"A": ["abc"]}) msg = "^'str' object cannot be interpreted as an integer$" with pytest.raises(TypeError, match=msg): bytes(df) def test_very_wide_info_repr(self): df = DataFrame(np.random.randn(10, 20), columns=tm.rands_array(10, 20)) repr(df) def test_repr_column_name_unicode_truncation_bug(self): # #1906 df = DataFrame( { "Id": [7117434], "StringCol": ( "Is it possible 
to modify drop plot code" "so that the output graph is displayed " "in iphone simulator, Is it possible to " "modify drop plot code so that the " "output graph is \xe2\x80\xa8displayed " "in iphone simulator.Now we are adding " "the CSV file externally. I want to Call " "the File through the code.." ), } ) with option_context("display.max_columns", 20): assert "StringCol" in repr(df) def test_latex_repr(self): result = r"""\begin{tabular}{llll} \toprule {} & 0 & 1 & 2 \\ \midrule 0 & $\alpha$ & b & c \\ 1 & 1 & 2 & 3 \\ \bottomrule \end{tabular} """ with option_context("display.latex.escape", False, "display.latex.repr", True): df = DataFrame([[r"$\alpha$", "b", "c"], [1, 2, 3]]) assert result == df._repr_latex_() # GH 12182 assert df._repr_latex_() is None def test_repr_categorical_dates_periods(self): # normal DataFrame dt = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern") p = period_range("2011-01", freq="M", periods=5) df = DataFrame({"dt": dt, "p": p}) exp = """ dt p 0 2011-01-01 09:00:00-05:00 2011-01 1 2011-01-01 10:00:00-05:00 2011-02 2 2011-01-01 11:00:00-05:00 2011-03 3 2011-01-01 12:00:00-05:00 2011-04 4 2011-01-01 13:00:00-05:00 2011-05""" assert repr(df) == exp df2 = DataFrame({"dt": Categorical(dt), "p": Categorical(p)}) assert repr(df2) == exp @pytest.mark.parametrize("arg", [np.datetime64, np.timedelta64]) @pytest.mark.parametrize( "box, expected", [[Series, "0 NaT\ndtype: object"], [DataFrame, " 0\n0 NaT"]], ) def test_repr_np_nat_with_object(self, arg, box, expected): # GH 25445 result = repr(box([arg("NaT")], dtype=object)) assert result == expected def test_frame_datetime64_pre1900_repr(self): df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="A-DEC")}) # it works! repr(df)
0.00104
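The two assertions in test_latex_repr above only make sense together because the first sits inside the option_context block and the second (GH 12182) sits outside it. A minimal sketch of that scoping, using the same pandas option the test exercises:

# _repr_latex_ is gated by an option: a LaTeX table while the option is
# on, None once the option_context block has restored the default.
from pandas import DataFrame, option_context

df = DataFrame([[1, 2], [3, 4]])
with option_context("display.latex.repr", True):
    assert df._repr_latex_() is not None   # LaTeX string while the option is on
assert df._repr_latex_() is None           # default behaviour outside the block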
#!/usr/bin/env python # This file is part of VoltDB. # Copyright (C) 2008-2017 VoltDB Inc. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # # A simple benchmark for the python client # You can start VoltDB with the measureoverhead benchmark with `ant benchmark -Dclient=org.volt.benchmark.overhead.OverheadClient -Dsitesperhost=2 -Dclientcount=1 -Dhostcount=1 -Dhost1=localhost -Dclienthost1=localhost -Dduration=999999 -Dtxnrate=1` # or you can start your own app and substitute your own procedure in ThreadFunc. # On my i7 920 desktop I was getting 4k invocations with 1 python process and 6 threads and 9.75k invocation with three python processes with 6 threads each # from multiprocessing import * from datetime import * from fastserializer import * def ProcessFunc( countQueue, endTime): client = FastSerializer("localhost", 21212, "", "") proc = VoltProcedure( client, "measureOverhead", [FastSerializer.VOLTTYPE_INTEGER] ) counter = 0; while datetime.datetime.now() < endTime: counter += 1 response = proc.call([counter]) if response.status != 1: print response.statusString now = datetime.datetime.now().microsecond / 1000 countQueue.put(counter) startTime = datetime.datetime.now() endTime = startTime + datetime.timedelta( 0, 60) countQueue = Queue() procs = [] for x in range(12): p = Process( target=ProcessFunc, args=( countQueue, endTime )) procs.append(p) p.start() for p in procs: p.join() requestCount = 0; count = None while not countQueue.empty(): requestCount = requestCount + countQueue.get() duration = datetime.datetime.now() - startTime print requestCount / duration.seconds
0.005955
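The benchmark above spreads load over several processes that each invoke a stored procedure until a shared deadline and report their counts through a Queue, which the parent sums into a throughput figure. A minimal sketch of that timing and aggregation pattern with a stand-in workload follows; the workload body is a placeholder, not the VoltDB client.

# Same pattern with a dummy workload: N workers run until a deadline,
# push their call counts onto a Queue, parent computes calls per second.
from multiprocessing import Process, Queue
import datetime

def worker(count_queue, end_time):
    counter = 0
    while datetime.datetime.now() < end_time:
        counter += 1               # replace with one procedure invocation
    count_queue.put(counter)

if __name__ == '__main__':
    start = datetime.datetime.now()
    deadline = start + datetime.timedelta(seconds=10)
    queue = Queue()
    procs = [Process(target=worker, args=(queue, deadline)) for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    total = 0
    while not queue.empty():
        total += queue.get()
    elapsed = (datetime.datetime.now() - start).seconds
    print(float(total) / elapsed)  # rough invocations per second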
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2000-2006 Donald N. Allingham # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ DateBase class for Gramps. """ #------------------------------------------------------------------------- # # Gramps modules # #------------------------------------------------------------------------- from .date import Date #------------------------------------------------------------------------- # # DateBase classes # #------------------------------------------------------------------------- class DateBase: """ Base class for storing date information. """ def __init__(self, source=None): """ Create a new DateBase, copying from source if not None. :param source: Object used to initialize the new object :type source: DateBase """ if source: self.date = Date(source.date) else: self.date = Date() def serialize(self, no_text_date=False): """ Convert the object to a serialized tuple of data. """ if self.date is None or (self.date.is_empty() and not self.date.text): date = None else: date = self.date.serialize(no_text_date) return date def unserialize(self, data): """ Convert a serialized tuple of data to an object. """ self.date = Date() if data is not None: self.date.unserialize(data) def get_date_object(self): """ Return the :class:`~.date.Date` object associated with the DateBase. :returns: Returns a DateBase :class:`~.date.Date` instance. :rtype: :class:`~.date.Date` """ if not self.date: self.date = Date() return self.date def set_date_object(self, date): """ Set the :class:`~.date.Date` object associated with the DateBase. :param date: :class:`~.date.Date` instance to be assigned to the DateBase :type date: :class:`~.date.Date` """ self.date = date
0.001803
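DateBase.serialize above collapses an empty, text-free Date to None rather than a tuple, and unserialize accepts that None back. A tiny round-trip sketch using only the class as defined (no wider Gramps API assumed):

# Round-trip sketch: an empty date serializes to None, and
# unserialize(None) restores an empty Date again.
db = DateBase()
blob = db.serialize()             # None, because the default Date is empty
restored = DateBase()
restored.unserialize(blob)        # accepts None and leaves an empty Date
assert restored.get_date_object().is_empty()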
# coding=utf-8 from __future__ import unicode_literals from random import randint from .. import Provider as AddressProvider class Provider(AddressProvider): address_formats = ['{{street_address}}, {{city}}, {{postcode}}'] building_number_formats = ['#', '##', '###'] city_formats = ['{{city_prefix}} {{first_name}}'] street_address_formats = ['{{street_name}}, {{building_number}}'] street_name_formats = ['{{street_prefix}} {{last_name}}', '{{last_name}} {{street_suffix}}'] city_prefixes = ['місто', 'село', 'селище', 'хутір'] countries = [ 'Австралія', 'Австрія', 'Азербайджан', 'Албанія', 'Алжир', 'Ангола', 'Андорра', 'Антигуа і Барбуда', 'Аргентина', 'Афганістан', 'Багамські Острови', 'Бангладеш', 'Барбадос', 'Бахрейн', 'Беліз', 'Бельгія', 'Бенін', 'Білорусь', 'Болгарія', 'Болівія', 'Боснія і Герцеговина', 'Ботсвана', 'Бразилія', 'Бруней', 'Буркіна-Фасо', 'Бурунді', 'Бутан', 'Вануату', 'Ватикан', 'Велика Британія', 'Венесуела', 'В\'єтнам', 'Вірменія', 'Габон', 'Гаїті', 'Гаяна', 'Гамбія', 'Гана', 'Гватемала', 'Гвінея', 'Гвінея-Бісау', 'Гондурас', 'Гренада', 'Греція', 'Грузія', 'Данія', 'Джибуті', 'Домініка', 'Домініканська Республіка', 'Еквадор', 'Екваторіальна Гвінея', 'Еритрея', 'Естонія', 'Ефіопія', 'Єгипет', 'Ємен', 'Замбія', 'Західна Сахара', 'Зімбабве', 'Ізраїль', 'Індія', 'Індонезія', 'Ірак', 'Іран', 'Ірландія', 'Ісландія', 'Іспанія', 'Італія', 'Йорданія', 'Кабо-Верде', 'Казахстан', 'Камбоджа', 'Камерун', 'Канада', 'Катар', 'Кенія', 'Киргизстан', 'КНР', 'Кіпр', 'Кірибаті', 'Колумбія', 'Коморські Острови', 'Конго', 'ДР Конго', 'Південна Корея', 'Північна Корея', 'Косово', 'Коста-Рика', 'Кот-д\'Івуар', 'Куба', 'Кувейт', 'Лаос', 'Латвія', 'Лесото', 'Литва', 'Ліберія', 'Ліван', 'Лівія', 'Ліхтенштейн', 'Люксембург', 'Маврикій', 'Мавританія', 'Мадагаскар', 'Республіка Македонія', 'Малаві', 'Малайзія', 'Малі', 'Мальдіви', 'Мальта', 'Марокко', 'Маршаллові Острови', 'Мексика', 'Федеративні Штати Мікронезії', 'Мозамбік', 'Молдова', 'Монако', 'Монголія', 'М\'янма', 'Намібія', 'Науру', 'Непал', 'Нігер', 'Нігерія', 'Нідерланди', 'Нікарагуа', 'Німеччина', 'Нова Зеландія', 'Норвегія', 'ОАЕ', 'Оман', 'Пакистан', 'Палау', 'Палестинська держава', 'Панама', 'Папуа Нова Гвінея', 'ПАР', 'Парагвай', 'Перу', 'Південний Судан', 'Польща', 'Португалія', 'Росія', 'Руанда', 'Румунія', 'Сальвадор', 'Самоа', 'Сан-Марино', 'Сан-Томе і Принсіпі', 'Саудівська Аравія', 'Свазіленд', 'Сейшельські Острови', 'Сенегал', 'Сент-Вінсент і Гренадини', 'Сент-Кіттс і Невіс', 'Сент-Люсія', 'Сербія', 'Сінгапур', 'Сирія', 'Словаччина', 'Словенія', 'Соломонові Острови', 'Сомалі', 'Судан', 'Суринам', 'Східний Тимор', 'США', 'Сьєрра-Леоне', 'Таджикистан', 'Таїланд', 'Тайвань', 'Танзанія', 'Того', 'Тонга', 'Тринідад і Тобаго', 'Тувалу', 'Туніс', 'Туреччина', 'Туркменістан', 'Уганда', 'Угорщина', 'Узбекистан', 'Україна', 'Уругвай', 'Фіджі', 'Філіппіни', 'Фінляндія', 'Франція', 'Хорватія', 'Центральноафриканська Республіка', 'Чад', 'Чехія', 'Чилі', 'Чорногорія', 'Швейцарія', 'Швеція', 'Шрі-Ланка', 'Ямайка', 'Японія' ] street_prefixes = [ 'вулиця', 'проспект', 'майдан', 'набережна', 'бульвар', 'провулок' ] street_suffixes = ['узвіз'] @classmethod def city_prefix(cls): return cls.random_element(cls.city_prefixes) @classmethod def postcode(cls): """The code consists of five digits (01000-99999)""" return '{}{}'.format(randint(0, 10), randint(1000, 10000)) @classmethod def street_prefix(cls): return cls.random_element(cls.street_prefixes)
0
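The postcode docstring above promises five digits in the range 01000-99999, but randint bounds are inclusive, so randint(0, 10) and randint(1000, 10000) can yield six- or seven-character codes. A hedged sketch of a generator that matches the documented range (illustrative only, not the library's implementation):

# Sketch: five-digit postcode in the documented 01000-99999 range,
# zero-padded on the left; randint(1000, 99999) covers it exactly.
from random import randint

def postcode():
    return '{:05d}'.format(randint(1000, 99999))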
#!/usr/bin/env python """ This package defines classes that simplify bit-wise creation, manipulation and interpretation of data. Classes: Bits -- An immutable container for binary data. BitArray -- A mutable container for binary data. ConstBitStream -- An immutable container with streaming methods. BitStream -- A mutable container with streaming methods. Bits (base class) / \ + mutating methods / \ + streaming methods / \ BitArray ConstBitStream \ / \ / \ / BitStream Functions: pack -- Create a BitStream from a format string. Exceptions: Error -- Module exception base class. CreationError -- Error during creation. InterpretError -- Inappropriate interpretation of binary data. ByteAlignError -- Whole byte position or length needed. ReadError -- Reading or peeking past the end of a bitstring. http://python-bitstring.googlecode.com """ __licence__ = """ The MIT License Copyright (c) 2006-2016 Scott Griffiths ([email protected]) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ __version__ = "3.1.3" __author__ = "Scott Griffiths" import numbers import copy import sys import re import binascii import mmap import os import struct import operator import collections byteorder = sys.byteorder bytealigned = False """Determines whether a number of methods default to working only on byte boundaries.""" # Maximum number of digits to use in __str__ and __repr__. MAX_CHARS = 250 # Maximum size of caches used for speed optimisations. CACHE_SIZE = 1000 class Error(Exception): """Base class for errors in the bitstring module.""" def __init__(self, *params): self.msg = params[0] if params else '' self.params = params[1:] def __str__(self): if self.params: return self.msg.format(*self.params) return self.msg class ReadError(Error, IndexError): """Reading or peeking past the end of a bitstring.""" def __init__(self, *params): Error.__init__(self, *params) class InterpretError(Error, ValueError): """Inappropriate interpretation of binary data.""" def __init__(self, *params): Error.__init__(self, *params) class ByteAlignError(Error): """Whole-byte position or length needed.""" def __init__(self, *params): Error.__init__(self, *params) class CreationError(Error, ValueError): """Inappropriate argument during bitstring creation.""" def __init__(self, *params): Error.__init__(self, *params) class ConstByteStore(object): """Stores raw bytes together with a bit offset and length. Used internally - not part of public interface. 
""" __slots__ = ('offset', '_rawarray', 'bitlength') def __init__(self, data, bitlength=None, offset=None): """data is either a bytearray or a MmapByteArray""" self._rawarray = data if offset is None: offset = 0 if bitlength is None: bitlength = 8 * len(data) - offset self.offset = offset self.bitlength = bitlength def getbit(self, pos): assert 0 <= pos < self.bitlength byte, bit = divmod(self.offset + pos, 8) return bool(self._rawarray[byte] & (128 >> bit)) def getbyte(self, pos): """Direct access to byte data.""" return self._rawarray[pos] def getbyteslice(self, start, end): """Direct access to byte data.""" c = self._rawarray[start:end] return c @property def bytelength(self): if not self.bitlength: return 0 sb = self.offset // 8 eb = (self.offset + self.bitlength - 1) // 8 return eb - sb + 1 def __copy__(self): return ByteStore(self._rawarray[:], self.bitlength, self.offset) def _appendstore(self, store): """Join another store on to the end of this one.""" if not store.bitlength: return # Set new array offset to the number of bits in the final byte of current array. store = offsetcopy(store, (self.offset + self.bitlength) % 8) if store.offset: # first do the byte with the join. joinval = (self._rawarray.pop() & (255 ^ (255 >> store.offset)) | (store.getbyte(0) & (255 >> store.offset))) self._rawarray.append(joinval) self._rawarray.extend(store._rawarray[1:]) else: self._rawarray.extend(store._rawarray) self.bitlength += store.bitlength def _prependstore(self, store): """Join another store on to the start of this one.""" if not store.bitlength: return # Set the offset of copy of store so that it's final byte # ends in a position that matches the offset of self, # then join self on to the end of it. store = offsetcopy(store, (self.offset - store.bitlength) % 8) assert (store.offset + store.bitlength) % 8 == self.offset % 8 bit_offset = self.offset % 8 if bit_offset: # first do the byte with the join. store.setbyte(-1, (store.getbyte(-1) & (255 ^ (255 >> bit_offset)) | \ (self._rawarray[self.byteoffset] & (255 >> bit_offset)))) store._rawarray.extend(self._rawarray[self.byteoffset + 1: self.byteoffset + self.bytelength]) else: store._rawarray.extend(self._rawarray[self.byteoffset: self.byteoffset + self.bytelength]) self._rawarray = store._rawarray self.offset = store.offset self.bitlength += store.bitlength @property def byteoffset(self): return self.offset // 8 @property def rawbytes(self): return self._rawarray class ByteStore(ConstByteStore): """Adding mutating methods to ConstByteStore Used internally - not part of public interface. """ __slots__ = () def setbit(self, pos): assert 0 <= pos < self.bitlength byte, bit = divmod(self.offset + pos, 8) self._rawarray[byte] |= (128 >> bit) def unsetbit(self, pos): assert 0 <= pos < self.bitlength byte, bit = divmod(self.offset + pos, 8) self._rawarray[byte] &= ~(128 >> bit) def invertbit(self, pos): assert 0 <= pos < self.bitlength byte, bit = divmod(self.offset + pos, 8) self._rawarray[byte] ^= (128 >> bit) def setbyte(self, pos, value): self._rawarray[pos] = value def setbyteslice(self, start, end, value): self._rawarray[start:end] = value def offsetcopy(s, newoffset): """Return a copy of a ByteStore with the newoffset. Not part of public interface. 
""" assert 0 <= newoffset < 8 if not s.bitlength: return copy.copy(s) else: if newoffset == s.offset % 8: return ByteStore(s.getbyteslice(s.byteoffset, s.byteoffset + s.bytelength), s.bitlength, newoffset) newdata = [] d = s._rawarray assert newoffset != s.offset % 8 if newoffset < s.offset % 8: # We need to shift everything left shiftleft = s.offset % 8 - newoffset # First deal with everything except for the final byte for x in range(s.byteoffset, s.byteoffset + s.bytelength - 1): newdata.append(((d[x] << shiftleft) & 0xff) +\ (d[x + 1] >> (8 - shiftleft))) bits_in_last_byte = (s.offset + s.bitlength) % 8 if not bits_in_last_byte: bits_in_last_byte = 8 if bits_in_last_byte > shiftleft: newdata.append((d[s.byteoffset + s.bytelength - 1] << shiftleft) & 0xff) else: # newoffset > s._offset % 8 shiftright = newoffset - s.offset % 8 newdata.append(s.getbyte(0) >> shiftright) for x in range(s.byteoffset + 1, s.byteoffset + s.bytelength): newdata.append(((d[x - 1] << (8 - shiftright)) & 0xff) +\ (d[x] >> shiftright)) bits_in_last_byte = (s.offset + s.bitlength) % 8 if not bits_in_last_byte: bits_in_last_byte = 8 if bits_in_last_byte + shiftright > 8: newdata.append((d[s.byteoffset + s.bytelength - 1] << (8 - shiftright)) & 0xff) new_s = ByteStore(bytearray(newdata), s.bitlength, newoffset) assert new_s.offset == newoffset return new_s def equal(a, b): """Return True if ByteStores a == b. Not part of public interface. """ # We want to return False for inequality as soon as possible, which # means we get lots of special cases. # First the easy one - compare lengths: a_bitlength = a.bitlength b_bitlength = b.bitlength if a_bitlength != b_bitlength: return False if not a_bitlength: assert b_bitlength == 0 return True # Make 'a' the one with the smaller offset if (a.offset % 8) > (b.offset % 8): a, b = b, a # and create some aliases a_bitoff = a.offset % 8 b_bitoff = b.offset % 8 a_byteoffset = a.byteoffset b_byteoffset = b.byteoffset a_bytelength = a.bytelength b_bytelength = b.bytelength da = a._rawarray db = b._rawarray # If they are pointing to the same data, they must be equal if da is db and a.offset == b.offset: return True if a_bitoff == b_bitoff: bits_spare_in_last_byte = 8 - (a_bitoff + a_bitlength) % 8 if bits_spare_in_last_byte == 8: bits_spare_in_last_byte = 0 # Special case for a, b contained in a single byte if a_bytelength == 1: a_val = ((da[a_byteoffset] << a_bitoff) & 0xff) >> (8 - a_bitlength) b_val = ((db[b_byteoffset] << b_bitoff) & 0xff) >> (8 - b_bitlength) return a_val == b_val # Otherwise check first byte if da[a_byteoffset] & (0xff >> a_bitoff) != db[b_byteoffset] & (0xff >> b_bitoff): return False # then everything up to the last b_a_offset = b_byteoffset - a_byteoffset for x in range(1 + a_byteoffset, a_byteoffset + a_bytelength - 1): if da[x] != db[b_a_offset + x]: return False # and finally the last byte return (da[a_byteoffset + a_bytelength - 1] >> bits_spare_in_last_byte == db[b_byteoffset + b_bytelength - 1] >> bits_spare_in_last_byte) assert a_bitoff != b_bitoff # This is how much we need to shift a to the right to compare with b: shift = b_bitoff - a_bitoff # Special case for b only one byte long if b_bytelength == 1: assert a_bytelength == 1 a_val = ((da[a_byteoffset] << a_bitoff) & 0xff) >> (8 - a_bitlength) b_val = ((db[b_byteoffset] << b_bitoff) & 0xff) >> (8 - b_bitlength) return a_val == b_val # Special case for a only one byte long if a_bytelength == 1: assert b_bytelength == 2 a_val = ((da[a_byteoffset] << a_bitoff) & 0xff) >> (8 - a_bitlength) b_val = 
((db[b_byteoffset] << 8) + db[b_byteoffset + 1]) << b_bitoff b_val &= 0xffff b_val >>= 16 - b_bitlength return a_val == b_val # Compare first byte of b with bits from first byte of a if (da[a_byteoffset] & (0xff >> a_bitoff)) >> shift != db[b_byteoffset] & (0xff >> b_bitoff): return False # Now compare every full byte of b with bits from 2 bytes of a for x in range(1, b_bytelength - 1): # Construct byte from 2 bytes in a to compare to byte in b b_val = db[b_byteoffset + x] a_val = ((da[a_byteoffset + x - 1] << 8) + da[a_byteoffset + x]) >> shift a_val &= 0xff if a_val != b_val: return False # Now check bits in final byte of b final_b_bits = (b.offset + b_bitlength) % 8 if not final_b_bits: final_b_bits = 8 b_val = db[b_byteoffset + b_bytelength - 1] >> (8 - final_b_bits) final_a_bits = (a.offset + a_bitlength) % 8 if not final_a_bits: final_a_bits = 8 if b.bytelength > a_bytelength: assert b_bytelength == a_bytelength + 1 a_val = da[a_byteoffset + a_bytelength - 1] >> (8 - final_a_bits) a_val &= 0xff >> (8 - final_b_bits) return a_val == b_val assert a_bytelength == b_bytelength a_val = da[a_byteoffset + a_bytelength - 2] << 8 a_val += da[a_byteoffset + a_bytelength - 1] a_val >>= (8 - final_a_bits) a_val &= 0xff >> (8 - final_b_bits) return a_val == b_val class MmapByteArray(object): """Looks like a bytearray, but from an mmap. Not part of public interface. """ __slots__ = ('filemap', 'filelength', 'source', 'byteoffset', 'bytelength') def __init__(self, source, bytelength=None, byteoffset=None): self.source = source source.seek(0, os.SEEK_END) self.filelength = source.tell() if byteoffset is None: byteoffset = 0 if bytelength is None: bytelength = self.filelength - byteoffset self.byteoffset = byteoffset self.bytelength = bytelength self.filemap = mmap.mmap(source.fileno(), 0, access=mmap.ACCESS_READ) def __getitem__(self, key): try: start = key.start stop = key.stop except AttributeError: try: assert 0 <= key < self.bytelength return ord(self.filemap[key + self.byteoffset]) except TypeError: # for Python 3 return self.filemap[key + self.byteoffset] else: if start is None: start = 0 if stop is None: stop = self.bytelength assert key.step is None assert 0 <= start < self.bytelength assert 0 <= stop <= self.bytelength s = slice(start + self.byteoffset, stop + self.byteoffset) return bytearray(self.filemap.__getitem__(s)) def __len__(self): return self.bytelength # This creates a dictionary for every possible byte with the value being # the key with its bits reversed. BYTE_REVERSAL_DICT = dict() # For Python 2.x/ 3.x coexistence # Yes this is very very hacky. 
try: xrange for i in range(256): BYTE_REVERSAL_DICT[i] = chr(int("{0:08b}".format(i)[::-1], 2)) except NameError: for i in range(256): BYTE_REVERSAL_DICT[i] = bytes([int("{0:08b}".format(i)[::-1], 2)]) from io import IOBase as file xrange = range basestring = str # Python 2.x octals start with '0', in Python 3 it's '0o' LEADING_OCT_CHARS = len(oct(1)) - 1 def tidy_input_string(s): """Return string made lowercase and with all whitespace removed.""" s = ''.join(s.split()).lower() return s INIT_NAMES = ('uint', 'int', 'ue', 'se', 'sie', 'uie', 'hex', 'oct', 'bin', 'bits', 'uintbe', 'intbe', 'uintle', 'intle', 'uintne', 'intne', 'float', 'floatbe', 'floatle', 'floatne', 'bytes', 'bool', 'pad') TOKEN_RE = re.compile(r'(?P<name>' + '|'.join(INIT_NAMES) + r')((:(?P<len>[^=]+)))?(=(?P<value>.*))?$', re.IGNORECASE) DEFAULT_UINT = re.compile(r'(?P<len>[^=]+)?(=(?P<value>.*))?$', re.IGNORECASE) MULTIPLICATIVE_RE = re.compile(r'(?P<factor>.*)\*(?P<token>.+)') # Hex, oct or binary literals LITERAL_RE = re.compile(r'(?P<name>0(x|o|b))(?P<value>.+)', re.IGNORECASE) # An endianness indicator followed by one or more struct.pack codes STRUCT_PACK_RE = re.compile(r'(?P<endian><|>|@)?(?P<fmt>(?:\d*[bBhHlLqQfd])+)$') # A number followed by a single character struct.pack code STRUCT_SPLIT_RE = re.compile(r'\d*[bBhHlLqQfd]') # These replicate the struct.pack codes # Big-endian REPLACEMENTS_BE = {'b': 'intbe:8', 'B': 'uintbe:8', 'h': 'intbe:16', 'H': 'uintbe:16', 'l': 'intbe:32', 'L': 'uintbe:32', 'q': 'intbe:64', 'Q': 'uintbe:64', 'f': 'floatbe:32', 'd': 'floatbe:64'} # Little-endian REPLACEMENTS_LE = {'b': 'intle:8', 'B': 'uintle:8', 'h': 'intle:16', 'H': 'uintle:16', 'l': 'intle:32', 'L': 'uintle:32', 'q': 'intle:64', 'Q': 'uintle:64', 'f': 'floatle:32', 'd': 'floatle:64'} # Size in bytes of all the pack codes. PACK_CODE_SIZE = {'b': 1, 'B': 1, 'h': 2, 'H': 2, 'l': 4, 'L': 4, 'q': 8, 'Q': 8, 'f': 4, 'd': 8} _tokenname_to_initialiser = {'hex': 'hex', '0x': 'hex', '0X': 'hex', 'oct': 'oct', '0o': 'oct', '0O': 'oct', 'bin': 'bin', '0b': 'bin', '0B': 'bin', 'bits': 'auto', 'bytes': 'bytes', 'pad': 'pad'} def structparser(token): """Parse struct-like format string token into sub-token list.""" m = STRUCT_PACK_RE.match(token) if not m: return [token] else: endian = m.group('endian') if endian is None: return [token] # Split the format string into a list of 'q', '4h' etc. formatlist = re.findall(STRUCT_SPLIT_RE, m.group('fmt')) # Now deal with mulitiplicative factors, 4h -> hhhh etc. fmt = ''.join([f[-1] * int(f[:-1]) if len(f) != 1 else f for f in formatlist]) if endian == '@': # Native endianness if byteorder == 'little': endian = '<' else: assert byteorder == 'big' endian = '>' if endian == '<': tokens = [REPLACEMENTS_LE[c] for c in fmt] else: assert endian == '>' tokens = [REPLACEMENTS_BE[c] for c in fmt] return tokens def tokenparser(fmt, keys=None, token_cache={}): """Divide the format string into tokens and parse them. Return stretchy token and list of [initialiser, length, value] initialiser is one of: hex, oct, bin, uint, int, se, ue, 0x, 0o, 0b etc. length is None if not known, as is value. If the token is in the keyword dictionary (keys) then it counts as a special case and isn't messed with. tokens must be of the form: [factor*][initialiser][:][length][=value] """ try: return token_cache[(fmt, keys)] except KeyError: token_key = (fmt, keys) # Very inefficient expanding of brackets. 
fmt = expand_brackets(fmt) # Split tokens by ',' and remove whitespace # The meta_tokens can either be ordinary single tokens or multiple # struct-format token strings. meta_tokens = (''.join(f.split()) for f in fmt.split(',')) return_values = [] stretchy_token = False for meta_token in meta_tokens: # See if it has a multiplicative factor m = MULTIPLICATIVE_RE.match(meta_token) if not m: factor = 1 else: factor = int(m.group('factor')) meta_token = m.group('token') # See if it's a struct-like format tokens = structparser(meta_token) ret_vals = [] for token in tokens: if keys and token in keys: # Don't bother parsing it, it's a keyword argument ret_vals.append([token, None, None]) continue value = length = None if token == '': continue # Match literal tokens of the form 0x... 0o... and 0b... m = LITERAL_RE.match(token) if m: name = m.group('name') value = m.group('value') ret_vals.append([name, length, value]) continue # Match everything else: m1 = TOKEN_RE.match(token) if not m1: # and if you don't specify a 'name' then the default is 'uint': m2 = DEFAULT_UINT.match(token) if not m2: raise ValueError("Don't understand token '{0}'.".format(token)) if m1: name = m1.group('name') length = m1.group('len') if m1.group('value'): value = m1.group('value') else: assert m2 name = 'uint' length = m2.group('len') if m2.group('value'): value = m2.group('value') if name == 'bool': if length is not None: raise ValueError("You can't specify a length with bool tokens - they are always one bit.") length = 1 if length is None and name not in ('se', 'ue', 'sie', 'uie'): stretchy_token = True if length is not None: # Try converting length to int, otherwise check it's a key. try: length = int(length) if length < 0: raise Error # For the 'bytes' token convert length to bits. if name == 'bytes': length *= 8 except Error: raise ValueError("Can't read a token with a negative length.") except ValueError: if not keys or length not in keys: raise ValueError("Don't understand length '{0}' of token.".format(length)) ret_vals.append([name, length, value]) # This multiplies by the multiplicative factor, but this means that # we can't allow keyword values as multipliers (e.g. n*uint:8). # The only way to do this would be to return the factor in some fashion # (we can't use the key's value here as it would mean that we couldn't # sensibly continue to cache the function's results. (TODO). return_values.extend(ret_vals * factor) return_values = [tuple(x) for x in return_values] if len(token_cache) < CACHE_SIZE: token_cache[token_key] = stretchy_token, return_values return stretchy_token, return_values # Looks for first number*( BRACKET_RE = re.compile(r'(?P<factor>\d+)\*\(') def expand_brackets(s): """Remove whitespace and expand all brackets.""" s = ''.join(s.split()) while True: start = s.find('(') if start == -1: break count = 1 # Number of hanging open brackets p = start + 1 while p < len(s): if s[p] == '(': count += 1 if s[p] == ')': count -= 1 if not count: break p += 1 if count: raise ValueError("Unbalanced parenthesis in '{0}'.".format(s)) if start == 0 or s[start - 1] != '*': s = s[0:start] + s[start + 1:p] + s[p + 1:] else: m = BRACKET_RE.search(s) if m: factor = int(m.group('factor')) matchstart = m.start('factor') s = s[0:matchstart] + (factor - 1) * (s[start + 1:p] + ',') + s[start + 1:p] + s[p + 1:] else: raise ValueError("Failed to parse '{0}'.".format(s)) return s # This converts a single octal digit to 3 bits. 
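# Illustrative sketch (standalone): expand_brackets above rewrites a factor in
# front of a parenthesised group, e.g. '2*(uint:8, bool)', as that many
# comma-separated copies of the group's contents. A simplified version that
# handles only non-nested groups, to show the transformation:
import re as _re2

_GROUP = _re2.compile(r'(\d+)\*\(([^()]*)\)')

def _expand_simple(s):
    s = ''.join(s.split())                            # drop all whitespace
    while True:
        m = _GROUP.search(s)
        if not m:
            return s
        factor, body = int(m.group(1)), m.group(2)
        s = s[:m.start()] + ','.join([body] * factor) + s[m.end():]

assert _expand_simple('2*(uint:8, bool), 0b1') == 'uint:8,bool,uint:8,bool,0b1'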
OCT_TO_BITS = ['{0:03b}'.format(i) for i in xrange(8)] # A dictionary of number of 1 bits contained in binary representation of any byte BIT_COUNT = dict(zip(xrange(256), [bin(i).count('1') for i in xrange(256)])) class Bits(object): """A container holding an immutable sequence of bits. For a mutable container use the BitArray class instead. Methods: all() -- Check if all specified bits are set to 1 or 0. any() -- Check if any of specified bits are set to 1 or 0. count() -- Count the number of bits set to 1 or 0. cut() -- Create generator of constant sized chunks. endswith() -- Return whether the bitstring ends with a sub-string. find() -- Find a sub-bitstring in the current bitstring. findall() -- Find all occurrences of a sub-bitstring in the current bitstring. join() -- Join bitstrings together using current bitstring. rfind() -- Seek backwards to find a sub-bitstring. split() -- Create generator of chunks split by a delimiter. startswith() -- Return whether the bitstring starts with a sub-bitstring. tobytes() -- Return bitstring as bytes, padding if needed. tofile() -- Write bitstring to file, padding if needed. unpack() -- Interpret bits using format string. Special methods: Also available are the operators [], ==, !=, +, *, ~, <<, >>, &, |, ^. Properties: bin -- The bitstring as a binary string. bool -- For single bit bitstrings, interpret as True or False. bytes -- The bitstring as a bytes object. float -- Interpret as a floating point number. floatbe -- Interpret as a big-endian floating point number. floatle -- Interpret as a little-endian floating point number. floatne -- Interpret as a native-endian floating point number. hex -- The bitstring as a hexadecimal string. int -- Interpret as a two's complement signed integer. intbe -- Interpret as a big-endian signed integer. intle -- Interpret as a little-endian signed integer. intne -- Interpret as a native-endian signed integer. len -- Length of the bitstring in bits. oct -- The bitstring as an octal string. se -- Interpret as a signed exponential-Golomb code. ue -- Interpret as an unsigned exponential-Golomb code. sie -- Interpret as a signed interleaved exponential-Golomb code. uie -- Interpret as an unsigned interleaved exponential-Golomb code. uint -- Interpret as a two's complement unsigned integer. uintbe -- Interpret as a big-endian unsigned integer. uintle -- Interpret as a little-endian unsigned integer. uintne -- Interpret as a native-endian unsigned integer. """ __slots__ = ('_datastore') def __init__(self, auto=None, length=None, offset=None, **kwargs): """Either specify an 'auto' initialiser: auto -- a string of comma separated tokens, an integer, a file object, a bytearray, a boolean iterable or another bitstring. Or initialise via **kwargs with one (and only one) of: bytes -- raw data as a string, for example read from a binary file. bin -- binary string representation, e.g. '0b001010'. hex -- hexadecimal string representation, e.g. '0x2ef' oct -- octal string representation, e.g. '0o777'. uint -- an unsigned integer. int -- a signed integer. float -- a floating point number. uintbe -- an unsigned big-endian whole byte integer. intbe -- a signed big-endian whole byte integer. floatbe - a big-endian floating point number. uintle -- an unsigned little-endian whole byte integer. intle -- a signed little-endian whole byte integer. floatle -- a little-endian floating point number. uintne -- an unsigned native-endian whole byte integer. intne -- a signed native-endian whole byte integer. 
floatne -- a native-endian floating point number. se -- a signed exponential-Golomb code. ue -- an unsigned exponential-Golomb code. sie -- a signed interleaved exponential-Golomb code. uie -- an unsigned interleaved exponential-Golomb code. bool -- a boolean (True or False). filename -- a file which will be opened in binary read-only mode. Other keyword arguments: length -- length of the bitstring in bits, if needed and appropriate. It must be supplied for all integer and float initialisers. offset -- bit offset to the data. These offset bits are ignored and this is mainly intended for use when initialising using 'bytes' or 'filename'. """ pass def __new__(cls, auto=None, length=None, offset=None, _cache={}, **kwargs): # For instances auto-initialised with a string we intern the # instance for re-use. try: if isinstance(auto, basestring): try: return _cache[auto] except KeyError: x = object.__new__(Bits) try: _, tokens = tokenparser(auto) except ValueError as e: raise CreationError(*e.args) x._datastore = ConstByteStore(bytearray(0), 0, 0) for token in tokens: x._datastore._appendstore(Bits._init_with_token(*token)._datastore) assert x._assertsanity() if len(_cache) < CACHE_SIZE: _cache[auto] = x return x if isinstance(auto, Bits): return auto except TypeError: pass x = super(Bits, cls).__new__(cls) x._initialise(auto, length, offset, **kwargs) return x def _initialise(self, auto, length, offset, **kwargs): if length is not None and length < 0: raise CreationError("bitstring length cannot be negative.") if offset is not None and offset < 0: raise CreationError("offset must be >= 0.") if auto is not None: self._initialise_from_auto(auto, length, offset) return if not kwargs: # No initialisers, so initialise with nothing or zero bits if length is not None and length != 0: data = bytearray((length + 7) // 8) self._setbytes_unsafe(data, length, 0) return self._setbytes_unsafe(bytearray(0), 0, 0) return k, v = kwargs.popitem() try: init_without_length_or_offset[k](self, v) if length is not None or offset is not None: raise CreationError("Cannot use length or offset with this initialiser.") except KeyError: try: init_with_length_only[k](self, v, length) if offset is not None: raise CreationError("Cannot use offset with this initialiser.") except KeyError: if offset is None: offset = 0 try: init_with_length_and_offset[k](self, v, length, offset) except KeyError: raise CreationError("Unrecognised keyword '{0}' used to initialise.", k) def _initialise_from_auto(self, auto, length, offset): if offset is None: offset = 0 self._setauto(auto, length, offset) return def __copy__(self): """Return a new copy of the Bits for the copy module.""" # Note that if you want a new copy (different ID), use _copy instead. # The copy can return self as it's immutable. return self def __lt__(self, other): raise TypeError("unorderable type: {0}".format(type(self).__name__)) def __gt__(self, other): raise TypeError("unorderable type: {0}".format(type(self).__name__)) def __le__(self, other): raise TypeError("unorderable type: {0}".format(type(self).__name__)) def __ge__(self, other): raise TypeError("unorderable type: {0}".format(type(self).__name__)) def __add__(self, bs): """Concatenate bitstrings and return new bitstring. bs -- the bitstring to append. """ bs = Bits(bs) if bs.len <= self.len: s = self._copy() s._append(bs) else: s = bs._copy() s = self.__class__(s) s._prepend(self) return s def __radd__(self, bs): """Append current bitstring to bs and return new bitstring. 
bs -- the string for the 'auto' initialiser that will be appended to. """ bs = self._converttobitstring(bs) return bs.__add__(self) def __getitem__(self, key): """Return a new bitstring representing a slice of the current bitstring. Indices are in units of the step parameter (default 1 bit). Stepping is used to specify the number of bits in each item. >>> print BitArray('0b00110')[1:4] '0b011' >>> print BitArray('0x00112233')[1:3:8] '0x1122' """ length = self.len try: step = key.step if key.step is not None else 1 except AttributeError: # single element if key < 0: key += length if not 0 <= key < length: raise IndexError("Slice index out of range.") # Single bit, return True or False return self._datastore.getbit(key) else: if step != 1: # convert to binary string and use string slicing bs = self.__class__() bs._setbin_unsafe(self._getbin().__getitem__(key)) return bs start, stop = 0, length if key.start is not None: start = key.start if key.start < 0: start += stop if key.stop is not None: stop = key.stop if key.stop < 0: stop += length start = max(start, 0) stop = min(stop, length) if start < stop: return self._slice(start, stop) else: return self.__class__() def __len__(self): """Return the length of the bitstring in bits.""" return self._getlength() def __str__(self): """Return approximate string representation of bitstring for printing. Short strings will be given wholly in hexadecimal or binary. Longer strings may be part hexadecimal and part binary. Very long strings will be truncated with '...'. """ length = self.len if not length: return '' if length > MAX_CHARS * 4: # Too long for hex. Truncate... return ''.join(('0x', self._readhex(MAX_CHARS * 4, 0), '...')) # If it's quite short and we can't do hex then use bin if length < 32 and length % 4 != 0: return '0b' + self.bin # If we can use hex then do so if not length % 4: return '0x' + self.hex # Otherwise first we do as much as we can in hex # then add on 1, 2 or 3 bits on at the end bits_at_end = length % 4 return ''.join(('0x', self._readhex(length - bits_at_end, 0), ', ', '0b', self._readbin(bits_at_end, length - bits_at_end))) def __repr__(self): """Return representation that could be used to recreate the bitstring. If the returned string is too long it will be truncated. See __str__(). """ length = self.len if isinstance(self._datastore._rawarray, MmapByteArray): offsetstring = '' if self._datastore.byteoffset or self._offset: offsetstring = ", offset=%d" % (self._datastore._rawarray.byteoffset * 8 + self._offset) lengthstring = ", length=%d" % length return "{0}(filename='{1}'{2}{3})".format(self.__class__.__name__, self._datastore._rawarray.source.name, lengthstring, offsetstring) else: s = self.__str__() lengthstring = '' if s.endswith('...'): lengthstring = " # length={0}".format(length) return "{0}('{1}'){2}".format(self.__class__.__name__, s, lengthstring) def __eq__(self, bs): """Return True if two bitstrings have the same binary representation. >>> BitArray('0b1110') == '0xe' True """ try: bs = Bits(bs) except TypeError: return False return equal(self._datastore, bs._datastore) def __ne__(self, bs): """Return False if two bitstrings have the same binary representation. >>> BitArray('0b111') == '0x7' False """ return not self.__eq__(bs) def __invert__(self): """Return bitstring with every bit inverted. Raises Error if the bitstring is empty. 
""" if not self.len: raise Error("Cannot invert empty bitstring.") s = self._copy() s._invert_all() return s def __lshift__(self, n): """Return bitstring with bits shifted by n to the left. n -- the number of bits to shift. Must be >= 0. """ if n < 0: raise ValueError("Cannot shift by a negative amount.") if not self.len: raise ValueError("Cannot shift an empty bitstring.") n = min(n, self.len) s = self._slice(n, self.len) s._append(Bits(n)) return s def __rshift__(self, n): """Return bitstring with bits shifted by n to the right. n -- the number of bits to shift. Must be >= 0. """ if n < 0: raise ValueError("Cannot shift by a negative amount.") if not self.len: raise ValueError("Cannot shift an empty bitstring.") if not n: return self._copy() s = self.__class__(length=min(n, self.len)) s._append(self[:-n]) return s def __mul__(self, n): """Return bitstring consisting of n concatenations of self. Called for expression of the form 'a = b*3'. n -- The number of concatenations. Must be >= 0. """ if n < 0: raise ValueError("Cannot multiply by a negative integer.") if not n: return self.__class__() s = self._copy() s._imul(n) return s def __rmul__(self, n): """Return bitstring consisting of n concatenations of self. Called for expressions of the form 'a = 3*b'. n -- The number of concatenations. Must be >= 0. """ return self.__mul__(n) def __and__(self, bs): """Bit-wise 'and' between two bitstrings. Returns new bitstring. bs -- The bitstring to '&' with. Raises ValueError if the two bitstrings have differing lengths. """ bs = Bits(bs) if self.len != bs.len: raise ValueError("Bitstrings must have the same length " "for & operator.") s = self._copy() s._iand(bs) return s def __rand__(self, bs): """Bit-wise 'and' between two bitstrings. Returns new bitstring. bs -- the bitstring to '&' with. Raises ValueError if the two bitstrings have differing lengths. """ return self.__and__(bs) def __or__(self, bs): """Bit-wise 'or' between two bitstrings. Returns new bitstring. bs -- The bitstring to '|' with. Raises ValueError if the two bitstrings have differing lengths. """ bs = Bits(bs) if self.len != bs.len: raise ValueError("Bitstrings must have the same length " "for | operator.") s = self._copy() s._ior(bs) return s def __ror__(self, bs): """Bit-wise 'or' between two bitstrings. Returns new bitstring. bs -- The bitstring to '|' with. Raises ValueError if the two bitstrings have differing lengths. """ return self.__or__(bs) def __xor__(self, bs): """Bit-wise 'xor' between two bitstrings. Returns new bitstring. bs -- The bitstring to '^' with. Raises ValueError if the two bitstrings have differing lengths. """ bs = Bits(bs) if self.len != bs.len: raise ValueError("Bitstrings must have the same length " "for ^ operator.") s = self._copy() s._ixor(bs) return s def __rxor__(self, bs): """Bit-wise 'xor' between two bitstrings. Returns new bitstring. bs -- The bitstring to '^' with. Raises ValueError if the two bitstrings have differing lengths. """ return self.__xor__(bs) def __contains__(self, bs): """Return whether bs is contained in the current bitstring. bs -- The bitstring to search for. """ # Don't want to change pos try: pos = self._pos except AttributeError: pass found = Bits.find(self, bs, bytealigned=False) try: self._pos = pos except AttributeError: pass return bool(found) def __hash__(self): """Return an integer hash of the object.""" # We can't in general hash the whole bitstring (it could take hours!) # So instead take some bits from the start and end. 
if self.len <= 160: # Use the whole bitstring. shorter = self else: # Take 10 bytes from start and end shorter = self[:80] + self[-80:] h = 0 for byte in shorter.tobytes(): try: h = (h << 4) + ord(byte) except TypeError: # Python 3 h = (h << 4) + byte g = h & 0xf0000000 if g & (1 << 31): h ^= (g >> 24) h ^= g return h % 1442968193 # This is only used in Python 2.x... def __nonzero__(self): """Return True if any bits are set to 1, otherwise return False.""" return self.any(True) # ...whereas this is used in Python 3.x __bool__ = __nonzero__ def _assertsanity(self): """Check internal self consistency as a debugging aid.""" assert self.len >= 0 assert 0 <= self._offset, "offset={0}".format(self._offset) assert (self.len + self._offset + 7) // 8 == self._datastore.bytelength + self._datastore.byteoffset return True @classmethod def _init_with_token(cls, name, token_length, value): if token_length is not None: token_length = int(token_length) if token_length == 0: return cls() # For pad token just return the length in zero bits if name == 'pad': return cls(token_length) if value is None: if token_length is None: error = "Token has no value ({0}=???).".format(name) else: error = "Token has no value ({0}:{1}=???).".format(name, token_length) raise ValueError(error) try: b = cls(**{_tokenname_to_initialiser[name]: value}) except KeyError: if name in ('se', 'ue', 'sie', 'uie'): b = cls(**{name: int(value)}) elif name in ('uint', 'int', 'uintbe', 'intbe', 'uintle', 'intle', 'uintne', 'intne'): b = cls(**{name: int(value), 'length': token_length}) elif name in ('float', 'floatbe', 'floatle', 'floatne'): b = cls(**{name: float(value), 'length': token_length}) elif name == 'bool': if value in (1, 'True', '1'): b = cls(bool=True) elif value in (0, 'False', '0'): b = cls(bool=False) else: raise CreationError("bool token can only be 'True' or 'False'.") else: raise CreationError("Can't parse token name {0}.", name) if token_length is not None and b.len != token_length: msg = "Token with length {0} packed with value of length {1} ({2}:{3}={4})." raise CreationError(msg, token_length, b.len, name, token_length, value) return b def _clear(self): """Reset the bitstring to an empty state.""" self._datastore = ByteStore(bytearray(0)) def _setauto(self, s, length, offset): """Set bitstring from a bitstring, file, bool, integer, iterable or string.""" # As s can be so many different things it's important to do the checks # in the correct order, as some types are also other allowed types. # So basestring must be checked before Iterable # and bytes/bytearray before Iterable but after basestring! 
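# Sketch (standalone): __hash__ above samples at most 20 bytes (10 from each
# end of a long bitstring) and folds them with a PJW-style shift-and-xor
# before a final modulo, so hashing never depends on the full length. The
# folding step on raw bytes:
def _fold_hash(data):
    h = 0
    for byte in bytearray(data):
        h = (h << 4) + byte
        g = h & 0xf0000000
        if g & (1 << 31):
            h ^= (g >> 24)
        h ^= g
    return h % 1442968193

assert _fold_hash(b'') == 0
assert 0 <= _fold_hash(b'\xde\xad\xbe\xef') < 1442968193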
if isinstance(s, Bits): if length is None: length = s.len - offset self._setbytes_unsafe(s._datastore.rawbytes, length, s._offset + offset) return if isinstance(s, file): if offset is None: offset = 0 if length is None: length = os.path.getsize(s.name) * 8 - offset byteoffset, offset = divmod(offset, 8) bytelength = (length + byteoffset * 8 + offset + 7) // 8 - byteoffset m = MmapByteArray(s, bytelength, byteoffset) if length + byteoffset * 8 + offset > m.filelength * 8: raise CreationError("File is not long enough for specified " "length and offset.") self._datastore = ConstByteStore(m, length, offset) return if length is not None: raise CreationError("The length keyword isn't applicable to this initialiser.") if offset: raise CreationError("The offset keyword isn't applicable to this initialiser.") if isinstance(s, basestring): bs = self._converttobitstring(s) assert bs._offset == 0 self._setbytes_unsafe(bs._datastore.rawbytes, bs.length, 0) return if isinstance(s, (bytes, bytearray)): self._setbytes_unsafe(bytearray(s), len(s) * 8, 0) return if isinstance(s, numbers.Integral): # Initialise with s zero bits. if s < 0: msg = "Can't create bitstring of negative length {0}." raise CreationError(msg, s) data = bytearray((s + 7) // 8) self._datastore = ByteStore(data, s, 0) return if isinstance(s, collections.Iterable): # Evaluate each item as True or False and set bits to 1 or 0. self._setbin_unsafe(''.join(str(int(bool(x))) for x in s)) return raise TypeError("Cannot initialise bitstring from {0}.".format(type(s))) def _setfile(self, filename, length, offset): """Use file as source of bits.""" source = open(filename, 'rb') if offset is None: offset = 0 if length is None: length = os.path.getsize(source.name) * 8 - offset byteoffset, offset = divmod(offset, 8) bytelength = (length + byteoffset * 8 + offset + 7) // 8 - byteoffset m = MmapByteArray(source, bytelength, byteoffset) if length + byteoffset * 8 + offset > m.filelength * 8: raise CreationError("File is not long enough for specified " "length and offset.") self._datastore = ConstByteStore(m, length, offset) def _setbytes_safe(self, data, length=None, offset=0): """Set the data from a string.""" data = bytearray(data) if length is None: # Use to the end of the data length = len(data)*8 - offset self._datastore = ByteStore(data, length, offset) else: if length + offset > len(data) * 8: msg = "Not enough data present. Need {0} bits, have {1}." raise CreationError(msg, length + offset, len(data) * 8) if length == 0: self._datastore = ByteStore(bytearray(0)) else: self._datastore = ByteStore(data, length, offset) def _setbytes_unsafe(self, data, length, offset): """Unchecked version of _setbytes_safe.""" self._datastore = ByteStore(data[:], length, offset) assert self._assertsanity() def _readbytes(self, length, start): """Read bytes and return them. Note that length is in bits.""" assert length % 8 == 0 assert start + length <= self.len if not (start + self._offset) % 8: return bytes(self._datastore.getbyteslice((start + self._offset) // 8, (start + self._offset + length) // 8)) return self._slice(start, start + length).tobytes() def _getbytes(self): """Return the data as an ordinary string.""" if self.len % 8: raise InterpretError("Cannot interpret as bytes unambiguously - " "not multiple of 8 bits.") return self._readbytes(self.len, 0) def _setuint(self, uint, length=None): """Reset the bitstring to have given unsigned int interpretation.""" try: if length is None: # Use the whole length. Deliberately not using .len here. 
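# Sketch (standalone): _setauto and _setfile above convert a bit offset and
# bit length into a whole-byte window (byteoffset, bytelength) plus the
# residual bit offset inside the first byte, using this arithmetic:
def _bit_window(bitoffset, bitlength):
    byteoffset, offset = divmod(bitoffset, 8)
    bytelength = (bitlength + byteoffset * 8 + offset + 7) // 8 - byteoffset
    return byteoffset, offset, bytelength

# 20 bits starting at bit 12 live in bytes 1, 2 and 3, with 4 lead-in bits.
assert _bit_window(12, 20) == (1, 4, 3)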
length = self._datastore.bitlength except AttributeError: # bitstring doesn't have a _datastore as it hasn't been created! pass # TODO: All this checking code should be hoisted out of here! if length is None or length == 0: raise CreationError("A non-zero length must be specified with a " "uint initialiser.") if uint >= (1 << length): msg = "{0} is too large an unsigned integer for a bitstring of length {1}. "\ "The allowed range is [0, {2}]." raise CreationError(msg, uint, length, (1 << length) - 1) if uint < 0: raise CreationError("uint cannot be initialsed by a negative number.") s = hex(uint)[2:] s = s.rstrip('L') if len(s) & 1: s = '0' + s try: data = bytes.fromhex(s) except AttributeError: # the Python 2.x way data = binascii.unhexlify(s) # Now add bytes as needed to get the right length. extrabytes = ((length + 7) // 8) - len(data) if extrabytes > 0: data = b'\x00' * extrabytes + data offset = 8 - (length % 8) if offset == 8: offset = 0 self._setbytes_unsafe(bytearray(data), length, offset) def _readuint(self, length, start): """Read bits and interpret as an unsigned int.""" if not length: raise InterpretError("Cannot interpret a zero length bitstring " "as an integer.") offset = self._offset startbyte = (start + offset) // 8 endbyte = (start + offset + length - 1) // 8 b = binascii.hexlify(bytes(self._datastore.getbyteslice(startbyte, endbyte + 1))) assert b i = int(b, 16) final_bits = 8 - ((start + offset + length) % 8) if final_bits != 8: i >>= final_bits i &= (1 << length) - 1 return i def _getuint(self): """Return data as an unsigned int.""" return self._readuint(self.len, 0) def _setint(self, int_, length=None): """Reset the bitstring to have given signed int interpretation.""" # If no length given, and we've previously been given a length, use it. if length is None and hasattr(self, 'len') and self.len != 0: length = self.len if length is None or length == 0: raise CreationError("A non-zero length must be specified with an int initialiser.") if int_ >= (1 << (length - 1)) or int_ < -(1 << (length - 1)): raise CreationError("{0} is too large a signed integer for a bitstring of length {1}. " "The allowed range is [{2}, {3}].", int_, length, -(1 << (length - 1)), (1 << (length - 1)) - 1) if int_ >= 0: self._setuint(int_, length) return # TODO: We should decide whether to just use the _setuint, or to do the bit flipping, # based upon which will be quicker. If the -ive number is less than half the maximum # possible then it's probably quicker to do the bit flipping... # Do the 2's complement thing. Add one, set to minus number, then flip bits. int_ += 1 self._setuint(-int_, length) self._invert_all() def _readint(self, length, start): """Read bits and interpret as a signed int""" ui = self._readuint(length, start) if not ui >> (length - 1): # Top bit not set, number is positive return ui # Top bit is set, so number is negative tmp = (~(ui - 1)) & ((1 << length) - 1) return -tmp def _getint(self): """Return data as a two's complement signed int.""" return self._readint(self.len, 0) def _setuintbe(self, uintbe, length=None): """Set the bitstring to a big-endian unsigned int interpretation.""" if length is not None and length % 8 != 0: raise CreationError("Big-endian integers must be whole-byte. " "Length = {0} bits.", length) self._setuint(uintbe, length) def _readuintbe(self, length, start): """Read bits and interpret as a big-endian unsigned int.""" if length % 8: raise InterpretError("Big-endian integers must be whole-byte. 
" "Length = {0} bits.", length) return self._readuint(length, start) def _getuintbe(self): """Return data as a big-endian two's complement unsigned int.""" return self._readuintbe(self.len, 0) def _setintbe(self, intbe, length=None): """Set bitstring to a big-endian signed int interpretation.""" if length is not None and length % 8 != 0: raise CreationError("Big-endian integers must be whole-byte. " "Length = {0} bits.", length) self._setint(intbe, length) def _readintbe(self, length, start): """Read bits and interpret as a big-endian signed int.""" if length % 8: raise InterpretError("Big-endian integers must be whole-byte. " "Length = {0} bits.", length) return self._readint(length, start) def _getintbe(self): """Return data as a big-endian two's complement signed int.""" return self._readintbe(self.len, 0) def _setuintle(self, uintle, length=None): if length is not None and length % 8 != 0: raise CreationError("Little-endian integers must be whole-byte. " "Length = {0} bits.", length) self._setuint(uintle, length) self._reversebytes(0, self.len) def _readuintle(self, length, start): """Read bits and interpret as a little-endian unsigned int.""" if length % 8: raise InterpretError("Little-endian integers must be whole-byte. " "Length = {0} bits.", length) assert start + length <= self.len absolute_pos = start + self._offset startbyte, offset = divmod(absolute_pos, 8) val = 0 if not offset: endbyte = (absolute_pos + length - 1) // 8 chunksize = 4 # for 'L' format while endbyte - chunksize + 1 >= startbyte: val <<= 8 * chunksize val += struct.unpack('<L', bytes(self._datastore.getbyteslice(endbyte + 1 - chunksize, endbyte + 1)))[0] endbyte -= chunksize for b in xrange(endbyte, startbyte - 1, -1): val <<= 8 val += self._datastore.getbyte(b) else: data = self._slice(start, start + length) assert data.len % 8 == 0 data._reversebytes(0, self.len) for b in bytearray(data.bytes): val <<= 8 val += b return val def _getuintle(self): return self._readuintle(self.len, 0) def _setintle(self, intle, length=None): if length is not None and length % 8 != 0: raise CreationError("Little-endian integers must be whole-byte. " "Length = {0} bits.", length) self._setint(intle, length) self._reversebytes(0, self.len) def _readintle(self, length, start): """Read bits and interpret as a little-endian signed int.""" ui = self._readuintle(length, start) if not ui >> (length - 1): # Top bit not set, number is positive return ui # Top bit is set, so number is negative tmp = (~(ui - 1)) & ((1 << length) - 1) return -tmp def _getintle(self): return self._readintle(self.len, 0) def _setfloat(self, f, length=None): # If no length given, and we've previously been given a length, use it. 
if length is None and hasattr(self, 'len') and self.len != 0: length = self.len if length is None or length == 0: raise CreationError("A non-zero length must be specified with a " "float initialiser.") if length == 32: b = struct.pack('>f', f) elif length == 64: b = struct.pack('>d', f) else: raise CreationError("floats can only be 32 or 64 bits long, " "not {0} bits", length) self._setbytes_unsafe(bytearray(b), length, 0) def _readfloat(self, length, start): """Read bits and interpret as a float.""" if not (start + self._offset) % 8: startbyte = (start + self._offset) // 8 if length == 32: f, = struct.unpack('>f', bytes(self._datastore.getbyteslice(startbyte, startbyte + 4))) elif length == 64: f, = struct.unpack('>d', bytes(self._datastore.getbyteslice(startbyte, startbyte + 8))) else: if length == 32: f, = struct.unpack('>f', self._readbytes(32, start)) elif length == 64: f, = struct.unpack('>d', self._readbytes(64, start)) try: return f except NameError: raise InterpretError("floats can only be 32 or 64 bits long, not {0} bits", length) def _getfloat(self): """Interpret the whole bitstring as a float.""" return self._readfloat(self.len, 0) def _setfloatle(self, f, length=None): # If no length given, and we've previously been given a length, use it. if length is None and hasattr(self, 'len') and self.len != 0: length = self.len if length is None or length == 0: raise CreationError("A non-zero length must be specified with a " "float initialiser.") if length == 32: b = struct.pack('<f', f) elif length == 64: b = struct.pack('<d', f) else: raise CreationError("floats can only be 32 or 64 bits long, " "not {0} bits", length) self._setbytes_unsafe(bytearray(b), length, 0) def _readfloatle(self, length, start): """Read bits and interpret as a little-endian float.""" startbyte, offset = divmod(start + self._offset, 8) if not offset: if length == 32: f, = struct.unpack('<f', bytes(self._datastore.getbyteslice(startbyte, startbyte + 4))) elif length == 64: f, = struct.unpack('<d', bytes(self._datastore.getbyteslice(startbyte, startbyte + 8))) else: if length == 32: f, = struct.unpack('<f', self._readbytes(32, start)) elif length == 64: f, = struct.unpack('<d', self._readbytes(64, start)) try: return f except NameError: raise InterpretError("floats can only be 32 or 64 bits long, " "not {0} bits", length) def _getfloatle(self): """Interpret the whole bitstring as a little-endian float.""" return self._readfloatle(self.len, 0) def _setue(self, i): """Initialise bitstring with unsigned exponential-Golomb code for integer i. Raises CreationError if i < 0. """ if i < 0: raise CreationError("Cannot use negative initialiser for unsigned " "exponential-Golomb.") if not i: self._setbin_unsafe('1') return tmp = i + 1 leadingzeros = -1 while tmp > 0: tmp >>= 1 leadingzeros += 1 remainingpart = i + 1 - (1 << leadingzeros) binstring = '0' * leadingzeros + '1' + Bits(uint=remainingpart, length=leadingzeros).bin self._setbin_unsafe(binstring) def _readue(self, pos): """Return interpretation of next bits as unsigned exponential-Golomb code. Raises ReadError if the end of the bitstring is encountered while reading the code. 
""" oldpos = pos try: while not self[pos]: pos += 1 except IndexError: raise ReadError("Read off end of bitstring trying to read code.") leadingzeros = pos - oldpos codenum = (1 << leadingzeros) - 1 if leadingzeros > 0: if pos + leadingzeros + 1 > self.len: raise ReadError("Read off end of bitstring trying to read code.") codenum += self._readuint(leadingzeros, pos + 1) pos += leadingzeros + 1 else: assert codenum == 0 pos += 1 return codenum, pos def _getue(self): """Return data as unsigned exponential-Golomb code. Raises InterpretError if bitstring is not a single exponential-Golomb code. """ try: value, newpos = self._readue(0) if value is None or newpos != self.len: raise ReadError except ReadError: raise InterpretError("Bitstring is not a single exponential-Golomb code.") return value def _setse(self, i): """Initialise bitstring with signed exponential-Golomb code for integer i.""" if i > 0: u = (i * 2) - 1 else: u = -2 * i self._setue(u) def _getse(self): """Return data as signed exponential-Golomb code. Raises InterpretError if bitstring is not a single exponential-Golomb code. """ try: value, newpos = self._readse(0) if value is None or newpos != self.len: raise ReadError except ReadError: raise InterpretError("Bitstring is not a single exponential-Golomb code.") return value def _readse(self, pos): """Return interpretation of next bits as a signed exponential-Golomb code. Advances position to after the read code. Raises ReadError if the end of the bitstring is encountered while reading the code. """ codenum, pos = self._readue(pos) m = (codenum + 1) // 2 if not codenum % 2: return -m, pos else: return m, pos def _setuie(self, i): """Initialise bitstring with unsigned interleaved exponential-Golomb code for integer i. Raises CreationError if i < 0. """ if i < 0: raise CreationError("Cannot use negative initialiser for unsigned " "interleaved exponential-Golomb.") self._setbin_unsafe('1' if i == 0 else '0' + '0'.join(bin(i + 1)[3:]) + '1') def _readuie(self, pos): """Return interpretation of next bits as unsigned interleaved exponential-Golomb code. Raises ReadError if the end of the bitstring is encountered while reading the code. """ try: codenum = 1 while not self[pos]: pos += 1 codenum <<= 1 codenum += self[pos] pos += 1 pos += 1 except IndexError: raise ReadError("Read off end of bitstring trying to read code.") codenum -= 1 return codenum, pos def _getuie(self): """Return data as unsigned interleaved exponential-Golomb code. Raises InterpretError if bitstring is not a single exponential-Golomb code. """ try: value, newpos = self._readuie(0) if value is None or newpos != self.len: raise ReadError except ReadError: raise InterpretError("Bitstring is not a single interleaved exponential-Golomb code.") return value def _setsie(self, i): """Initialise bitstring with signed interleaved exponential-Golomb code for integer i.""" if not i: self._setbin_unsafe('1') else: self._setuie(abs(i)) self._append(Bits([i < 0])) def _getsie(self): """Return data as signed interleaved exponential-Golomb code. Raises InterpretError if bitstring is not a single exponential-Golomb code. """ try: value, newpos = self._readsie(0) if value is None or newpos != self.len: raise ReadError except ReadError: raise InterpretError("Bitstring is not a single interleaved exponential-Golomb code.") return value def _readsie(self, pos): """Return interpretation of next bits as a signed interleaved exponential-Golomb code. Advances position to after the read code. 
Raises ReadError if the end of the bitstring is encountered while reading the code. """ codenum, pos = self._readuie(pos) if not codenum: return 0, pos try: if self[pos]: return -codenum, pos + 1 else: return codenum, pos + 1 except IndexError: raise ReadError("Read off end of bitstring trying to read code.") def _setbool(self, value): # We deliberately don't want to have implicit conversions to bool here. # If we did then it would be difficult to deal with the 'False' string. if value in (1, 'True'): self._setbytes_unsafe(bytearray(b'\x80'), 1, 0) elif value in (0, 'False'): self._setbytes_unsafe(bytearray(b'\x00'), 1, 0) else: raise CreationError('Cannot initialise boolean with {0}.', value) def _getbool(self): if self.length != 1: msg = "For a bool interpretation a bitstring must be 1 bit long, not {0} bits." raise InterpretError(msg, self.length) return self[0] def _readbool(self, pos): return self[pos], pos + 1 def _setbin_safe(self, binstring): """Reset the bitstring to the value given in binstring.""" binstring = tidy_input_string(binstring) # remove any 0b if present binstring = binstring.replace('0b', '') self._setbin_unsafe(binstring) def _setbin_unsafe(self, binstring): """Same as _setbin_safe, but input isn't sanity checked. binstring mustn't start with '0b'.""" length = len(binstring) # pad with zeros up to byte boundary if needed boundary = ((length + 7) // 8) * 8 padded_binstring = binstring + '0' * (boundary - length)\ if len(binstring) < boundary else binstring try: bytelist = [int(padded_binstring[x:x + 8], 2) for x in xrange(0, len(padded_binstring), 8)] except ValueError: raise CreationError("Invalid character in bin initialiser {0}.", binstring) self._setbytes_unsafe(bytearray(bytelist), length, 0) def _readbin(self, length, start): """Read bits and interpret as a binary string.""" if not length: return '' # Get the byte slice containing our bit slice startbyte, startoffset = divmod(start + self._offset, 8) endbyte = (start + self._offset + length - 1) // 8 b = self._datastore.getbyteslice(startbyte, endbyte + 1) # Convert to a string of '0' and '1's (via a hex string an and int!) try: c = "{:0{}b}".format(int(binascii.hexlify(b), 16), 8*len(b)) except TypeError: # Hack to get Python 2.6 working c = "{0:0{1}b}".format(int(binascii.hexlify(str(b)), 16), 8*len(b)) # Finally chop off any extra bits. return c[startoffset:startoffset + length] def _getbin(self): """Return interpretation as a binary string.""" return self._readbin(self.len, 0) def _setoct(self, octstring): """Reset the bitstring to have the value given in octstring.""" octstring = tidy_input_string(octstring) # remove any 0o if present octstring = octstring.replace('0o', '') binlist = [] for i in octstring: try: if not 0 <= int(i) < 8: raise ValueError binlist.append(OCT_TO_BITS[int(i)]) except ValueError: raise CreationError("Invalid symbol '{0}' in oct initialiser.", i) self._setbin_unsafe(''.join(binlist)) def _readoct(self, length, start): """Read bits and interpret as an octal string.""" if length % 3: raise InterpretError("Cannot convert to octal unambiguously - " "not multiple of 3 bits.") if not length: return '' # Get main octal bit by converting from int. # Strip starting 0 or 0o depending on Python version. 
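# Sketch (standalone): the exponential-Golomb helpers defined a little
# earlier (_setue/_readue) encode a non-negative integer i by writing i + 1
# in binary and prefixing one '0' per bit after the leading '1'; decoding
# counts the leading zeros to know how many bits follow the terminating '1'.
def _ue_encode(i):
    binary = bin(i + 1)[2:]                   # i = 3 -> '100'
    return '0' * (len(binary) - 1) + binary   # -> '00100'

def _ue_decode(bits, pos=0):
    start = pos
    while bits[pos] == '0':
        pos += 1
    zeros = pos - start
    value = (1 << zeros) - 1
    if zeros:
        value += int(bits[pos + 1:pos + 1 + zeros], 2)
    return value, pos + 1 + zeros             # decoded value and new position

assert [_ue_encode(i) for i in range(4)] == ['1', '010', '011', '00100']
assert _ue_decode('00100') == (3, 5)
assert _ue_decode(_ue_encode(41)) == (41, len(_ue_encode(41)))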
end = oct(self._readuint(length, start))[LEADING_OCT_CHARS:] if end.endswith('L'): end = end[:-1] middle = '0' * (length // 3 - len(end)) return middle + end def _getoct(self): """Return interpretation as an octal string.""" return self._readoct(self.len, 0) def _sethex(self, hexstring): """Reset the bitstring to have the value given in hexstring.""" hexstring = tidy_input_string(hexstring) # remove any 0x if present hexstring = hexstring.replace('0x', '') length = len(hexstring) if length % 2: hexstring += '0' try: try: data = bytearray.fromhex(hexstring) except TypeError: # Python 2.6 needs a unicode string (a bug). 2.7 and 3.x work fine. data = bytearray.fromhex(unicode(hexstring)) except ValueError: raise CreationError("Invalid symbol in hex initialiser.") self._setbytes_unsafe(data, length * 4, 0) def _readhex(self, length, start): """Read bits and interpret as a hex string.""" if length % 4: raise InterpretError("Cannot convert to hex unambiguously - " "not multiple of 4 bits.") if not length: return '' # This monstrosity is the only thing I could get to work for both 2.6 and 3.1. # TODO: Is utf-8 really what we mean here? s = str(binascii.hexlify(self._slice(start, start + length).tobytes()).decode('utf-8')) # If there's one nibble too many then cut it off return s[:-1] if (length // 4) % 2 else s def _gethex(self): """Return the hexadecimal representation as a string prefixed with '0x'. Raises an InterpretError if the bitstring's length is not a multiple of 4. """ return self._readhex(self.len, 0) def _getoffset(self): return self._datastore.offset def _getlength(self): """Return the length of the bitstring in bits.""" return self._datastore.bitlength def _ensureinmemory(self): """Ensure the data is held in memory, not in a file.""" self._setbytes_unsafe(self._datastore.getbyteslice(0, self._datastore.bytelength), self.len, self._offset) @classmethod def _converttobitstring(cls, bs, offset=0, cache={}): """Convert bs to a bitstring and return it. offset gives the suggested bit offset of first significant bit, to optimise append etc. """ if isinstance(bs, Bits): return bs try: return cache[(bs, offset)] except KeyError: if isinstance(bs, basestring): b = cls() try: _, tokens = tokenparser(bs) except ValueError as e: raise CreationError(*e.args) if tokens: b._append(Bits._init_with_token(*tokens[0])) b._datastore = offsetcopy(b._datastore, offset) for token in tokens[1:]: b._append(Bits._init_with_token(*token)) assert b._assertsanity() assert b.len == 0 or b._offset == offset if len(cache) < CACHE_SIZE: cache[(bs, offset)] = b return b except TypeError: # Unhashable type pass return cls(bs) def _copy(self): """Create and return a new copy of the Bits (always in memory).""" s_copy = self.__class__() s_copy._setbytes_unsafe(self._datastore.getbyteslice(0, self._datastore.bytelength), self.len, self._offset) return s_copy def _slice(self, start, end): """Used internally to get a slice, without error checking.""" if end == start: return self.__class__() offset = self._offset startbyte, newoffset = divmod(start + offset, 8) endbyte = (end + offset - 1) // 8 bs = self.__class__() bs._setbytes_unsafe(self._datastore.getbyteslice(startbyte, endbyte + 1), end - start, newoffset) return bs def _readtoken(self, name, pos, length): """Reads a token from the bitstring and returns the result.""" if length is not None and int(length) > self.length - pos: raise ReadError("Reading off the end of the data. 
" "Tried to read {0} bits when only {1} available.".format(int(length), self.length - pos)) try: val = name_to_read[name](self, length, pos) return val, pos + length except KeyError: if name == 'pad': return None, pos + length raise ValueError("Can't parse token {0}:{1}".format(name, length)) except TypeError: # This is for the 'ue', 'se' and 'bool' tokens. They will also return the new pos. return name_to_read[name](self, pos) def _append(self, bs): """Append a bitstring to the current bitstring.""" self._datastore._appendstore(bs._datastore) def _prepend(self, bs): """Prepend a bitstring to the current bitstring.""" self._datastore._prependstore(bs._datastore) def _reverse(self): """Reverse all bits in-place.""" # Reverse the contents of each byte n = [BYTE_REVERSAL_DICT[b] for b in self._datastore.rawbytes] # Then reverse the order of the bytes n.reverse() # The new offset is the number of bits that were unused at the end. newoffset = 8 - (self._offset + self.len) % 8 if newoffset == 8: newoffset = 0 self._setbytes_unsafe(bytearray().join(n), self.length, newoffset) def _truncatestart(self, bits): """Truncate bits from the start of the bitstring.""" assert 0 <= bits <= self.len if not bits: return if bits == self.len: self._clear() return bytepos, offset = divmod(self._offset + bits, 8) self._setbytes_unsafe(self._datastore.getbyteslice(bytepos, self._datastore.bytelength), self.len - bits, offset) assert self._assertsanity() def _truncateend(self, bits): """Truncate bits from the end of the bitstring.""" assert 0 <= bits <= self.len if not bits: return if bits == self.len: self._clear() return newlength_in_bytes = (self._offset + self.len - bits + 7) // 8 self._setbytes_unsafe(self._datastore.getbyteslice(0, newlength_in_bytes), self.len - bits, self._offset) assert self._assertsanity() def _insert(self, bs, pos): """Insert bs at pos.""" assert 0 <= pos <= self.len if pos > self.len // 2: # Inserting nearer end, so cut off end. end = self._slice(pos, self.len) self._truncateend(self.len - pos) self._append(bs) self._append(end) else: # Inserting nearer start, so cut off start. start = self._slice(0, pos) self._truncatestart(pos) self._prepend(bs) self._prepend(start) try: self._pos = pos + bs.len except AttributeError: pass assert self._assertsanity() def _overwrite(self, bs, pos): """Overwrite with bs at pos.""" assert 0 <= pos < self.len if bs is self: # Just overwriting with self, so do nothing. 
assert pos == 0 return firstbytepos = (self._offset + pos) // 8 lastbytepos = (self._offset + pos + bs.len - 1) // 8 bytepos, bitoffset = divmod(self._offset + pos, 8) if firstbytepos == lastbytepos: mask = ((1 << bs.len) - 1) << (8 - bs.len - bitoffset) self._datastore.setbyte(bytepos, self._datastore.getbyte(bytepos) & (~mask)) d = offsetcopy(bs._datastore, bitoffset) self._datastore.setbyte(bytepos, self._datastore.getbyte(bytepos) | (d.getbyte(0) & mask)) else: # Do first byte mask = (1 << (8 - bitoffset)) - 1 self._datastore.setbyte(bytepos, self._datastore.getbyte(bytepos) & (~mask)) d = offsetcopy(bs._datastore, bitoffset) self._datastore.setbyte(bytepos, self._datastore.getbyte(bytepos) | (d.getbyte(0) & mask)) # Now do all the full bytes self._datastore.setbyteslice(firstbytepos + 1, lastbytepos, d.getbyteslice(1, lastbytepos - firstbytepos)) # and finally the last byte bitsleft = (self._offset + pos + bs.len) % 8 if not bitsleft: bitsleft = 8 mask = (1 << (8 - bitsleft)) - 1 self._datastore.setbyte(lastbytepos, self._datastore.getbyte(lastbytepos) & mask) self._datastore.setbyte(lastbytepos, self._datastore.getbyte(lastbytepos) | (d.getbyte(d.bytelength - 1) & ~mask)) assert self._assertsanity() def _delete(self, bits, pos): """Delete bits at pos.""" assert 0 <= pos <= self.len assert pos + bits <= self.len if not pos: # Cutting bits off at the start. self._truncatestart(bits) return if pos + bits == self.len: # Cutting bits off at the end. self._truncateend(bits) return if pos > self.len - pos - bits: # More bits before cut point than after it, so do bit shifting # on the final bits. end = self._slice(pos + bits, self.len) assert self.len - pos > 0 self._truncateend(self.len - pos) self._append(end) return # More bits after the cut point than before it. start = self._slice(0, pos) self._truncatestart(pos + bits) self._prepend(start) return def _reversebytes(self, start, end): """Reverse bytes in-place.""" # Make the start occur on a byte boundary # TODO: We could be cleverer here to avoid changing the offset. newoffset = 8 - (start % 8) if newoffset == 8: newoffset = 0 self._datastore = offsetcopy(self._datastore, newoffset) # Now just reverse the byte data toreverse = bytearray(self._datastore.getbyteslice((newoffset + start) // 8, (newoffset + end) // 8)) toreverse.reverse() self._datastore.setbyteslice((newoffset + start) // 8, (newoffset + end) // 8, toreverse) def _set(self, pos): """Set bit at pos to 1.""" assert 0 <= pos < self.len self._datastore.setbit(pos) def _unset(self, pos): """Set bit at pos to 0.""" assert 0 <= pos < self.len self._datastore.unsetbit(pos) def _invert(self, pos): """Flip bit at pos 1<->0.""" assert 0 <= pos < self.len self._datastore.invertbit(pos) def _invert_all(self): """Invert every bit.""" set = self._datastore.setbyte get = self._datastore.getbyte for p in xrange(self._datastore.byteoffset, self._datastore.byteoffset + self._datastore.bytelength): set(p, 256 + ~get(p)) def _ilshift(self, n): """Shift bits by n to the left in place. Return self.""" assert 0 < n <= self.len self._append(Bits(n)) self._truncatestart(n) return self def _irshift(self, n): """Shift bits by n to the right in place. Return self.""" assert 0 < n <= self.len self._prepend(Bits(n)) self._truncateend(n) return self def _imul(self, n): """Concatenate n copies of self in place. 
Return self.""" assert n >= 0 if not n: self._clear() return self m = 1 old_len = self.len while m * 2 < n: self._append(self) m *= 2 self._append(self[0:(n - m) * old_len]) return self def _inplace_logical_helper(self, bs, f): """Helper function containing most of the __ior__, __iand__, __ixor__ code.""" # Give the two bitstrings the same offset (modulo 8) self_byteoffset, self_bitoffset = divmod(self._offset, 8) bs_byteoffset, bs_bitoffset = divmod(bs._offset, 8) if bs_bitoffset != self_bitoffset: if not self_bitoffset: bs._datastore = offsetcopy(bs._datastore, 0) else: self._datastore = offsetcopy(self._datastore, bs_bitoffset) a = self._datastore.rawbytes b = bs._datastore.rawbytes for i in xrange(len(a)): a[i] = f(a[i + self_byteoffset], b[i + bs_byteoffset]) return self def _ior(self, bs): return self._inplace_logical_helper(bs, operator.ior) def _iand(self, bs): return self._inplace_logical_helper(bs, operator.iand) def _ixor(self, bs): return self._inplace_logical_helper(bs, operator.xor) def _readbits(self, length, start): """Read some bits from the bitstring and return newly constructed bitstring.""" return self._slice(start, start + length) def _validate_slice(self, start, end): """Validate start and end and return them as positive bit positions.""" if start is None: start = 0 elif start < 0: start += self.len if end is None: end = self.len elif end < 0: end += self.len if not 0 <= end <= self.len: raise ValueError("end is not a valid position in the bitstring.") if not 0 <= start <= self.len: raise ValueError("start is not a valid position in the bitstring.") if end < start: raise ValueError("end must not be less than start.") return start, end def unpack(self, fmt, **kwargs): """Interpret the whole bitstring using fmt and return list. fmt -- A single string or a list of strings with comma separated tokens describing how to interpret the bits in the bitstring. Items can also be integers, for reading new bitstring of the given length. kwargs -- A dictionary or keyword-value pairs - the keywords used in the format string will be replaced with their given value. Raises ValueError if the format is not understood. If not enough bits are available then all bits to the end of the bitstring will be used. See the docstring for 'read' for token examples. """ return self._readlist(fmt, 0, **kwargs)[0] def _readlist(self, fmt, pos, **kwargs): tokens = [] stretchy_token = None if isinstance(fmt, basestring): fmt = [fmt] # Not very optimal this, but replace integers with 'bits' tokens # TODO: optimise for i, f in enumerate(fmt): if isinstance(f, numbers.Integral): fmt[i] = "bits:{0}".format(f) for f_item in fmt: stretchy, tkns = tokenparser(f_item, tuple(sorted(kwargs.keys()))) if stretchy: if stretchy_token: raise Error("It's not possible to have more than one 'filler' token.") stretchy_token = stretchy tokens.extend(tkns) if not stretchy_token: lst = [] for name, length, _ in tokens: if length in kwargs: length = kwargs[length] if name == 'bytes': length *= 8 if name in kwargs and length is None: # Using default 'uint' - the name is really the length. value, pos = self._readtoken('uint', pos, kwargs[name]) lst.append(value) continue value, pos = self._readtoken(name, pos, length) if value is not None: # Don't append pad tokens lst.append(value) return lst, pos stretchy_token = False bits_after_stretchy_token = 0 for token in tokens: name, length, _ = token if length in kwargs: length = kwargs[length] if name == 'bytes': length *= 8 if name in kwargs and length is None: # Default 'uint'. 
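# Sketch (standalone): _imul above builds n copies of the data by repeatedly
# doubling what has been built so far and then appending the remainder, so
# only O(log n) append operations are needed. The same idea on a plain list:
def _repeat(seq, n):
    if not n:
        return []
    out = list(seq)
    m, old_len = 1, len(seq)
    while m * 2 < n:
        out += out
        m *= 2
    out += out[:(n - m) * old_len]
    return out

assert _repeat([1, 0], 5) == [1, 0] * 5
assert _repeat([1, 2, 3], 1) == [1, 2, 3]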
length = kwargs[name] if stretchy_token: if name in ('se', 'ue', 'sie', 'uie'): raise Error("It's not possible to parse a variable" "length token after a 'filler' token.") else: if length is None: raise Error("It's not possible to have more than " "one 'filler' token.") bits_after_stretchy_token += length if length is None and name not in ('se', 'ue', 'sie', 'uie'): assert not stretchy_token stretchy_token = token bits_left = self.len - pos return_values = [] for token in tokens: name, length, _ = token if token is stretchy_token: # Set length to the remaining bits length = max(bits_left - bits_after_stretchy_token, 0) if length in kwargs: length = kwargs[length] if name == 'bytes': length *= 8 if name in kwargs and length is None: # Default 'uint' length = kwargs[name] if length is not None: bits_left -= length value, pos = self._readtoken(name, pos, length) if value is not None: return_values.append(value) return return_values, pos def _findbytes(self, bytes_, start, end, bytealigned): """Quicker version of find when everything's whole byte and byte aligned. """ assert self._datastore.offset == 0 assert bytealigned is True # Extract data bytes from bitstring to be found. bytepos = (start + 7) // 8 found = False p = bytepos finalpos = end // 8 increment = max(1024, len(bytes_) * 10) buffersize = increment + len(bytes_) while p < finalpos: # Read in file or from memory in overlapping chunks and search the chunks. buf = bytearray(self._datastore.getbyteslice(p, min(p + buffersize, finalpos))) pos = buf.find(bytes_) if pos != -1: found = True p += pos break p += increment if not found: return () return (p * 8,) def _findregex(self, reg_ex, start, end, bytealigned): """Find first occurrence of a compiled regular expression. Note that this doesn't support arbitrary regexes, in particular they must match a known length. """ p = start length = len(reg_ex.pattern) # We grab overlapping chunks of the binary representation and # do an ordinary string search within that. increment = max(4096, length * 10) buffersize = increment + length while p < end: buf = self._readbin(min(buffersize, end - p), p) # Test using regular expressions... m = reg_ex.search(buf) if m: pos = m.start() # pos = buf.find(targetbin) # if pos != -1: # if bytealigned then we only accept byte aligned positions. if not bytealigned or (p + pos) % 8 == 0: return (p + pos,) if bytealigned: # Advance to just beyond the non-byte-aligned match and try again... p += pos + 1 continue p += increment # Not found, return empty tuple return () def find(self, bs, start=None, end=None, bytealigned=None): """Find first occurrence of substring bs. Returns a single item tuple with the bit position if found, or an empty tuple if not found. The bit position (pos property) will also be set to the start of the substring if it is found. bs -- The bitstring to find. start -- The bit position to start the search. Defaults to 0. end -- The bit position one past the last bit to search. Defaults to self.len. bytealigned -- If True the bitstring will only be found on byte boundaries. Raises ValueError if bs is empty, if start < 0, if end > self.len or if end < start. 
>>> BitArray('0xc3e').find('0b1111') (6,) """ bs = Bits(bs) if not bs.len: raise ValueError("Cannot find an empty bitstring.") start, end = self._validate_slice(start, end) if bytealigned is None: bytealigned = globals()['bytealigned'] if bytealigned and not bs.len % 8 and not self._datastore.offset: p = self._findbytes(bs.bytes, start, end, bytealigned) else: p = self._findregex(re.compile(bs._getbin()), start, end, bytealigned) # If called from a class that has a pos, set it try: self._pos = p[0] except (AttributeError, IndexError): pass return p def findall(self, bs, start=None, end=None, count=None, bytealigned=None): """Find all occurrences of bs. Return generator of bit positions. bs -- The bitstring to find. start -- The bit position to start the search. Defaults to 0. end -- The bit position one past the last bit to search. Defaults to self.len. count -- The maximum number of occurrences to find. bytealigned -- If True the bitstring will only be found on byte boundaries. Raises ValueError if bs is empty, if start < 0, if end > self.len or if end < start. Note that all occurrences of bs are found, even if they overlap. """ if count is not None and count < 0: raise ValueError("In findall, count must be >= 0.") bs = Bits(bs) start, end = self._validate_slice(start, end) if bytealigned is None: bytealigned = globals()['bytealigned'] c = 0 if bytealigned and not bs.len % 8 and not self._datastore.offset: # Use the quick find method f = self._findbytes x = bs._getbytes() else: f = self._findregex x = re.compile(bs._getbin()) while True: p = f(x, start, end, bytealigned) if not p: break if count is not None and c >= count: return c += 1 try: self._pos = p[0] except AttributeError: pass yield p[0] if bytealigned: start = p[0] + 8 else: start = p[0] + 1 if start >= end: break return def rfind(self, bs, start=None, end=None, bytealigned=None): """Find final occurrence of substring bs. Returns a single item tuple with the bit position if found, or an empty tuple if not found. The bit position (pos property) will also be set to the start of the substring if it is found. bs -- The bitstring to find. start -- The bit position to end the reverse search. Defaults to 0. end -- The bit position one past the first bit to reverse search. Defaults to self.len. bytealigned -- If True the bitstring will only be found on byte boundaries. Raises ValueError if bs is empty, if start < 0, if end > self.len or if end < start. """ bs = Bits(bs) start, end = self._validate_slice(start, end) if bytealigned is None: bytealigned = globals()['bytealigned'] if not bs.len: raise ValueError("Cannot find an empty bitstring.") # Search chunks starting near the end and then moving back # until we find bs. increment = max(8192, bs.len * 80) buffersize = min(increment + bs.len, end - start) pos = max(start, end - buffersize) while True: found = list(self.findall(bs, start=pos, end=pos + buffersize, bytealigned=bytealigned)) if not found: if pos == start: return () pos = max(start, pos - increment) continue return (found[-1],) def cut(self, bits, start=None, end=None, count=None): """Return bitstring generator by cutting into bits sized chunks. bits -- The size in bits of the bitstring chunks to generate. start -- The bit position to start the first cut. Defaults to 0. end -- The bit position one past the last bit to use in the cut. Defaults to self.len. count -- If specified then at most count items are generated. Default is to cut as many times as possible. 
""" start, end = self._validate_slice(start, end) if count is not None and count < 0: raise ValueError("Cannot cut - count must be >= 0.") if bits <= 0: raise ValueError("Cannot cut - bits must be >= 0.") c = 0 while count is None or c < count: c += 1 nextchunk = self._slice(start, min(start + bits, end)) if nextchunk.len != bits: return assert nextchunk._assertsanity() yield nextchunk start += bits return def split(self, delimiter, start=None, end=None, count=None, bytealigned=None): """Return bitstring generator by splittling using a delimiter. The first item returned is the initial bitstring before the delimiter, which may be an empty bitstring. delimiter -- The bitstring used as the divider. start -- The bit position to start the split. Defaults to 0. end -- The bit position one past the last bit to use in the split. Defaults to self.len. count -- If specified then at most count items are generated. Default is to split as many times as possible. bytealigned -- If True splits will only occur on byte boundaries. Raises ValueError if the delimiter is empty. """ delimiter = Bits(delimiter) if not delimiter.len: raise ValueError("split delimiter cannot be empty.") start, end = self._validate_slice(start, end) if bytealigned is None: bytealigned = globals()['bytealigned'] if count is not None and count < 0: raise ValueError("Cannot split - count must be >= 0.") if count == 0: return if bytealigned and not delimiter.len % 8 and not self._datastore.offset: # Use the quick find method f = self._findbytes x = delimiter._getbytes() else: f = self._findregex x = re.compile(delimiter._getbin()) found = f(x, start, end, bytealigned) if not found: # Initial bits are the whole bitstring being searched yield self._slice(start, end) return # yield the bytes before the first occurrence of the delimiter, even if empty yield self._slice(start, found[0]) startpos = pos = found[0] c = 1 while count is None or c < count: pos += delimiter.len found = f(x, pos, end, bytealigned) if not found: # No more occurrences, so return the rest of the bitstring yield self._slice(startpos, end) return c += 1 yield self._slice(startpos, found[0]) startpos = pos = found[0] # Have generated count bitstrings, so time to quit. return def join(self, sequence): """Return concatenation of bitstrings joined by self. sequence -- A sequence of bitstrings. """ s = self.__class__() i = iter(sequence) print 'join...' try: nn=next(i) print nn dd=Bits(nn) print dd s._append(dd) while True: n = next(i) s._append(self) s._append(Bits(n)) except StopIteration: pass return s def tobytes(self): """Return the bitstring as bytes, padding with zero bits if needed. Up to seven zero bits will be added at the end to byte align. """ d = offsetcopy(self._datastore, 0).rawbytes # Need to ensure that unused bits at end are set to zero unusedbits = 8 - self.len % 8 if unusedbits != 8: d[-1] &= (0xff << unusedbits) return bytes(d) def tofile(self, f): """Write the bitstring to a file object, padding with zero bits if needed. Up to seven zero bits will be added at the end to byte align. """ # If the bitstring is file based then we don't want to read it all # in to memory. chunksize = 1024 * 1024 # 1 MB chunks if not self._offset: a = 0 bytelen = self._datastore.bytelength p = self._datastore.getbyteslice(a, min(a + chunksize, bytelen - 1)) while len(p) == chunksize: f.write(p) a += chunksize p = self._datastore.getbyteslice(a, min(a + chunksize, bytelen - 1)) f.write(p) # Now the final byte, ensuring that unused bits at end are set to 0. 
bits_in_final_byte = self.len % 8 if not bits_in_final_byte: bits_in_final_byte = 8 f.write(self[-bits_in_final_byte:].tobytes()) else: # Really quite inefficient... a = 0 b = a + chunksize * 8 while b <= self.len: f.write(self._slice(a, b)._getbytes()) a += chunksize * 8 b += chunksize * 8 if a != self.len: f.write(self._slice(a, self.len).tobytes()) def startswith(self, prefix, start=None, end=None): """Return whether the current bitstring starts with prefix. prefix -- The bitstring to search for. start -- The bit position to start from. Defaults to 0. end -- The bit position to end at. Defaults to self.len. """ prefix = Bits(prefix) start, end = self._validate_slice(start, end) if end < start + prefix.len: return False end = start + prefix.len return self._slice(start, end) == prefix def endswith(self, suffix, start=None, end=None): """Return whether the current bitstring ends with suffix. suffix -- The bitstring to search for. start -- The bit position to start from. Defaults to 0. end -- The bit position to end at. Defaults to self.len. """ suffix = Bits(suffix) start, end = self._validate_slice(start, end) if start + suffix.len > end: return False start = end - suffix.len return self._slice(start, end) == suffix def all(self, value, pos=None): """Return True if one or many bits are all set to value. value -- If value is True then checks for bits set to 1, otherwise checks for bits set to 0. pos -- An iterable of bit positions. Negative numbers are treated in the same way as slice indices. Defaults to the whole bitstring. """ value = bool(value) length = self.len if pos is None: pos = xrange(self.len) for p in pos: if p < 0: p += length if not 0 <= p < length: raise IndexError("Bit position {0} out of range.".format(p)) if not self._datastore.getbit(p) is value: return False return True def any(self, value, pos=None): """Return True if any of one or many bits are set to value. value -- If value is True then checks for bits set to 1, otherwise checks for bits set to 0. pos -- An iterable of bit positions. Negative numbers are treated in the same way as slice indices. Defaults to the whole bitstring. """ value = bool(value) length = self.len if pos is None: pos = xrange(self.len) for p in pos: if p < 0: p += length if not 0 <= p < length: raise IndexError("Bit position {0} out of range.".format(p)) if self._datastore.getbit(p) is value: return True return False def count(self, value): """Return count of total number of either zero or one bits. value -- If True then bits set to 1 are counted, otherwise bits set to 0 are counted. >>> Bits('0xef').count(1) 7 """ if not self.len: return 0 # count the number of 1s (from which it's easy to work out the 0s). # Don't count the final byte yet. count = sum(BIT_COUNT[self._datastore.getbyte(i)] for i in xrange(self._datastore.bytelength - 1)) # adjust for bits at start that aren't part of the bitstring if self._offset: count -= BIT_COUNT[self._datastore.getbyte(0) >> (8 - self._offset)] # and count the last 1 - 8 bits at the end. 
endbits = self._datastore.bytelength * 8 - (self._offset + self.len) count += BIT_COUNT[self._datastore.getbyte(self._datastore.bytelength - 1) >> endbits] return count if value else self.len - count # Create native-endian functions as aliases depending on the byteorder if byteorder == 'little': _setfloatne = _setfloatle _readfloatne = _readfloatle _getfloatne = _getfloatle _setuintne = _setuintle _readuintne = _readuintle _getuintne = _getuintle _setintne = _setintle _readintne = _readintle _getintne = _getintle else: _setfloatne = _setfloat _readfloatne = _readfloat _getfloatne = _getfloat _setuintne = _setuintbe _readuintne = _readuintbe _getuintne = _getuintbe _setintne = _setintbe _readintne = _readintbe _getintne = _getintbe _offset = property(_getoffset) len = property(_getlength, doc="""The length of the bitstring in bits. Read only. """) length = property(_getlength, doc="""The length of the bitstring in bits. Read only. """) bool = property(_getbool, doc="""The bitstring as a bool (True or False). Read only. """) hex = property(_gethex, doc="""The bitstring as a hexadecimal string. Read only. """) bin = property(_getbin, doc="""The bitstring as a binary string. Read only. """) oct = property(_getoct, doc="""The bitstring as an octal string. Read only. """) bytes = property(_getbytes, doc="""The bitstring as a bytes object. Read only. """) int = property(_getint, doc="""The bitstring as a two's complement signed int. Read only. """) uint = property(_getuint, doc="""The bitstring as a two's complement unsigned int. Read only. """) float = property(_getfloat, doc="""The bitstring as a floating point number. Read only. """) intbe = property(_getintbe, doc="""The bitstring as a two's complement big-endian signed int. Read only. """) uintbe = property(_getuintbe, doc="""The bitstring as a two's complement big-endian unsigned int. Read only. """) floatbe = property(_getfloat, doc="""The bitstring as a big-endian floating point number. Read only. """) intle = property(_getintle, doc="""The bitstring as a two's complement little-endian signed int. Read only. """) uintle = property(_getuintle, doc="""The bitstring as a two's complement little-endian unsigned int. Read only. """) floatle = property(_getfloatle, doc="""The bitstring as a little-endian floating point number. Read only. """) intne = property(_getintne, doc="""The bitstring as a two's complement native-endian signed int. Read only. """) uintne = property(_getuintne, doc="""The bitstring as a two's complement native-endian unsigned int. Read only. """) floatne = property(_getfloatne, doc="""The bitstring as a native-endian floating point number. Read only. """) ue = property(_getue, doc="""The bitstring as an unsigned exponential-Golomb code. Read only. """) se = property(_getse, doc="""The bitstring as a signed exponential-Golomb code. Read only. """) uie = property(_getuie, doc="""The bitstring as an unsigned interleaved exponential-Golomb code. Read only. """) sie = property(_getsie, doc="""The bitstring as a signed interleaved exponential-Golomb code. Read only. """) # Dictionary that maps token names to the function that reads them. 
name_to_read = {'uint': Bits._readuint, 'uintle': Bits._readuintle, 'uintbe': Bits._readuintbe, 'uintne': Bits._readuintne, 'int': Bits._readint, 'intle': Bits._readintle, 'intbe': Bits._readintbe, 'intne': Bits._readintne, 'float': Bits._readfloat, 'floatbe': Bits._readfloat, # floatbe is a synonym for float 'floatle': Bits._readfloatle, 'floatne': Bits._readfloatne, 'hex': Bits._readhex, 'oct': Bits._readoct, 'bin': Bits._readbin, 'bits': Bits._readbits, 'bytes': Bits._readbytes, 'ue': Bits._readue, 'se': Bits._readse, 'uie': Bits._readuie, 'sie': Bits._readsie, 'bool': Bits._readbool, } # Dictionaries for mapping init keywords with init functions. init_with_length_and_offset = {'bytes': Bits._setbytes_safe, 'filename': Bits._setfile, } init_with_length_only = {'uint': Bits._setuint, 'int': Bits._setint, 'float': Bits._setfloat, 'uintbe': Bits._setuintbe, 'intbe': Bits._setintbe, 'floatbe': Bits._setfloat, 'uintle': Bits._setuintle, 'intle': Bits._setintle, 'floatle': Bits._setfloatle, 'uintne': Bits._setuintne, 'intne': Bits._setintne, 'floatne': Bits._setfloatne, } init_without_length_or_offset = {'bin': Bits._setbin_safe, 'hex': Bits._sethex, 'oct': Bits._setoct, 'ue': Bits._setue, 'se': Bits._setse, 'uie': Bits._setuie, 'sie': Bits._setsie, 'bool': Bits._setbool, } class BitArray(Bits): """A container holding a mutable sequence of bits. Subclass of the immutable Bits class. Inherits all of its methods (except __hash__) and adds mutating methods. Mutating methods: append() -- Append a bitstring. byteswap() -- Change byte endianness in-place. insert() -- Insert a bitstring. invert() -- Flip bit(s) between one and zero. overwrite() -- Overwrite a section with a new bitstring. prepend() -- Prepend a bitstring. replace() -- Replace occurrences of one bitstring with another. reverse() -- Reverse bits in-place. rol() -- Rotate bits to the left. ror() -- Rotate bits to the right. set() -- Set bit(s) to 1 or 0. Methods inherited from Bits: all() -- Check if all specified bits are set to 1 or 0. any() -- Check if any of specified bits are set to 1 or 0. count() -- Count the number of bits set to 1 or 0. cut() -- Create generator of constant sized chunks. endswith() -- Return whether the bitstring ends with a sub-string. find() -- Find a sub-bitstring in the current bitstring. findall() -- Find all occurrences of a sub-bitstring in the current bitstring. join() -- Join bitstrings together using current bitstring. rfind() -- Seek backwards to find a sub-bitstring. split() -- Create generator of chunks split by a delimiter. startswith() -- Return whether the bitstring starts with a sub-bitstring. tobytes() -- Return bitstring as bytes, padding if needed. tofile() -- Write bitstring to file, padding if needed. unpack() -- Interpret bits using format string. Special methods: Mutating operators are available: [], <<=, >>=, +=, *=, &=, |= and ^= in addition to the inherited [], ==, !=, +, *, ~, <<, >>, &, | and ^. Properties: bin -- The bitstring as a binary string. bool -- For single bit bitstrings, interpret as True or False. bytepos -- The current byte position in the bitstring. bytes -- The bitstring as a bytes object. float -- Interpret as a floating point number. floatbe -- Interpret as a big-endian floating point number. floatle -- Interpret as a little-endian floating point number. floatne -- Interpret as a native-endian floating point number. hex -- The bitstring as a hexadecimal string. int -- Interpret as a two's complement signed integer. intbe -- Interpret as a big-endian signed integer. 
intle -- Interpret as a little-endian signed integer. intne -- Interpret as a native-endian signed integer. len -- Length of the bitstring in bits. oct -- The bitstring as an octal string. pos -- The current bit position in the bitstring. se -- Interpret as a signed exponential-Golomb code. ue -- Interpret as an unsigned exponential-Golomb code. sie -- Interpret as a signed interleaved exponential-Golomb code. uie -- Interpret as an unsigned interleaved exponential-Golomb code. uint -- Interpret as a two's complement unsigned integer. uintbe -- Interpret as a big-endian unsigned integer. uintle -- Interpret as a little-endian unsigned integer. uintne -- Interpret as a native-endian unsigned integer. """ __slots__ = () # As BitArray objects are mutable, we shouldn't allow them to be hashed. __hash__ = None def __init__(self, auto=None, length=None, offset=None, **kwargs): """Either specify an 'auto' initialiser: auto -- a string of comma separated tokens, an integer, a file object, a bytearray, a boolean iterable or another bitstring. Or initialise via **kwargs with one (and only one) of: bytes -- raw data as a string, for example read from a binary file. bin -- binary string representation, e.g. '0b001010'. hex -- hexadecimal string representation, e.g. '0x2ef' oct -- octal string representation, e.g. '0o777'. uint -- an unsigned integer. int -- a signed integer. float -- a floating point number. uintbe -- an unsigned big-endian whole byte integer. intbe -- a signed big-endian whole byte integer. floatbe - a big-endian floating point number. uintle -- an unsigned little-endian whole byte integer. intle -- a signed little-endian whole byte integer. floatle -- a little-endian floating point number. uintne -- an unsigned native-endian whole byte integer. intne -- a signed native-endian whole byte integer. floatne -- a native-endian floating point number. se -- a signed exponential-Golomb code. ue -- an unsigned exponential-Golomb code. sie -- a signed interleaved exponential-Golomb code. uie -- an unsigned interleaved exponential-Golomb code. bool -- a boolean (True or False). filename -- a file which will be opened in binary read-only mode. Other keyword arguments: length -- length of the bitstring in bits, if needed and appropriate. It must be supplied for all integer and float initialisers. offset -- bit offset to the data. These offset bits are ignored and this is intended for use when initialising using 'bytes' or 'filename'. """ # For mutable BitArrays we always read in files to memory: if not isinstance(self._datastore, ByteStore): self._ensureinmemory() def __new__(cls, auto=None, length=None, offset=None, **kwargs): x = super(BitArray, cls).__new__(cls) y = Bits.__new__(BitArray, auto, length, offset, **kwargs) x._datastore = y._datastore return x def __iadd__(self, bs): """Append bs to current bitstring. Return self. bs -- the bitstring to append. """ self.append(bs) return self def __copy__(self): """Return a new copy of the BitArray.""" s_copy = BitArray() if not isinstance(self._datastore, ByteStore): # Let them both point to the same (invariant) array. # If either gets modified then at that point they'll be read into memory. s_copy._datastore = self._datastore else: s_copy._datastore = copy.copy(self._datastore) return s_copy def __setitem__(self, key, value): """Set item or range to new value. Indices are in units of the step parameter (default 1 bit). Stepping is used to specify the number of bits in each item. 
If the length of the bitstring is changed then pos will be moved to after the inserted section, otherwise it will remain unchanged. >>> s = BitArray('0xff') >>> s[0:1:4] = '0xe' >>> print s '0xef' >>> s[4:4] = '0x00' >>> print s '0xe00f' """ try: # A slice start, step = 0, 1 if key.step is not None: step = key.step except AttributeError: # single element if key < 0: key += self.len if not 0 <= key < self.len: raise IndexError("Slice index out of range.") if isinstance(value, numbers.Integral): if not value: self._unset(key) return if value in (1, -1): self._set(key) return raise ValueError("Cannot set a single bit with integer {0}.".format(value)) value = Bits(value) if value.len == 1: # TODO: this can't be optimal if value[0]: self._set(key) else: self._unset(key) else: self._delete(1, key) self._insert(value, key) return else: if step != 1: # convert to binary string and use string slicing # TODO: Horribly inefficent temp = list(self._getbin()) v = list(Bits(value)._getbin()) temp.__setitem__(key, v) self._setbin_unsafe(''.join(temp)) return # If value is an integer then we want to set the slice to that # value rather than initialise a new bitstring of that length. if not isinstance(value, numbers.Integral): try: # TODO: Better way than calling constructor here? value = Bits(value) except TypeError: raise TypeError("Bitstring, integer or string expected. " "Got {0}.".format(type(value))) if key.start is not None: start = key.start if key.start < 0: start += self.len if start < 0: start = 0 stop = self.len if key.stop is not None: stop = key.stop if key.stop < 0: stop += self.len if start > stop: # The standard behaviour for lists is to just insert at the # start position if stop < start and step == 1. stop = start if isinstance(value, numbers.Integral): if value >= 0: value = self.__class__(uint=value, length=stop - start) else: value = self.__class__(int=value, length=stop - start) stop = min(stop, self.len) start = max(start, 0) start = min(start, stop) if (stop - start) == value.len: if not value.len: return if step >= 0: self._overwrite(value, start) else: self._overwrite(value.__getitem__(slice(None, None, 1)), start) else: # TODO: A delete then insert is wasteful - it could do unneeded shifts. # Could be either overwrite + insert or overwrite + delete. self._delete(stop - start, start) if step >= 0: self._insert(value, start) else: self._insert(value.__getitem__(slice(None, None, 1)), start) # pos is now after the inserted piece. return def __delitem__(self, key): """Delete item or range. Indices are in units of the step parameter (default 1 bit). Stepping is used to specify the number of bits in each item. >>> a = BitArray('0x001122') >>> del a[1:2:8] >>> print a 0x0022 """ try: # A slice start = 0 step = key.step if key.step is not None else 1 except AttributeError: # single element if key < 0: key += self.len if not 0 <= key < self.len: raise IndexError("Slice index out of range.") self._delete(1, key) return else: if step != 1: # convert to binary string and use string slicing # TODO: Horribly inefficent temp = list(self._getbin()) temp.__delitem__(key) self._setbin_unsafe(''.join(temp)) return stop = key.stop if key.start is not None: start = key.start if key.start < 0 and stop is None: start += self.len if start < 0: start = 0 if stop is None: stop = self.len if start > stop: return stop = min(stop, self.len) start = max(start, 0) start = min(start, stop) self._delete(stop - start, start) return def __ilshift__(self, n): """Shift bits by n to the left in place. Return self. 
n -- the number of bits to shift. Must be >= 0. """ if n < 0: raise ValueError("Cannot shift by a negative amount.") if not self.len: raise ValueError("Cannot shift an empty bitstring.") if not n: return self n = min(n, self.len) return self._ilshift(n) def __irshift__(self, n): """Shift bits by n to the right in place. Return self. n -- the number of bits to shift. Must be >= 0. """ if n < 0: raise ValueError("Cannot shift by a negative amount.") if not self.len: raise ValueError("Cannot shift an empty bitstring.") if not n: return self n = min(n, self.len) return self._irshift(n) def __imul__(self, n): """Concatenate n copies of self in place. Return self. Called for expressions of the form 'a *= 3'. n -- The number of concatenations. Must be >= 0. """ if n < 0: raise ValueError("Cannot multiply by a negative integer.") return self._imul(n) def __ior__(self, bs): bs = Bits(bs) if self.len != bs.len: raise ValueError("Bitstrings must have the same length " "for |= operator.") return self._ior(bs) def __iand__(self, bs): bs = Bits(bs) if self.len != bs.len: raise ValueError("Bitstrings must have the same length " "for &= operator.") return self._iand(bs) def __ixor__(self, bs): bs = Bits(bs) if self.len != bs.len: raise ValueError("Bitstrings must have the same length " "for ^= operator.") return self._ixor(bs) def replace(self, old, new, start=None, end=None, count=None, bytealigned=None): """Replace all occurrences of old with new in place. Returns number of replacements made. old -- The bitstring to replace. new -- The replacement bitstring. start -- Any occurrences that start before this will not be replaced. Defaults to 0. end -- Any occurrences that finish after this will not be replaced. Defaults to self.len. count -- The maximum number of replacements to make. Defaults to replace all occurrences. bytealigned -- If True replacements will only be made on byte boundaries. Raises ValueError if old is empty or if start or end are out of range. """ old = Bits(old) new = Bits(new) if not old.len: raise ValueError("Empty bitstring cannot be replaced.") start, end = self._validate_slice(start, end) if bytealigned is None: bytealigned = globals()['bytealigned'] # Adjust count for use in split() if count is not None: count += 1 sections = self.split(old, start, end, count, bytealigned) lengths = [s.len for s in sections] if len(lengths) == 1: # Didn't find anything to replace. return 0 # no replacements done if new is self: # Prevent self assignment woes new = copy.copy(self) positions = [lengths[0] + start] for l in lengths[1:-1]: # Next position is the previous one plus the length of the next section. positions.append(positions[-1] + l) # We have all the positions that need replacements. We do them # in reverse order so that they won't move around as we replace. positions.reverse() try: # Need to calculate new pos, if this is a bitstream newpos = self._pos for p in positions: self[p:p + old.len] = new if old.len != new.len: diff = new.len - old.len for p in positions: if p >= newpos: continue if p + old.len <= newpos: newpos += diff else: newpos = p self._pos = newpos except AttributeError: for p in positions: self[p:p + old.len] = new assert self._assertsanity() return len(lengths) - 1 def insert(self, bs, pos=None): """Insert bs at bit position pos. bs -- The bitstring to insert. pos -- The bit position to insert at. Raises ValueError if pos < 0 or pos > self.len. 
""" bs = Bits(bs) if not bs.len: return self if bs is self: bs = self.__copy__() if pos is None: try: pos = self._pos except AttributeError: raise TypeError("insert require a bit position for this type.") if pos < 0: pos += self.len if not 0 <= pos <= self.len: raise ValueError("Invalid insert position.") self._insert(bs, pos) def overwrite(self, bs, pos=None): """Overwrite with bs at bit position pos. bs -- The bitstring to overwrite with. pos -- The bit position to begin overwriting from. Raises ValueError if pos < 0 or pos + bs.len > self.len """ bs = Bits(bs) if not bs.len: return if pos is None: try: pos = self._pos except AttributeError: raise TypeError("overwrite require a bit position for this type.") if pos < 0: pos += self.len if pos < 0 or pos + bs.len > self.len: raise ValueError("Overwrite exceeds boundary of bitstring.") self._overwrite(bs, pos) try: self._pos = pos + bs.len except AttributeError: pass def append(self, bs): """Append a bitstring to the current bitstring. bs -- The bitstring to append. """ # The offset is a hint to make bs easily appendable. bs = self._converttobitstring(bs, offset=(self.len + self._offset) % 8) self._append(bs) def prepend(self, bs): """Prepend a bitstring to the current bitstring. bs -- The bitstring to prepend. """ bs = Bits(bs) self._prepend(bs) def reverse(self, start=None, end=None): """Reverse bits in-place. start -- Position of first bit to reverse. Defaults to 0. end -- One past the position of the last bit to reverse. Defaults to self.len. Using on an empty bitstring will have no effect. Raises ValueError if start < 0, end > self.len or end < start. """ start, end = self._validate_slice(start, end) if start == 0 and end == self.len: self._reverse() return s = self._slice(start, end) s._reverse() self[start:end] = s def set(self, value, pos=None): """Set one or many bits to 1 or 0. value -- If True bits are set to 1, otherwise they are set to 0. pos -- Either a single bit position or an iterable of bit positions. Negative numbers are treated in the same way as slice indices. Defaults to the entire bitstring. Raises IndexError if pos < -self.len or pos >= self.len. """ f = self._set if value else self._unset if pos is None: pos = xrange(self.len) try: length = self.len for p in pos: if p < 0: p += length if not 0 <= p < length: raise IndexError("Bit position {0} out of range.".format(p)) f(p) except TypeError: # Single pos if pos < 0: pos += self.len if not 0 <= pos < length: raise IndexError("Bit position {0} out of range.".format(pos)) f(pos) def invert(self, pos=None): """Invert one or many bits from 0 to 1 or vice versa. pos -- Either a single bit position or an iterable of bit positions. Negative numbers are treated in the same way as slice indices. Raises IndexError if pos < -self.len or pos >= self.len. """ if pos is None: self._invert_all() return if not isinstance(pos, collections.Iterable): pos = (pos,) length = self.len for p in pos: if p < 0: p += length if not 0 <= p < length: raise IndexError("Bit position {0} out of range.".format(p)) self._invert(p) def ror(self, bits, start=None, end=None): """Rotate bits to the right in-place. bits -- The number of bits to rotate by. start -- Start of slice to rotate. Defaults to 0. end -- End of slice to rotate. Defaults to self.len. Raises ValueError if bits < 0. 
""" if not self.len: raise Error("Cannot rotate an empty bitstring.") if bits < 0: raise ValueError("Cannot rotate right by negative amount.") start, end = self._validate_slice(start, end) bits %= (end - start) if not bits: return rhs = self._slice(end - bits, end) self._delete(bits, end - bits) self._insert(rhs, start) def rol(self, bits, start=None, end=None): """Rotate bits to the left in-place. bits -- The number of bits to rotate by. start -- Start of slice to rotate. Defaults to 0. end -- End of slice to rotate. Defaults to self.len. Raises ValueError if bits < 0. """ if not self.len: raise Error("Cannot rotate an empty bitstring.") if bits < 0: raise ValueError("Cannot rotate left by negative amount.") start, end = self._validate_slice(start, end) bits %= (end - start) if not bits: return lhs = self._slice(start, start + bits) self._delete(bits, start) self._insert(lhs, end - bits) def byteswap(self, fmt=None, start=None, end=None, repeat=True): """Change the endianness in-place. Return number of repeats of fmt done. fmt -- A compact structure string, an integer number of bytes or an iterable of integers. Defaults to 0, which byte reverses the whole bitstring. start -- Start bit position, defaults to 0. end -- End bit position, defaults to self.len. repeat -- If True (the default) the byte swapping pattern is repeated as much as possible. """ start, end = self._validate_slice(start, end) if fmt is None or fmt == 0: # reverse all of the whole bytes. bytesizes = [(end - start) // 8] elif isinstance(fmt, numbers.Integral): if fmt < 0: raise ValueError("Improper byte length {0}.".format(fmt)) bytesizes = [fmt] elif isinstance(fmt, basestring): m = STRUCT_PACK_RE.match(fmt) if not m: raise ValueError("Cannot parse format string {0}.".format(fmt)) # Split the format string into a list of 'q', '4h' etc. formatlist = re.findall(STRUCT_SPLIT_RE, m.group('fmt')) # Now deal with multiplicative factors, 4h -> hhhh etc. bytesizes = [] for f in formatlist: if len(f) == 1: bytesizes.append(PACK_CODE_SIZE[f]) else: bytesizes.extend([PACK_CODE_SIZE[f[-1]]] * int(f[:-1])) elif isinstance(fmt, collections.Iterable): bytesizes = fmt for bytesize in bytesizes: if not isinstance(bytesize, numbers.Integral) or bytesize < 0: raise ValueError("Improper byte length {0}.".format(bytesize)) else: raise TypeError("Format must be an integer, string or iterable.") repeats = 0 totalbitsize = 8 * sum(bytesizes) if not totalbitsize: return 0 if repeat: # Try to repeat up to the end of the bitstring. finalbit = end else: # Just try one (set of) byteswap(s). finalbit = start + totalbitsize for patternend in xrange(start + totalbitsize, finalbit + 1, totalbitsize): bytestart = patternend - totalbitsize for bytesize in bytesizes: byteend = bytestart + bytesize * 8 self._reversebytes(bytestart, byteend) bytestart += bytesize * 8 repeats += 1 return repeats def clear(self): """Remove all bits, reset to zero length.""" self._clear() def copy(self): """Return a copy of the bitstring.""" return self._copy() int = property(Bits._getint, Bits._setint, doc="""The bitstring as a two's complement signed int. Read and write. """) uint = property(Bits._getuint, Bits._setuint, doc="""The bitstring as a two's complement unsigned int. Read and write. """) float = property(Bits._getfloat, Bits._setfloat, doc="""The bitstring as a floating point number. Read and write. """) intbe = property(Bits._getintbe, Bits._setintbe, doc="""The bitstring as a two's complement big-endian signed int. Read and write. 
""") uintbe = property(Bits._getuintbe, Bits._setuintbe, doc="""The bitstring as a two's complement big-endian unsigned int. Read and write. """) floatbe = property(Bits._getfloat, Bits._setfloat, doc="""The bitstring as a big-endian floating point number. Read and write. """) intle = property(Bits._getintle, Bits._setintle, doc="""The bitstring as a two's complement little-endian signed int. Read and write. """) uintle = property(Bits._getuintle, Bits._setuintle, doc="""The bitstring as a two's complement little-endian unsigned int. Read and write. """) floatle = property(Bits._getfloatle, Bits._setfloatle, doc="""The bitstring as a little-endian floating point number. Read and write. """) intne = property(Bits._getintne, Bits._setintne, doc="""The bitstring as a two's complement native-endian signed int. Read and write. """) uintne = property(Bits._getuintne, Bits._setuintne, doc="""The bitstring as a two's complement native-endian unsigned int. Read and write. """) floatne = property(Bits._getfloatne, Bits._setfloatne, doc="""The bitstring as a native-endian floating point number. Read and write. """) ue = property(Bits._getue, Bits._setue, doc="""The bitstring as an unsigned exponential-Golomb code. Read and write. """) se = property(Bits._getse, Bits._setse, doc="""The bitstring as a signed exponential-Golomb code. Read and write. """) uie = property(Bits._getuie, Bits._setuie, doc="""The bitstring as an unsigned interleaved exponential-Golomb code. Read and write. """) sie = property(Bits._getsie, Bits._setsie, doc="""The bitstring as a signed interleaved exponential-Golomb code. Read and write. """) hex = property(Bits._gethex, Bits._sethex, doc="""The bitstring as a hexadecimal string. Read and write. """) bin = property(Bits._getbin, Bits._setbin_safe, doc="""The bitstring as a binary string. Read and write. """) oct = property(Bits._getoct, Bits._setoct, doc="""The bitstring as an octal string. Read and write. """) bool = property(Bits._getbool, Bits._setbool, doc="""The bitstring as a bool (True or False). Read and write. """) bytes = property(Bits._getbytes, Bits._setbytes_safe, doc="""The bitstring as a ordinary string. Read and write. """) class ConstBitStream(Bits): """A container or stream holding an immutable sequence of bits. For a mutable container use the BitStream class instead. Methods inherited from Bits: all() -- Check if all specified bits are set to 1 or 0. any() -- Check if any of specified bits are set to 1 or 0. count() -- Count the number of bits set to 1 or 0. cut() -- Create generator of constant sized chunks. endswith() -- Return whether the bitstring ends with a sub-string. find() -- Find a sub-bitstring in the current bitstring. findall() -- Find all occurrences of a sub-bitstring in the current bitstring. join() -- Join bitstrings together using current bitstring. rfind() -- Seek backwards to find a sub-bitstring. split() -- Create generator of chunks split by a delimiter. startswith() -- Return whether the bitstring starts with a sub-bitstring. tobytes() -- Return bitstring as bytes, padding if needed. tofile() -- Write bitstring to file, padding if needed. unpack() -- Interpret bits using format string. Other methods: bytealign() -- Align to next byte boundary. peek() -- Peek at and interpret next bits as a single item. peeklist() -- Peek at and interpret next bits as a list of items. read() -- Read and interpret next bits as a single item. readlist() -- Read and interpret next bits as a list of items. 
Special methods: Also available are the operators [], ==, !=, +, *, ~, <<, >>, &, |, ^. Properties: bin -- The bitstring as a binary string. bool -- For single bit bitstrings, interpret as True or False. bytepos -- The current byte position in the bitstring. bytes -- The bitstring as a bytes object. float -- Interpret as a floating point number. floatbe -- Interpret as a big-endian floating point number. floatle -- Interpret as a little-endian floating point number. floatne -- Interpret as a native-endian floating point number. hex -- The bitstring as a hexadecimal string. int -- Interpret as a two's complement signed integer. intbe -- Interpret as a big-endian signed integer. intle -- Interpret as a little-endian signed integer. intne -- Interpret as a native-endian signed integer. len -- Length of the bitstring in bits. oct -- The bitstring as an octal string. pos -- The current bit position in the bitstring. se -- Interpret as a signed exponential-Golomb code. ue -- Interpret as an unsigned exponential-Golomb code. sie -- Interpret as a signed interleaved exponential-Golomb code. uie -- Interpret as an unsigned interleaved exponential-Golomb code. uint -- Interpret as a two's complement unsigned integer. uintbe -- Interpret as a big-endian unsigned integer. uintle -- Interpret as a little-endian unsigned integer. uintne -- Interpret as a native-endian unsigned integer. """ __slots__ = ('_pos') def __init__(self, auto=None, length=None, offset=None, **kwargs): """Either specify an 'auto' initialiser: auto -- a string of comma separated tokens, an integer, a file object, a bytearray, a boolean iterable or another bitstring. Or initialise via **kwargs with one (and only one) of: bytes -- raw data as a string, for example read from a binary file. bin -- binary string representation, e.g. '0b001010'. hex -- hexadecimal string representation, e.g. '0x2ef' oct -- octal string representation, e.g. '0o777'. uint -- an unsigned integer. int -- a signed integer. float -- a floating point number. uintbe -- an unsigned big-endian whole byte integer. intbe -- a signed big-endian whole byte integer. floatbe - a big-endian floating point number. uintle -- an unsigned little-endian whole byte integer. intle -- a signed little-endian whole byte integer. floatle -- a little-endian floating point number. uintne -- an unsigned native-endian whole byte integer. intne -- a signed native-endian whole byte integer. floatne -- a native-endian floating point number. se -- a signed exponential-Golomb code. ue -- an unsigned exponential-Golomb code. sie -- a signed interleaved exponential-Golomb code. uie -- an unsigned interleaved exponential-Golomb code. bool -- a boolean (True or False). filename -- a file which will be opened in binary read-only mode. Other keyword arguments: length -- length of the bitstring in bits, if needed and appropriate. It must be supplied for all integer and float initialisers. offset -- bit offset to the data. These offset bits are ignored and this is intended for use when initialising using 'bytes' or 'filename'. """ self._pos = 0 def __new__(cls, auto=None, length=None, offset=None, **kwargs): x = super(ConstBitStream, cls).__new__(cls) x._initialise(auto, length, offset, **kwargs) return x def _setbytepos(self, bytepos): """Move to absolute byte-aligned position in stream.""" self._setbitpos(bytepos * 8) def _getbytepos(self): """Return the current position in the stream in bytes. 
Must be byte aligned.""" if self._pos % 8: raise ByteAlignError("Not byte aligned in _getbytepos().") return self._pos // 8 def _setbitpos(self, pos): """Move to absolute postion bit in bitstream.""" if pos < 0: raise ValueError("Bit position cannot be negative.") if pos > self.len: raise ValueError("Cannot seek past the end of the data.") self._pos = pos def _getbitpos(self): """Return the current position in the stream in bits.""" return self._pos def _clear(self): Bits._clear(self) self._pos = 0 def __copy__(self): """Return a new copy of the ConstBitStream for the copy module.""" # Note that if you want a new copy (different ID), use _copy instead. # The copy can use the same datastore as it's immutable. s = ConstBitStream() s._datastore = self._datastore # Reset the bit position, don't copy it. s._pos = 0 return s def __add__(self, bs): """Concatenate bitstrings and return new bitstring. bs -- the bitstring to append. """ s = Bits.__add__(self, bs) s._pos = 0 return s def read(self, fmt): """Interpret next bits according to the format string and return result. fmt -- Token string describing how to interpret the next bits. Token examples: 'int:12' : 12 bits as a signed integer 'uint:8' : 8 bits as an unsigned integer 'float:64' : 8 bytes as a big-endian float 'intbe:16' : 2 bytes as a big-endian signed integer 'uintbe:16' : 2 bytes as a big-endian unsigned integer 'intle:32' : 4 bytes as a little-endian signed integer 'uintle:32' : 4 bytes as a little-endian unsigned integer 'floatle:64': 8 bytes as a little-endian float 'intne:24' : 3 bytes as a native-endian signed integer 'uintne:24' : 3 bytes as a native-endian unsigned integer 'floatne:32': 4 bytes as a native-endian float 'hex:80' : 80 bits as a hex string 'oct:9' : 9 bits as an octal string 'bin:1' : single bit binary string 'ue' : next bits as unsigned exp-Golomb code 'se' : next bits as signed exp-Golomb code 'uie' : next bits as unsigned interleaved exp-Golomb code 'sie' : next bits as signed interleaved exp-Golomb code 'bits:5' : 5 bits as a bitstring 'bytes:10' : 10 bytes as a bytes object 'bool' : 1 bit as a bool 'pad:3' : 3 bits of padding to ignore - returns None fmt may also be an integer, which will be treated like the 'bits' token. The position in the bitstring is advanced to after the read items. Raises ReadError if not enough bits are available. Raises ValueError if the format is not understood. """ if isinstance(fmt, numbers.Integral): if fmt < 0: raise ValueError("Cannot read negative amount.") if fmt > self.len - self._pos: raise ReadError("Cannot read {0} bits, only {1} available.", fmt, self.len - self._pos) bs = self._slice(self._pos, self._pos + fmt) self._pos += fmt return bs p = self._pos _, token = tokenparser(fmt) if len(token) != 1: self._pos = p raise ValueError("Format string should be a single token, not {0} " "tokens - use readlist() instead.".format(len(token))) name, length, _ = token[0] if length is None: length = self.len - self._pos value, self._pos = self._readtoken(name, self._pos, length) return value def readlist(self, fmt, **kwargs): """Interpret next bits according to format string(s) and return list. fmt -- A single string or list of strings with comma separated tokens describing how to interpret the next bits in the bitstring. Items can also be integers, for reading new bitstring of the given length. kwargs -- A dictionary or keyword-value pairs - the keywords used in the format string will be replaced with their given value. 
The position in the bitstring is advanced to after the read items. Raises ReadError is not enough bits are available. Raises ValueError if the format is not understood. See the docstring for 'read' for token examples. 'pad' tokens are skipped and not added to the returned list. >>> h, b1, b2 = s.readlist('hex:20, bin:5, bin:3') >>> i, bs1, bs2 = s.readlist(['uint:12', 10, 10]) """ value, self._pos = self._readlist(fmt, self._pos, **kwargs) return value def readto(self, bs, bytealigned=None): """Read up to and including next occurrence of bs and return result. bs -- The bitstring to find. An integer is not permitted. bytealigned -- If True the bitstring will only be found on byte boundaries. Raises ValueError if bs is empty. Raises ReadError if bs is not found. """ if isinstance(bs, numbers.Integral): raise ValueError("Integers cannot be searched for") bs = Bits(bs) oldpos = self._pos p = self.find(bs, self._pos, bytealigned=bytealigned) if not p: raise ReadError("Substring not found") self._pos += bs.len return self._slice(oldpos, self._pos) def peek(self, fmt): """Interpret next bits according to format string and return result. fmt -- Token string describing how to interpret the next bits. The position in the bitstring is not changed. If not enough bits are available then all bits to the end of the bitstring will be used. Raises ReadError if not enough bits are available. Raises ValueError if the format is not understood. See the docstring for 'read' for token examples. """ pos_before = self._pos value = self.read(fmt) self._pos = pos_before return value def peeklist(self, fmt, **kwargs): """Interpret next bits according to format string(s) and return list. fmt -- One or more strings with comma separated tokens describing how to interpret the next bits in the bitstring. kwargs -- A dictionary or keyword-value pairs - the keywords used in the format string will be replaced with their given value. The position in the bitstring is not changed. If not enough bits are available then all bits to the end of the bitstring will be used. Raises ReadError if not enough bits are available. Raises ValueError if the format is not understood. See the docstring for 'read' for token examples. """ pos = self._pos return_values = self.readlist(fmt, **kwargs) self._pos = pos return return_values def bytealign(self): """Align to next byte and return number of skipped bits. Raises ValueError if the end of the bitstring is reached before aligning to the next byte. """ skipped = (8 - (self._pos % 8)) % 8 self.pos += self._offset + skipped assert self._assertsanity() return skipped pos = property(_getbitpos, _setbitpos, doc="""The position in the bitstring in bits. Read and write. """) bitpos = property(_getbitpos, _setbitpos, doc="""The position in the bitstring in bits. Read and write. """) bytepos = property(_getbytepos, _setbytepos, doc="""The position in the bitstring in bytes. Read and write. """) class BitStream(ConstBitStream, BitArray): """A container or stream holding a mutable sequence of bits Subclass of the ConstBitStream and BitArray classes. Inherits all of their methods. Methods: all() -- Check if all specified bits are set to 1 or 0. any() -- Check if any of specified bits are set to 1 or 0. append() -- Append a bitstring. bytealign() -- Align to next byte boundary. byteswap() -- Change byte endianness in-place. count() -- Count the number of bits set to 1 or 0. cut() -- Create generator of constant sized chunks. endswith() -- Return whether the bitstring ends with a sub-string. 
find() -- Find a sub-bitstring in the current bitstring. findall() -- Find all occurrences of a sub-bitstring in the current bitstring. insert() -- Insert a bitstring. invert() -- Flip bit(s) between one and zero. join() -- Join bitstrings together using current bitstring. overwrite() -- Overwrite a section with a new bitstring. peek() -- Peek at and interpret next bits as a single item. peeklist() -- Peek at and interpret next bits as a list of items. prepend() -- Prepend a bitstring. read() -- Read and interpret next bits as a single item. readlist() -- Read and interpret next bits as a list of items. replace() -- Replace occurrences of one bitstring with another. reverse() -- Reverse bits in-place. rfind() -- Seek backwards to find a sub-bitstring. rol() -- Rotate bits to the left. ror() -- Rotate bits to the right. set() -- Set bit(s) to 1 or 0. split() -- Create generator of chunks split by a delimiter. startswith() -- Return whether the bitstring starts with a sub-bitstring. tobytes() -- Return bitstring as bytes, padding if needed. tofile() -- Write bitstring to file, padding if needed. unpack() -- Interpret bits using format string. Special methods: Mutating operators are available: [], <<=, >>=, +=, *=, &=, |= and ^= in addition to [], ==, !=, +, *, ~, <<, >>, &, | and ^. Properties: bin -- The bitstring as a binary string. bool -- For single bit bitstrings, interpret as True or False. bytepos -- The current byte position in the bitstring. bytes -- The bitstring as a bytes object. float -- Interpret as a floating point number. floatbe -- Interpret as a big-endian floating point number. floatle -- Interpret as a little-endian floating point number. floatne -- Interpret as a native-endian floating point number. hex -- The bitstring as a hexadecimal string. int -- Interpret as a two's complement signed integer. intbe -- Interpret as a big-endian signed integer. intle -- Interpret as a little-endian signed integer. intne -- Interpret as a native-endian signed integer. len -- Length of the bitstring in bits. oct -- The bitstring as an octal string. pos -- The current bit position in the bitstring. se -- Interpret as a signed exponential-Golomb code. ue -- Interpret as an unsigned exponential-Golomb code. sie -- Interpret as a signed interleaved exponential-Golomb code. uie -- Interpret as an unsigned interleaved exponential-Golomb code. uint -- Interpret as a two's complement unsigned integer. uintbe -- Interpret as a big-endian unsigned integer. uintle -- Interpret as a little-endian unsigned integer. uintne -- Interpret as a native-endian unsigned integer. """ __slots__ = () # As BitStream objects are mutable, we shouldn't allow them to be hashed. __hash__ = None def __init__(self, auto=None, length=None, offset=None, **kwargs): """Either specify an 'auto' initialiser: auto -- a string of comma separated tokens, an integer, a file object, a bytearray, a boolean iterable or another bitstring. Or initialise via **kwargs with one (and only one) of: bytes -- raw data as a string, for example read from a binary file. bin -- binary string representation, e.g. '0b001010'. hex -- hexadecimal string representation, e.g. '0x2ef' oct -- octal string representation, e.g. '0o777'. uint -- an unsigned integer. int -- a signed integer. float -- a floating point number. uintbe -- an unsigned big-endian whole byte integer. intbe -- a signed big-endian whole byte integer. floatbe - a big-endian floating point number. uintle -- an unsigned little-endian whole byte integer. 
intle -- a signed little-endian whole byte integer. floatle -- a little-endian floating point number. uintne -- an unsigned native-endian whole byte integer. intne -- a signed native-endian whole byte integer. floatne -- a native-endian floating point number. se -- a signed exponential-Golomb code. ue -- an unsigned exponential-Golomb code. sie -- a signed interleaved exponential-Golomb code. uie -- an unsigned interleaved exponential-Golomb code. bool -- a boolean (True or False). filename -- a file which will be opened in binary read-only mode. Other keyword arguments: length -- length of the bitstring in bits, if needed and appropriate. It must be supplied for all integer and float initialisers. offset -- bit offset to the data. These offset bits are ignored and this is intended for use when initialising using 'bytes' or 'filename'. """ self._pos = 0 # For mutable BitStreams we always read in files to memory: if not isinstance(self._datastore, ByteStore): self._ensureinmemory() def __new__(cls, auto=None, length=None, offset=None, **kwargs): x = super(BitStream, cls).__new__(cls) x._initialise(auto, length, offset, **kwargs) return x def __copy__(self): """Return a new copy of the BitStream.""" s_copy = BitStream() s_copy._pos = 0 if not isinstance(self._datastore, ByteStore): # Let them both point to the same (invariant) array. # If either gets modified then at that point they'll be read into memory. s_copy._datastore = self._datastore else: s_copy._datastore = ByteStore(self._datastore._rawarray[:], self._datastore.bitlength, self._datastore.offset) return s_copy def prepend(self, bs): """Prepend a bitstring to the current bitstring. bs -- The bitstring to prepend. """ bs = self._converttobitstring(bs) self._prepend(bs) self._pos += bs.len def pack(fmt, *values, **kwargs): """Pack the values according to the format string and return a new BitStream. fmt -- A single string or a list of strings with comma separated tokens describing how to create the BitStream. values -- Zero or more values to pack according to the format. kwargs -- A dictionary or keyword-value pairs - the keywords used in the format string will be replaced with their given value. 
Token examples: 'int:12' : 12 bits as a signed integer 'uint:8' : 8 bits as an unsigned integer 'float:64' : 8 bytes as a big-endian float 'intbe:16' : 2 bytes as a big-endian signed integer 'uintbe:16' : 2 bytes as a big-endian unsigned integer 'intle:32' : 4 bytes as a little-endian signed integer 'uintle:32' : 4 bytes as a little-endian unsigned integer 'floatle:64': 8 bytes as a little-endian float 'intne:24' : 3 bytes as a native-endian signed integer 'uintne:24' : 3 bytes as a native-endian unsigned integer 'floatne:32': 4 bytes as a native-endian float 'hex:80' : 80 bits as a hex string 'oct:9' : 9 bits as an octal string 'bin:1' : single bit binary string 'ue' / 'uie': next bits as unsigned exp-Golomb code 'se' / 'sie': next bits as signed exp-Golomb code 'bits:5' : 5 bits as a bitstring object 'bytes:10' : 10 bytes as a bytes object 'bool' : 1 bit as a bool 'pad:3' : 3 zero bits as padding >>> s = pack('uint:12, bits', 100, '0xffe') >>> t = pack(['bits', 'bin:3'], s, '111') >>> u = pack('uint:8=a, uint:8=b, uint:55=a', a=6, b=44) """ tokens = [] if isinstance(fmt, basestring): fmt = [fmt] try: for f_item in fmt: _, tkns = tokenparser(f_item, tuple(sorted(kwargs.keys()))) tokens.extend(tkns) except ValueError as e: raise CreationError(*e.args) value_iter = iter(values) s = BitStream() try: for name, length, value in tokens: # If the value is in the kwd dictionary then it takes precedence. if value in kwargs: value = kwargs[value] # If the length is in the kwd dictionary then use that too. if length in kwargs: length = kwargs[length] # Also if we just have a dictionary name then we want to use it if name in kwargs and length is None and value is None: s.append(kwargs[name]) continue if length is not None: length = int(length) if value is None and name != 'pad': # Take the next value from the ones provided value = next(value_iter) s._append(BitStream._init_with_token(name, length, value)) except StopIteration: raise CreationError("Not enough parameters present to pack according to the " "format. {0} values are needed.", len(tokens)) try: next(value_iter) except StopIteration: # Good, we've used up all the *values. return s raise CreationError("Too many parameters present to pack according to the format.") # Aliases for backward compatibility ConstBitArray = Bits BitString = BitStream __all__ = ['ConstBitArray', 'ConstBitStream', 'BitStream', 'BitArray', 'Bits', 'BitString', 'pack', 'Error', 'ReadError', 'InterpretError', 'ByteAlignError', 'CreationError', 'bytealigned']
0.001394
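# ---------------------------------------------------------------------------
# A minimal usage sketch for the bitstring sample that ends above, assuming it
# is importable as `bitstring` (the module name is an assumption; the sample
# itself does not state it). It exercises the find(), readlist() and pack()
# behaviour described in the docstrings; variable names are illustrative only.
from bitstring import BitArray, ConstBitStream, pack

a = BitArray('0xc3e')
print(a.find('0b1111'))                       # (6,), as in the find() docstring example

s = ConstBitStream('0x47f1, 0b001')           # 'auto' initialiser: comma separated tokens
first, second = s.readlist('uint:8, uint:8')  # read two 8-bit unsigned integers
s.pos = 0                                     # rewind the stream to the start

p = pack('uint:12, bits', 100, '0xffe')       # build a BitStream from a format string
print(p.bin)                                  # binary string of the packed data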
from approver.models import Speciality, QI_Interest, Suffix, ClinicalArea, Self_Classification, ClinicalDepartment from django.contrib.auth.models import User from approver import utils class AboutYouForm(): def __init__(self, user=None, person=None): person = person if person else user.person date_joined = user.date_joined if user else None self.first_name = {'name': 'first_name', 'placeholder': 'Jane', 'label': 'First Name', 'type': 'text', 'value': person.first_name or '', 'required': True, 'input_classes': 'about__field--box'} self.last_name = {'name': 'last_name', 'placeholder': 'Doe', 'label': 'Last Name', 'type': 'text', 'required': True, 'value': person.last_name or '', 'input_classes': 'about__field--box'} self.title = {'name': 'title', 'label': 'Title', 'placeholder': 'e.g. Mrs.', 'type': 'text', 'value': person.title or '', 'input_classes': 'about__field--box'} self.department = {'name': 'department', 'placeholder': 'Department', 'label': 'What is your primary department?', 'selected': getattr(person.department_select,'name',''), 'options': ClinicalDepartment.objects.values_list('name', flat=True).order_by('sort_order'), 'input_classes': 'about__field--box'} self.clinical_area = {'name': 'clinical_area', 'placeholder': 'e.g. NICU 2', 'label': 'What is your clinical area? Press "enter" to save', 'model': 'clinicalarea', 'filter_field': 'name', 'selected': utils.get_related(person,"clinical_area"), 'input_classes': 'about__field--box', 'div_classes': 'about__field--width100'} self.self_classification = {'name': 'self_classification', 'label': 'Self Classification', 'placeholder': 'Self Classification', 'selected': getattr(person.self_classification,'name',''), 'other': person.other_self_classification or '', 'options': Self_Classification.objects.values_list('name', flat=True).order_by('sort_order'), 'input_class_list': 'about__field-box'} self.business_address = person.business_address self.webpage_url = {'name': 'webpage_url', 'placeholder': 'Enter dept webpage url here', 'label': 'Webpage URL', 'type': 'text', 'value': person.webpage_url or '', 'input_classes': 'about__field--box'} self.email = {'name': 'email', 'placeholder': '[email protected]', 'label': 'Email Address', 'type': 'email', 'value': person.email_address or '', 'input_classes': 'about__field--box'} self.business_phone = {'name': 'business_phone', 'placeholder': '(555) 555-5555', 'label': 'Business Phone Number', 'type': 'text', 'value': person.business_phone or '', 'input_classes': 'about__field--box'} self.contact_phone = {'name': 'contact_phone', 'placeholder': '(555) 555-5555', 'label': 'Contact Phone Number', 'type': 'text', 'value': person.contact_phone or '', 'input_classes': 'about__field--box'} self.speciality_tags = {'name': 'speciality', 'placeholder': 'e.g. Pediatric Nephrology', 'label': 'What is your speciality or certification? Press "enter" to save.', 'model': 'speciality', 'filter_field': 'name', 'selected': utils.get_related(person, 'speciality'), 'input_classes': 'about__field--box, about__details--height', 'div_classes': 'about__field--width100'} self.qi_interest_tags = {'name': 'qi_interest', 'placeholder': 'e.g. Transitions in Care', 'label': 'List your Quality Improvement Interests. Press "enter" to save.', 'model': 'qi_interest', 'filter_field': 'name', 'selected': utils.get_related(person, 'qi_interest'), 'input_classes': 'about__field--box', 'div_classes': 'about__field--width100'} self.expertise_tags = {'name': 'expertise', 'placeholder': 'e.g. 
Nephrotic Syndrome', 'label': 'What is your area of expertise? Press "enter" to save.', 'model': 'descriptor', 'filter_field': 'mesh_heading', 'selected': utils.get_related(person, 'expertise'), 'input_classes': 'about__field--box', 'div_classes': 'about__field--width100'} self.suffix_tags = {'name': 'suffix', 'label': 'Degree/Suffix', 'placeholder': 'e.g. PhD or M.D.', 'model': 'suffix', 'filter_field': 'name', 'selected': utils.get_related(person, 'suffix'), 'input_classes': 'about__field--box', 'div_classes': 'about__field--width100'} self.qi_required = person.qi_required self.last_login = {'name': 'last_login', 'label': 'Last Login', 'type': 'text', 'value': person.last_login_time or '', 'input_classes': 'about__acctinfo--border', 'div_classes': 'about__acctinfo'} self.account_expiration = {'name': 'account_expiration', 'label': 'Account Expiration', 'type': 'text', 'value': person.account_expiration_time or '', 'input_classes': 'about__acctinfo--border', 'div_classes': 'about__acctinfo'} self.account_created = {'name': 'account_created', 'label': 'Account Created', 'type': 'text', 'value': date_joined or '', 'input_classes': 'about__acctinfo--border', 'div_classes': 'about__acctinfo'} self.training_program = {'name': 'training_program', 'label': 'Training Program', 'placeholder': 'If you selected yes, enter your training program here.', 'type': 'text', 'value': person.training or '', 'input_classes': 'about__field--box', 'div_classes': 'about__question--train'}
0.004762
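A minimal sketch of how a Django view might hand the form above to a template, assuming the approver.forms import path and the request.user.person relationship implied by the constructor; the template name is hypothetical.

from django.shortcuts import render
from approver.forms import AboutYouForm  # import path is an assumption

def about_you(request):
    # person defaults to request.user.person inside the constructor
    form = AboutYouForm(user=request.user)
    return render(request, 'approver/about_you.html', {'form': form})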
#!/usr/bin/env python # Mantid Repository : https://github.com/mantidproject/mantid # # Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI, # NScD Oak Ridge National Laboratory, European Spallation Source, # Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS # SPDX - License - Identifier: GPL - 3.0 + import os import requests doc='''.. _v{version}: ========================== Mantid {version} Release Notes ========================== .. contents:: Table of Contents :local: This is a patch release that corrects some significant issues since :ref:`version {version_previous} <v{version_previous}>`. The main changes are: **ADD SUMMARY HERE** Citation -------- Please cite any usage of Mantid as follows: - *Mantid {version}: Manipulation and Analysis Toolkit for Instrument Data.; Mantid Project*. `doi: 10.5286/Software/Mantid{version} <http://dx.doi.org/10.5286/Software/Mantid{version}>`_ - Arnold, O. et al. *Mantid-Data Analysis and Visualization Package for Neutron Scattering and mu-SR Experiments.* Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment 764 (2014): 156-166 `doi: 10.1016/j.nima.2014.07.029 <https://doi.org/10.1016/j.nima.2014.07.029>`_ (`download bibtex <https://raw.githubusercontent.com/mantidproject/mantid/master/docs/source/mantid.bib>`_) Changes in this version ----------------------- {changes} Summary of impact ----------------- {impact} .. _download page: http://download.mantidproject.org .. _forum: http://forum.mantidproject.org .. _GitHub release page: https://github.com/mantidproject/mantid/releases/tag/v{version} ''' def getOauth(oauth = None): oauthfile = os.path.expanduser("~/.ssh/github_oauth") if oauth is None and os.path.exists(oauthfile): print("Found oauth token '%s'" % oauthfile) with open(oauthfile, 'r') as handle: oauth='\n'.join(handle.readlines()) handle.close() oauth=oauth.strip() return oauth def getInfo(number, oauth): urls = ['{keyword}/{number:d}'.format(keyword=item, number=number) for item in ['pulls', 'issues']] endpoint = 'https://api.github.com/repos/mantidproject/mantid/' urls = [endpoint + url for url in urls] req_params={} if oauth is not None: req_params['access_token']=oauth for url in urls: req = requests.get(url, params=req_params) json = req.json() if req.status_code == 403: print('For', number, 'status:', req.status_code, 'skipping') print(json['message']) continue json = req.json() if json.get('message', None) == 'Not Found': print('Failed to find information on', number, 'try as an issue') continue return dict( number = int(json['number']), url = json['html_url'], title = json['title'].strip() ) return None def formatChanges(pullrequests): changes = ['* `{number:d} <{url}>`_ {title} '.format(**pr) for pr in pullrequests] changes = '\n'.join(changes) return changes def formatImpact(pullrequests): length_number = 5 # length of the word 'Issue' length_title = 6 # length of the word 'Impact' for pullrequest in pullrequests: temp = len(str(pullrequest['number'])) if temp > length_number: length_number = temp temp = len(pullrequest['title']) if temp > length_title: length_title = temp impact_format = '| {number:%dd} | {title:%d} | | **unknown** |' % (length_number, length_title) # sorry for this looking wierd impact_join = [length_number + 2, length_title+2, 10, 14] title_format = '|{:%d}|{:%d}|{:%d}|{:%d}|' % tuple(impact_join) title_join = '+' + '+'.join(['='*length for length in impact_join]) + '+' impact_title = [title_format.format(' 
Issue', ' Impact', ' Solution', ' Side Effect'), title_format.format('', '', '', ' Probability'), title_join] impact_join = ['-'*length for length in impact_join] impact_join = '\n+' + '+'.join(impact_join) + '+' impact = [impact_format.format(**pr) + impact_join for pr in pullrequests] for i, line in enumerate(impact_title): impact.insert(i, line) impact.insert(0, impact_join) return '\n'.join(impact) def getPreviousRelease(release, previous): if previous is not None: return previous splitted = release.split('.') splitted[-1] = (str(int(splitted[-1])-1)) return '.'.join(splitted) if __name__ == '__main__': from argparse import ArgumentParser parser = ArgumentParser(description="Generate patch release page") parser.add_argument('--release', required=True) parser.add_argument('--previous', help='specify a particular previous release') parser.add_argument('-p', '--pulls', nargs='+', type=int, help='specify a list of pull requests and/or issues') parser.add_argument('--oauth', help='github oauth token - automatically checks ~/.ssh/github_oauth') args=parser.parse_args() oauth = getOauth(args.oauth) pullrequests = [getInfo(pullrequest, oauth) for pullrequest in args.pulls] pullrequests = [pullrequest for pullrequest in pullrequests if pullrequest is not None] changes = formatChanges(pullrequests) impact = formatImpact(pullrequests) config = dict( version=args.release, version_previous=getPreviousRelease(args.release, args.previous), changes=changes, impact=impact) filename = 'index.rst' print('Writing output to', filename) with open(filename, 'w') as handle: handle.write(doc.format(**config))
0.004924
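A hypothetical invocation of the generator above (the script filename and pull-request numbers are placeholders); it resolves each number through the GitHub API, using an OAuth token from ~/.ssh/github_oauth if one is present, and writes index.rst in the current directory:

python patch_release_notes.py --release 3.13.1 --previous 3.13.0 -p 22345 22410 22433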
import numpy as np
import cmath

# Test program for a single square barrier, using the transfer-matrix method.
m = 9.11e-31                    # electron mass [kg]
a = 1e-10                       # barrier width [m]
E = 0.81 * 1.602e-19            # particle energy [J]
V = 0.80 * 1.602e-19            # barrier height [J]
h = 6.626e-34 / (2 * np.pi)     # reduced Planck constant [J s]

k1 = cmath.sqrt(2 * m * E / h**2)        # wave number outside the barrier
k2 = cmath.sqrt(2 * m * (E - V) / h**2)  # wave number inside the barrier
print(k1)
print(k2)

def d12(k1, k2):
    # Matching matrix across the interface from region 1 into region 2
    return 0.5 * np.matrix(((1 + k2/k1, 1 - k2/k1), (1 - k2/k1, 1 + k2/k1)))

def d21(k1, k2):
    # Matching matrix across the interface from region 2 back into region 1
    return 0.5 * np.matrix(((1 + k1/k2, 1 - k1/k2), (1 - k1/k2, 1 + k1/k2)))

def P2(k2, x):
    # Propagation over a distance x inside the barrier
    return np.matrix(((np.exp(-1j*k2*x), 0), (0, np.exp(1j*k2*x))))

def P1(k1, x):
    # Propagation over a distance x outside the barrier
    return np.matrix(((np.exp(1j*k1*x), 0), (0, np.exp(-1j*k1*x))))

# Total transfer matrix across the barrier: D12 * P2 * D21 * P1
FF = d12(k1, k2) * P2(k2, a) * d21(k1, k2) * P1(k1, a)
print(FF[0, 0])
T = np.absolute(FF[0, 0]) ** -2          # transmission probability T = 1/|M11|^2
print(T)
print(T * np.absolute(FF[1, 0]) ** 2)    # reflection probability R = |M21|^2/|M11|^2
0.043636
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Wrappers for sparse cross operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.framework import deprecated_arg_values from tensorflow.contrib.layers.ops import gen_sparse_feature_cross_op from tensorflow.contrib.util import loader from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import math_ops from tensorflow.python.platform import resource_loader _sparse_feature_cross_op = loader.load_op_library( resource_loader.get_path_to_datafile("_sparse_feature_cross_op.so")) # Default hash key for the FingerprintCat64. SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY = 0xDECAFCAFFE @deprecated_arg_values( "2016-11-20", "The default behavior of sparse_feature_cross is changing, the default\n" "value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n" "From that point on sparse_feature_cross will always use FingerprintCat64\n" "to concatenate the feature fingerprints. And the underlying\n" "_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n" "as deprecated.", hash_key=None) def sparse_feature_cross(inputs, hashed_output=False, num_buckets=0, name=None, hash_key=None): """Crosses a list of Tensor or SparseTensor objects. See sparse_feature_cross_kernel.cc for more details. Args: inputs: List of `SparseTensor` or `Tensor` to be crossed. hashed_output: If true, returns the hash of the cross instead of the string. This will allow us avoiding string manipulations. num_buckets: It is used if hashed_output is true. output = hashed_value%num_buckets if num_buckets > 0 else hashed_value. name: A name prefix for the returned tensors (optional). hash_key: Specify the hash_key that will be used by the `FingerprintCat64` function to combine the crosses fingerprints on SparseFeatureCrossOp. The default value is None, but will become SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY after 2016-11-20 (optional). Returns: A `SparseTensor` with the crossed features. Return type is string if hashed_output=False, int64 otherwise. Raises: TypeError: If the inputs aren't either SparseTensor or Tensor. 
""" if not isinstance(inputs, list): raise TypeError("Inputs must be a list") if not all(isinstance(i, sparse_tensor.SparseTensor) or isinstance(i, ops.Tensor) for i in inputs): raise TypeError("All inputs must be SparseTensors") sparse_inputs = [i for i in inputs if isinstance(i, sparse_tensor.SparseTensor)] dense_inputs = [i for i in inputs if not isinstance(i, sparse_tensor.SparseTensor)] indices = [sp_input.indices for sp_input in sparse_inputs] values = [sp_input.values for sp_input in sparse_inputs] shapes = [sp_input.dense_shape for sp_input in sparse_inputs] out_type = dtypes.int64 if hashed_output else dtypes.string internal_type = dtypes.string for i in range(len(values)): if values[i].dtype != dtypes.string: values[i] = math_ops.to_int64(values[i]) internal_type = dtypes.int64 for i in range(len(dense_inputs)): if dense_inputs[i].dtype != dtypes.string: dense_inputs[i] = math_ops.to_int64(dense_inputs[i]) internal_type = dtypes.int64 if hash_key: indices_out, values_out, shape_out = ( gen_sparse_feature_cross_op.sparse_feature_cross_v2( indices, values, shapes, dense_inputs, hashed_output, num_buckets, hash_key=hash_key, out_type=out_type, internal_type=internal_type, name=name)) else: indices_out, values_out, shape_out = ( gen_sparse_feature_cross_op.sparse_feature_cross( indices, values, shapes, dense_inputs, hashed_output, num_buckets, out_type=out_type, internal_type=internal_type, name=name)) return sparse_tensor.SparseTensor(indices_out, values_out, shape_out) ops.NotDifferentiable("SparseFeatureCross") ops.NotDifferentiable("SparseFeatureCrossV2")
0.00436
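A small usage sketch for the wrapper above, assuming TensorFlow 1.x with the contrib module importable as tensorflow.contrib.layers.python.ops.sparse_feature_cross_op; the feature values are made up.

import tensorflow as tf
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op  # import path is an assumption

# Two single-column string SparseTensors, one feature value per example.
country = tf.SparseTensor(indices=[[0, 0], [1, 0]], values=['US', 'CA'], dense_shape=[2, 1])
language = tf.SparseTensor(indices=[[0, 0], [1, 0]], values=['en', 'fr'], dense_shape=[2, 1])

crossed = sparse_feature_cross_op.sparse_feature_cross(
    [country, language],
    hashed_output=True,
    num_buckets=100,
    hash_key=sparse_feature_cross_op.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)

with tf.Session() as sess:
    # int64 bucket ids for the crossed (country, language) pairs
    print(sess.run(crossed.values))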
# -*- coding: utf-8 -*- # # ceph-deploy documentation build configuration file, created by # sphinx-quickstart on Mon Oct 21 09:32:42 2013. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('_themes')) sys.path.insert(0, os.path.abspath('..')) import ceph_deploy # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'contents' # General information about the project. project = u'ceph-deploy' copyright = u'2013, Inktank' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = ceph_deploy.__version__ # The full version, including alpha/beta/rc tags. release = ceph_deploy.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'ceph' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_themes'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. html_use_smartypants = False # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'ceph-deploydoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'ceph-deploy.tex', u'ceph-deploy Documentation', u'Inktank', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. 
#latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ceph-deploy', u'ceph-deploy Documentation', [u'Inktank'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'ceph-deploy', u'ceph-deploy Documentation', u'Inktank', 'ceph-deploy', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # XXX Uncomment when we are ready to link to ceph docs # Example configuration for intersphinx: refer to the Python standard library. #intersphinx_mapping = {'http://docs.python.org/': None}
0.005877
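A typical build command for this configuration, assuming the conf.py above lives in a docs/source directory of the ceph-deploy checkout (the paths are assumptions about the repository layout); since master_doc is 'contents', the entry page comes from contents.rst:

sphinx-build -b html docs/source docs/build/html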
""" Provides a function for importing a git repository into the lms instance when using a mongo modulestore """ import os import re import StringIO import subprocess import logging from django.conf import settings from django.core import management from django.core.management.base import CommandError from django.utils import timezone from django.utils.translation import ugettext as _ import mongoengine from dashboard.models import CourseImportLog from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from opaque_keys.edx.locations import SlashSeparatedCourseKey log = logging.getLogger(__name__) GIT_REPO_DIR = getattr(settings, 'GIT_REPO_DIR', '/edx/var/app/edxapp/course_repos') GIT_IMPORT_STATIC = getattr(settings, 'GIT_IMPORT_STATIC', True) class GitImportError(Exception): """ Exception class for handling the typical errors in a git import. """ NO_DIR = _("Path {0} doesn't exist, please create it, " "or configure a different path with " "GIT_REPO_DIR").format(GIT_REPO_DIR) URL_BAD = _('Non usable git url provided. Expecting something like:' ' [email protected]:mitocw/edx4edx_lite.git') BAD_REPO = _('Unable to get git log') CANNOT_PULL = _('git clone or pull failed!') XML_IMPORT_FAILED = _('Unable to run import command.') UNSUPPORTED_STORE = _('The underlying module store does not support import.') # Translators: This is an error message when they ask for a # particular version of a git repository and that version isn't # available from the remote source they specified REMOTE_BRANCH_MISSING = _('The specified remote branch is not available.') # Translators: Error message shown when they have asked for a git # repository branch, a specific version within a repository, that # doesn't exist, or there is a problem changing to it. CANNOT_BRANCH = _('Unable to switch to specified branch. Please check ' 'your branch name.') def cmd_log(cmd, cwd): """ Helper function to redirect stderr to stdout and log the command used along with the output. Will raise subprocess.CalledProcessError if command doesn't return 0, and returns the command's output. """ output = subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT) log.debug('Command was: {0!r}. ' 'Working directory was: {1!r}'.format(' '.join(cmd), cwd)) log.debug('Command output was: {0!r}'.format(output)) return output def switch_branch(branch, rdir): """ This will determine how to change the branch of the repo, and then use the appropriate git commands to do so. Raises an appropriate GitImportError exception if there is any issues with changing branches. """ # Get the latest remote try: cmd_log(['git', 'fetch', ], rdir) except subprocess.CalledProcessError as ex: log.exception('Unable to fetch remote: %r', ex.output) raise GitImportError(GitImportError.CANNOT_BRANCH) # Check if the branch is available from the remote. 
cmd = ['git', 'ls-remote', 'origin', '-h', 'refs/heads/{0}'.format(branch), ] try: output = cmd_log(cmd, rdir) except subprocess.CalledProcessError as ex: log.exception('Getting a list of remote branches failed: %r', ex.output) raise GitImportError(GitImportError.CANNOT_BRANCH) if branch not in output: raise GitImportError(GitImportError.REMOTE_BRANCH_MISSING) # Check it the remote branch has already been made locally cmd = ['git', 'branch', '-a', ] try: output = cmd_log(cmd, rdir) except subprocess.CalledProcessError as ex: log.exception('Getting a list of local branches failed: %r', ex.output) raise GitImportError(GitImportError.CANNOT_BRANCH) branches = [] for line in output.split('\n'): branches.append(line.replace('*', '').strip()) if branch not in branches: # Checkout with -b since it is remote only cmd = ['git', 'checkout', '--force', '--track', '-b', branch, 'origin/{0}'.format(branch), ] try: cmd_log(cmd, rdir) except subprocess.CalledProcessError as ex: log.exception('Unable to checkout remote branch: %r', ex.output) raise GitImportError(GitImportError.CANNOT_BRANCH) # Go ahead and reset hard to the newest version of the branch now that we know # it is local. try: cmd_log(['git', 'reset', '--hard', 'origin/{0}'.format(branch), ], rdir) except subprocess.CalledProcessError as ex: log.exception('Unable to reset to branch: %r', ex.output) raise GitImportError(GitImportError.CANNOT_BRANCH) def add_repo(repo, rdir_in, branch=None): """ This will add a git repo into the mongo modulestore. If branch is left as None, it will fetch the most recent version of the current branch. """ # pylint: disable=too-many-statements # Set defaults even if it isn't defined in settings mongo_db = { 'host': 'localhost', 'port': 27017, 'user': '', 'password': '', 'db': 'xlog', } # Allow overrides if hasattr(settings, 'MONGODB_LOG'): for config_item in ['host', 'user', 'password', 'db', 'port']: mongo_db[config_item] = settings.MONGODB_LOG.get( config_item, mongo_db[config_item]) if not os.path.isdir(GIT_REPO_DIR): raise GitImportError(GitImportError.NO_DIR) # pull from git if not (repo.endswith('.git') or repo.startswith(('http:', 'https:', 'git:', 'file:'))): raise GitImportError(GitImportError.URL_BAD) if rdir_in: rdir = os.path.basename(rdir_in) else: rdir = repo.rsplit('/', 1)[-1].rsplit('.git', 1)[0] log.debug('rdir = {0}'.format(rdir)) rdirp = '{0}/{1}'.format(GIT_REPO_DIR, rdir) if os.path.exists(rdirp): log.info('directory already exists, doing a git pull instead ' 'of git clone') cmd = ['git', 'pull', ] cwd = rdirp else: cmd = ['git', 'clone', repo, ] cwd = GIT_REPO_DIR cwd = os.path.abspath(cwd) try: ret_git = cmd_log(cmd, cwd=cwd) except subprocess.CalledProcessError as ex: log.exception('Error running git pull: %r', ex.output) raise GitImportError(GitImportError.CANNOT_PULL) if branch: switch_branch(branch, rdirp) # get commit id cmd = ['git', 'log', '-1', '--format=%H', ] try: commit_id = cmd_log(cmd, cwd=rdirp) except subprocess.CalledProcessError as ex: log.exception('Unable to get git log: %r', ex.output) raise GitImportError(GitImportError.BAD_REPO) ret_git += '\nCommit ID: {0}'.format(commit_id) # get branch cmd = ['git', 'symbolic-ref', '--short', 'HEAD', ] try: branch = cmd_log(cmd, cwd=rdirp) except subprocess.CalledProcessError as ex: # I can't discover a way to excercise this, but git is complex # so still logging and raising here in case. 
log.exception('Unable to determine branch: %r', ex.output) raise GitImportError(GitImportError.BAD_REPO) ret_git += '{0}Branch: {1}'.format(' \n', branch) # Get XML logging logger and capture debug to parse results output = StringIO.StringIO() import_log_handler = logging.StreamHandler(output) import_log_handler.setLevel(logging.DEBUG) logger_names = ['xmodule.modulestore.xml_importer', 'git_add_course', 'xmodule.modulestore.xml', 'xmodule.seq_module', ] loggers = [] for logger_name in logger_names: logger = logging.getLogger(logger_name) logger.setLevel(logging.DEBUG) logger.addHandler(import_log_handler) loggers.append(logger) try: management.call_command('import', GIT_REPO_DIR, rdir, nostatic=not GIT_IMPORT_STATIC) except CommandError: raise GitImportError(GitImportError.XML_IMPORT_FAILED) except NotImplementedError: raise GitImportError(GitImportError.UNSUPPORTED_STORE) ret_import = output.getvalue() # Remove handler hijacks for logger in loggers: logger.setLevel(logging.NOTSET) logger.removeHandler(import_log_handler) course_key = None location = 'unknown' # extract course ID from output of import-command-run and make symlink # this is needed in order for custom course scripts to work match = re.search(r'(?ms)===> IMPORTING course (\S+)', ret_import) if match: course_id = match.group(1) try: course_key = CourseKey.from_string(course_id) except InvalidKeyError: course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) cdir = '{0}/{1}'.format(GIT_REPO_DIR, course_key.course) log.debug('Studio course dir = {0}'.format(cdir)) if os.path.exists(cdir) and not os.path.islink(cdir): log.debug(' -> exists, but is not symlink') log.debug(subprocess.check_output(['ls', '-l', ], cwd=os.path.abspath(cdir))) try: os.rmdir(os.path.abspath(cdir)) except OSError: log.exception('Failed to remove course directory') if not os.path.exists(cdir): log.debug(' -> creating symlink between {0} and {1}'.format(rdirp, cdir)) try: os.symlink(os.path.abspath(rdirp), os.path.abspath(cdir)) except OSError: log.exception('Unable to create course symlink') log.debug(subprocess.check_output(['ls', '-l', ], cwd=os.path.abspath(cdir))) # store import-command-run output in mongo mongouri = 'mongodb://{user}:{password}@{host}:{port}/{db}'.format(**mongo_db) try: if mongo_db['user'] and mongo_db['password']: mdb = mongoengine.connect(mongo_db['db'], host=mongouri) else: mdb = mongoengine.connect(mongo_db['db'], host=mongo_db['host'], port=mongo_db['port']) except mongoengine.connection.ConnectionError: log.exception('Unable to connect to mongodb to save log, please ' 'check MONGODB_LOG settings') cil = CourseImportLog( course_id=course_key, location=location, repo_dir=rdir, created=timezone.now(), import_log=ret_import, git_log=ret_git, ) cil.save() log.debug('saved CourseImportLog for {0}'.format(cil.course_id)) mdb.disconnect()
0.001025
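For illustration, a direct call into the module above (the dashboard.git_import import path is an assumption; in practice this code is normally driven through the git_add_course management command):

from dashboard import git_import  # import path is an assumption

# Clone (or pull) the course repository into GIT_REPO_DIR and import it
# into the modulestore, tracking the given branch.
git_import.add_repo('https://github.com/mitocw/edx4edx_lite.git',
                    rdir_in=None, branch='master')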
#!/usr/bin/env python # modify-non-primary-files.py - Fake build while modifying files -*- python -*- # # This source file is part of the Swift.org open source project # # Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors # Licensed under Apache License v2.0 with Runtime Library Exception # # See http://swift.org/LICENSE.txt for license information # See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors # # ---------------------------------------------------------------------------- # # modify-non-primary-files.py simulates a build where the user is modifying the # source files during compilation. # # ---------------------------------------------------------------------------- from __future__ import print_function import os import sys assert sys.argv[1] == '-frontend' if '-primary-file' in sys.argv: primaryFileIndex = sys.argv.index('-primary-file') + 1 primaryFile = sys.argv[primaryFileIndex] # Modify all files after the primary file. # Ideally this would modify every non-primary file, but that's harder to # infer without actually parsing the arguments. for file in sys.argv[primaryFileIndex + 1:]: if file.startswith('-'): break os.utime(file, None) else: primaryFile = None outputFile = sys.argv[sys.argv.index('-o') + 1] # Update the output file mtime, or create it if necessary. # From http://stackoverflow.com/a/1160227. with open(outputFile, 'a'): os.utime(outputFile, None) if primaryFile: print("Handled", os.path.basename(primaryFile)) else: print("Produced", os.path.basename(outputFile))
0
import warnings from django.apps import apps from django.utils.deprecation import RemovedInDjango19Warning warnings.warn( "The utilities in django.db.models.loading are deprecated " "in favor of the new application loading system.", RemovedInDjango19Warning, stacklevel=2) __all__ = ('get_apps', 'get_app', 'get_models', 'get_model', 'register_models', 'load_app', 'app_cache_ready') # Backwards-compatibility for private APIs during the deprecation period. UnavailableApp = LookupError cache = apps # These methods were always module level, so are kept that way for backwards # compatibility. get_apps = apps.get_apps get_app_package = apps.get_app_package get_app_path = apps.get_app_path get_app_paths = apps.get_app_paths get_app = apps.get_app get_models = apps.get_models get_model = apps.get_model register_models = apps.register_models load_app = apps.load_app app_cache_ready = apps.app_cache_ready # This method doesn't return anything interesting in Django 1.6. Maintain it # just for backwards compatibility until this module is deprecated. def get_app_errors(): try: return apps.app_errors except AttributeError: apps.app_errors = {} return apps.app_errors
0.000813
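The shim above exists so that old call sites keep working during the deprecation window; the app and model names below are illustrative:

# Deprecated path: emits RemovedInDjango19Warning via the module above.
from django.db.models.loading import get_model
Entry = get_model('blog', 'Entry')

# Preferred replacement using the application registry directly.
from django.apps import apps
Entry = apps.get_model('blog', 'Entry')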
# # Copyright 2015 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Refer to the README and COPYING files for full details of the license # import errno import logging import os import blivet import blivet.formats import blivet.formats.fs import blivet.size from blivet.devices import LVMVolumeGroupDevice from blivet.devices import LVMThinPoolDevice from blivet.devices import LVMLogicalVolumeDevice from blivet.devices import LVMThinLogicalVolumeDevice from blivet import udev from vdsm import utils import fstab import exception as ge from . import makePublic log = logging.getLogger("Gluster") _pvCreateCommandPath = utils.CommandPath("pvcreate", "/sbin/pvcreate", "/usr/sbin/pvcreate",) _vgCreateCommandPath = utils.CommandPath("vgcreate", "/sbin/vgcreate", "/usr/sbin/vgcreate",) _lvconvertCommandPath = utils.CommandPath("lvconvert", "/sbin/lvconvert", "/usr/sbin/lvconvert",) _lvchangeCommandPath = utils.CommandPath("lvchange", "/sbin/lvchange", "/usr/sbin/lvchange",) _vgscanCommandPath = utils.CommandPath("vgscan", "/sbin/vgscan", "/usr/sbin/vgscan",) # All size are in MiB unless otherwise specified DEFAULT_CHUNK_SIZE_KB = 256 DEFAULT_METADATA_SIZE_KB = 16777216 MIN_VG_SIZE = 1048576 MIN_METADATA_PERCENT = 0.005 DEFAULT_FS_TYPE = "xfs" DEFAULT_MOUNT_OPTIONS = "inode64,noatime" def _getDeviceDict(device, createBrick=False): info = {'name': device.name, 'devPath': device.path, 'devUuid': device.uuid or '', 'bus': device.bus or '', 'model': '', 'fsType': '', 'mountPoint': '', 'uuid': '', 'createBrick': createBrick} if isinstance(device.size, blivet.size.Size): info['size'] = '%s' % device.size.convertTo(spec="MiB") else: info['size'] = '%s' % device.size if not info['bus'] and device.parents: info['bus'] = device.parents[0].bus if device.model: info['model'] = "%s (%s)" % (device.model, device.type) else: info['model'] = device.type if device.format: info['uuid'] = device.format.uuid or '' # lvm vg will not have sysfs path if hasattr(udev, 'get_device'): dev = udev.get_device(device.sysfsPath) or {} elif hasattr(udev, 'udev_get_device'): dev = udev.udev_get_device(device.sysfsPath) or {} else: dev = {} info['fsType'] = device.format.type or dev.get('ID_FS_TYPE', '') if hasattr(device.format, 'mountpoint'): info['mountPoint'] = device.format.mountpoint or '' return info def _parseDevices(devices): deviceList = [] for device in devices: deviceList.append(_getDeviceDict(device, _canCreateBrick(device))) return deviceList def _canCreateBrick(device): if not device or device.kids > 0 or device.format.type or \ hasattr(device.format, 'mountpoint') or \ device.type in ['cdrom', 'lvmvg', 'lvmthinpool', 'lvmlv', 'lvmthinlv']: return False return True def _reset_blivet(blivetEnv): try: blivetEnv.reset() except (blivet.errors.UnusableConfigurationError, blivet.errors.StorageError) as e: log.error("Error: %s" % e.message) @makePublic def 
storageDevicesList(): blivetEnv = blivet.Blivet() _reset_blivet(blivetEnv) return _parseDevices(blivetEnv.devices) @makePublic def createBrick(brickName, mountPoint, devNameList, fsType=DEFAULT_FS_TYPE, raidParams={}): def _getDeviceList(devNameList): return [blivetEnv.devicetree.getDeviceByName(devName.split("/")[-1]) for devName in devNameList] def _makePartition(deviceList): pvDeviceList = [] doPartitioning = False for dev in deviceList: if dev.type not in ['disk', 'dm-multipath']: pvDeviceList.append(dev) else: blivetEnv.initializeDisk(dev) part = blivetEnv.newPartition(fmt_type="lvmpv", grow=True, parents=[dev]) blivetEnv.createDevice(part) pvDeviceList.append(part) doPartitioning = True if doPartitioning: blivet.partitioning.doPartitioning(blivetEnv) return pvDeviceList def _createPV(deviceList, alignment=0): def _createAlignedPV(deviceList, alignment): for dev in deviceList: # bz#1178705: Blivet always creates pv with 1MB dataalignment # Workaround: Till blivet fixes the issue, we use lvm pvcreate rc, out, err = utils.execCmd([_pvCreateCommandPath.cmd, '--dataalignment', '%sk' % alignment, dev.path]) if rc: raise ge.GlusterHostStorageDevicePVCreateFailedException( dev.path, alignment, rc, out, err) _reset_blivet(blivetEnv) return _getDeviceList([dev.name for dev in deviceList]) if alignment: blivetEnv.doIt() return _createAlignedPV(deviceList, alignment) for dev in deviceList: lvmpv = blivet.formats.getFormat("lvmpv", device=dev.path) blivetEnv.formatDevice(dev, lvmpv) blivet.partitioning.doPartitioning(blivetEnv) return deviceList def _createVG(vgName, deviceList, stripeSize=0): if stripeSize: # bz#1198568: Blivet always creates vg with 1MB stripe size # Workaround: Till blivet fixes the issue, use vgcreate command devices = ','.join([device.path for device in deviceList]) rc, out, err = utils.execCmd([_vgCreateCommandPath.cmd, '-s', '%sk' % stripeSize, vgName, devices]) if rc: raise ge.GlusterHostStorageDeviceVGCreateFailedException( vgName, devices, stripeSize, rc, out, err) blivetEnv.reset() vg = blivetEnv.devicetree.getDeviceByName(vgName) else: vg = LVMVolumeGroupDevice(vgName, parents=deviceList) blivetEnv.createDevice(vg) return vg def _createThinPool(poolName, vg, alignment=0, poolMetaDataSize=0, poolDataSize=0): if not alignment: # bz#1180228: blivet doesn't handle percentage-based sizes properly # Workaround: Till the bz gets fixed, we take only 99% size from vg pool = LVMThinPoolDevice(poolName, parents=[vg], size=(vg.size * 99 / 100), grow=True) blivetEnv.createDevice(pool) return pool else: metaName = "meta-%s" % poolName vgPoolName = "%s/%s" % (vg.name, poolName) metaLv = LVMLogicalVolumeDevice( metaName, parents=[vg], size=blivet.size.Size('%d KiB' % poolMetaDataSize)) poolLv = LVMLogicalVolumeDevice( poolName, parents=[vg], size=blivet.size.Size('%d KiB' % poolDataSize)) blivetEnv.createDevice(metaLv) blivetEnv.createDevice(poolLv) blivetEnv.doIt() # bz#1100514: LVM2 currently only supports physical extent sizes # that are a power of 2. Till that support is available we need # to use lvconvert to achive that. # bz#1179826: blivet doesn't support lvconvert functionality. 
# Workaround: Till the bz gets fixed, lvconvert command is used rc, out, err = utils.execCmd([_lvconvertCommandPath.cmd, '--chunksize', '%sK' % alignment, '--thinpool', vgPoolName, '--poolmetadata', "%s/%s" % (vg.name, metaName), '--poolmetadataspar', 'n', '-y']) if rc: raise ge.GlusterHostStorageDeviceLVConvertFailedException( vg.path, alignment, rc, out, err) rc, out, err = utils.execCmd([_lvchangeCommandPath.cmd, '--zero', 'n', vgPoolName]) if rc: raise ge.GlusterHostStorageDeviceLVChangeFailedException( vgPoolName, rc, out, err) _reset_blivet(blivetEnv) return blivetEnv.devicetree.getDeviceByName(poolLv.name) if os.path.ismount(mountPoint): raise ge.GlusterHostStorageMountPointInUseException(mountPoint) vgName = "vg-" + brickName poolName = "pool-" + brickName alignment = 0 chunkSize = 0 poolDataSize = 0 count = 0 metaDataSize = DEFAULT_METADATA_SIZE_KB if raidParams.get('type') == '6': count = raidParams['pdCount'] - 2 alignment = raidParams['stripeSize'] * count chunkSize = alignment elif raidParams.get('type') == '10': count = raidParams['pdCount'] / 2 alignment = raidParams['stripeSize'] * count chunkSize = DEFAULT_CHUNK_SIZE_KB blivetEnv = blivet.Blivet() _reset_blivet(blivetEnv) # get the devices list from the device name deviceList = _getDeviceList(devNameList) # raise an error when any device not actually found in the given list notFoundList = set(devNameList).difference( set([dev.name for dev in deviceList])) if notFoundList: raise ge.GlusterHostStorageDeviceNotFoundException(notFoundList) # raise an error when any device is used already in the given list inUseList = set(devNameList).difference(set([not _canCreateBrick( dev) or dev.name for dev in deviceList])) if inUseList: raise ge.GlusterHostStorageDeviceInUseException(inUseList) pvDeviceList = _makePartition(deviceList) pvDeviceList = _createPV(pvDeviceList, alignment) vg = _createVG(vgName, pvDeviceList, raidParams.get('stripeSize', 0)) # The following calculation is based on the redhat storage performance doc # http://docbuilder.usersys.redhat.com/22522 # /#chap-Configuring_Red_Hat_Storage_for_Enhancing_Performance # create ~16GB metadata LV (metaDataSize) that has a size which is # a multiple of RAID stripe width if it is > minimum vg size # otherwise allocate a minimum of 0.5% of the data device size # and create data LV (poolDataSize) that has a size which is # a multiple of stripe width if alignment: vgSizeKib = int(vg.size.convertTo(spec="KiB")) if vg.size.convertTo(spec='MiB') < MIN_VG_SIZE: metaDataSize = vgSizeKib * MIN_METADATA_PERCENT poolDataSize = vgSizeKib - metaDataSize metaDataSize = (metaDataSize - (metaDataSize % alignment)) poolDataSize = (poolDataSize - (poolDataSize % alignment)) # Creating a thin pool from the data LV and the metadata LV # lvconvert --chunksize alignment --thinpool VOLGROUP/thin_pool # --poolmetadata VOLGROUP/metadata_device_name pool = _createThinPool(poolName, vg, chunkSize, metaDataSize, poolDataSize) thinlv = LVMThinLogicalVolumeDevice(brickName, parents=[pool], size=pool.size, grow=True) blivetEnv.createDevice(thinlv) blivetEnv.doIt() if fsType != DEFAULT_FS_TYPE: log.error("fstype %s is currently unsupported" % fsType) raise ge.GlusterHostStorageDeviceMkfsFailedException( thinlv.path, alignment, raidParams.get('stripeSize', 0), fsType) format = blivet.formats.getFormat(DEFAULT_FS_TYPE, device=thinlv.path) format._defaultFormatOptions = ["-f", "-i", "size=512", "-n", "size=8192"] if raidParams.get('type') == '6': format._defaultFormatOptions += ["-d", "sw=%s,su=%sk" % ( count, 
raidParams.get('stripeSize'))] blivetEnv.formatDevice(thinlv, format) blivetEnv.doIt() try: os.makedirs(mountPoint) except OSError as e: if errno.EEXIST != e.errno: errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename) raise ge.GlusterHostStorageDeviceMakeDirsFailedException( err=[errMsg]) thinlv.format.setup(mountpoint=mountPoint) blivetEnv.doIt() # bz#1230495: lvm devices are invisible and appears only after vgscan # Workaround: Till the bz gets fixed, We use vgscan to refresh LVM devices rc, out, err = utils.execCmd([_vgscanCommandPath.cmd]) if rc: raise ge.GlusterHostStorageDeviceVGScanFailedException(rc, out, err) fstab.FsTab().add(thinlv.path, mountPoint, DEFAULT_FS_TYPE) return _getDeviceDict(thinlv)
0
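A hypothetical call into the module above, as it might be issued on a Gluster storage node (device names and RAID geometry are made up; only the keys read by createBrick are shown):

createBrick('brick1', '/gluster/brick1', ['sdb', 'sdc'],
            fsType=DEFAULT_FS_TYPE,
            raidParams={'type': '6', 'pdCount': 6, 'stripeSize': 256})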
import os import textwrap import pytest from tests.lib import ( _create_test_package_with_subdirectory, path_to_url, pyversion, requirements_file, ) from tests.lib.local_repos import local_checkout @pytest.mark.network def test_requirements_file(script): """ Test installing from a requirements file. """ other_lib_name, other_lib_version = 'anyjson', '0.3' script.scratch_path.join("initools-req.txt").write(textwrap.dedent("""\ INITools==0.2 # and something else to test out: %s<=%s """ % (other_lib_name, other_lib_version))) result = script.pip( 'install', '-r', script.scratch_path / 'initools-req.txt' ) assert ( script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion in result.files_created ) assert script.site_packages / 'initools' in result.files_created assert result.files_created[script.site_packages / other_lib_name].dir fn = '%s-%s-py%s.egg-info' % (other_lib_name, other_lib_version, pyversion) assert result.files_created[script.site_packages / fn].dir def test_schema_check_in_requirements_file(script): """ Test installing from a requirements file with an invalid vcs schema.. """ script.scratch_path.join("file-egg-req.txt").write( "\n%s\n" % ( "git://github.com/alex/django-fixture-generator.git" "#egg=fixture_generator" ) ) with pytest.raises(AssertionError): script.pip( "install", "-vvv", "-r", script.scratch_path / "file-egg-req.txt" ) def test_relative_requirements_file(script, data): """ Test installing from a requirements file with a relative path. For path URLs, use an egg= definition. """ egg_info_file = ( script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion ) egg_link_file = ( script.site_packages / 'FSPkg.egg-link' ) package_folder = script.site_packages / 'fspkg' # Compute relative install path to FSPkg from scratch path. full_rel_path = data.packages.join('FSPkg') - script.scratch_path full_rel_url = 'file:' + full_rel_path + '#egg=FSPkg' embedded_rel_path = script.scratch_path.join(full_rel_path) # For each relative path, install as either editable or not using either # URLs with egg links or not. for req_path in (full_rel_path, full_rel_url, embedded_rel_path): req_path = req_path.replace(os.path.sep, '/') # Regular install. with requirements_file(req_path + '\n', script.scratch_path) as reqs_file: result = script.pip('install', '-vvv', '-r', reqs_file.name, cwd=script.scratch_path) assert egg_info_file in result.files_created, str(result) assert package_folder in result.files_created, str(result) script.pip('uninstall', '-y', 'fspkg') # Editable install. with requirements_file('-e ' + req_path + '\n', script.scratch_path) as reqs_file: result = script.pip('install', '-vvv', '-r', reqs_file.name, cwd=script.scratch_path) assert egg_link_file in result.files_created, str(result) script.pip('uninstall', '-y', 'fspkg') @pytest.mark.network def test_multiple_requirements_files(script, tmpdir): """ Test installing from multiple nested requirements files. 
""" other_lib_name, other_lib_version = 'anyjson', '0.3' script.scratch_path.join("initools-req.txt").write( textwrap.dedent(""" -e %s@10#egg=INITools -r %s-req.txt """) % ( local_checkout( 'svn+http://svn.colorstudy.com/INITools/trunk', tmpdir.join("cache"), ), other_lib_name ), ) script.scratch_path.join("%s-req.txt" % other_lib_name).write( "%s<=%s" % (other_lib_name, other_lib_version) ) result = script.pip( 'install', '-r', script.scratch_path / 'initools-req.txt' ) assert result.files_created[script.site_packages / other_lib_name].dir fn = '%s-%s-py%s.egg-info' % (other_lib_name, other_lib_version, pyversion) assert result.files_created[script.site_packages / fn].dir assert script.venv / 'src' / 'initools' in result.files_created def test_package_in_constraints_and_dependencies(script, data): script.scratch_path.join("constraints.txt").write( "TopoRequires2==0.0.1\nTopoRequires==0.0.1" ) result = script.pip('install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'constraints.txt', 'TopoRequires2') assert 'installed TopoRequires-0.0.1' in result.stdout def test_multiple_constraints_files(script, data): script.scratch_path.join("outer.txt").write("-c inner.txt") script.scratch_path.join("inner.txt").write( "Upper==1.0") result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'outer.txt', 'Upper') assert 'installed Upper-1.0' in result.stdout def test_respect_order_in_requirements_file(script, data): script.scratch_path.join("frameworks-req.txt").write(textwrap.dedent("""\ parent child simple """)) result = script.pip( 'install', '--no-index', '-f', data.find_links, '-r', script.scratch_path / 'frameworks-req.txt' ) downloaded = [line for line in result.stdout.split('\n') if 'Collecting' in line] assert 'parent' in downloaded[0], ( 'First download should be "parent" but was "%s"' % downloaded[0] ) assert 'child' in downloaded[1], ( 'Second download should be "child" but was "%s"' % downloaded[1] ) assert 'simple' in downloaded[2], ( 'Third download should be "simple" but was "%s"' % downloaded[2] ) def test_install_local_editable_with_extras(script, data): to_install = data.packages.join("LocalExtras") res = script.pip( 'install', '-e', to_install + '[bar]', '--process-dependency-links', expect_error=False, expect_stderr=True, ) assert script.site_packages / 'easy-install.pth' in res.files_updated, ( str(res) ) assert ( script.site_packages / 'LocalExtras.egg-link' in res.files_created ), str(res) assert script.site_packages / 'simple' in res.files_created, str(res) def test_install_collected_dependencies_first(script): result = script.pip_install_local( 'toporequires2', ) text = [line for line in result.stdout.split('\n') if 'Installing' in line][0] assert text.endswith('toporequires2') @pytest.mark.network def test_install_local_editable_with_subdirectory(script): version_pkg_path = _create_test_package_with_subdirectory(script, 'version_subdir') result = script.pip( 'install', '-e', '%s#egg=version_subpkg&subdirectory=version_subdir' % ('git+%s' % path_to_url(version_pkg_path),) ) result.assert_installed('version-subpkg', sub_dir='version_subdir') @pytest.mark.network def test_install_local_with_subdirectory(script): version_pkg_path = _create_test_package_with_subdirectory(script, 'version_subdir') result = script.pip( 'install', '%s#egg=version_subpkg&subdirectory=version_subdir' % ('git+' + path_to_url(version_pkg_path),) ) result.assert_installed('version_subpkg.py', editable=False) @pytest.mark.network def 
test_wheel_user_with_prefix_in_pydistutils_cfg( script, data, virtualenv, common_wheels): # Make sure wheel is available in the virtualenv script.pip('install', 'wheel', '--no-index', '-f', common_wheels) virtualenv.system_site_packages = True if os.name == 'posix': user_filename = ".pydistutils.cfg" else: user_filename = "pydistutils.cfg" user_cfg = os.path.join(os.path.expanduser('~'), user_filename) script.scratch_path.join("bin").mkdir() with open(user_cfg, "w") as cfg: cfg.write(textwrap.dedent(""" [install] prefix=%s""" % script.scratch_path)) result = script.pip( 'install', '--user', '--no-index', '-f', data.find_links, '-f', common_wheels, 'requiresupper') # Check that we are really installing a wheel assert 'Running setup.py install for requiresupper' not in result.stdout assert 'installed requiresupper' in result.stdout def test_install_option_in_requirements_file(script, data, virtualenv): """ Test --install-option in requirements file overrides same option in cli """ script.scratch_path.join("home1").mkdir() script.scratch_path.join("home2").mkdir() script.scratch_path.join("reqs.txt").write( textwrap.dedent( """simple --install-option='--home=%s'""" % script.scratch_path.join("home1"))) result = script.pip( 'install', '--no-index', '-f', data.find_links, '-r', script.scratch_path / 'reqs.txt', '--install-option=--home=%s' % script.scratch_path.join("home2"), expect_stderr=True) package_dir = script.scratch / 'home1' / 'lib' / 'python' / 'simple' assert package_dir in result.files_created def test_constraints_not_installed_by_default(script, data): script.scratch_path.join("c.txt").write("requiresupper") result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'c.txt', 'Upper') assert 'requiresupper' not in result.stdout def test_constraints_only_causes_error(script, data): script.scratch_path.join("c.txt").write("requiresupper") result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'c.txt', expect_error=True) assert 'installed requiresupper' not in result.stdout def test_constraints_local_editable_install_causes_error(script, data): script.scratch_path.join("constraints.txt").write( "singlemodule==0.0.0" ) to_install = data.src.join("singlemodule") result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'constraints.txt', '-e', to_install, expect_error=True) assert 'Could not satisfy constraints for' in result.stderr def test_constraints_local_editable_install_pep518(script, data): to_install = data.src.join("pep518-3.0") script.pip('download', 'setuptools', 'wheel', '-d', data.packages) script.pip( 'install', '--no-index', '-f', data.find_links, '-e', to_install) def test_constraints_local_install_causes_error(script, data): script.scratch_path.join("constraints.txt").write( "singlemodule==0.0.0" ) to_install = data.src.join("singlemodule") result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'constraints.txt', to_install, expect_error=True) assert 'Could not satisfy constraints for' in result.stderr def test_constraints_constrain_to_local_editable(script, data): to_install = data.src.join("singlemodule") script.scratch_path.join("constraints.txt").write( "-e %s#egg=singlemodule" % path_to_url(to_install) ) result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'constraints.txt', 'singlemodule') assert 'Running setup.py develop for singlemodule' in result.stdout def 
test_constraints_constrain_to_local(script, data): to_install = data.src.join("singlemodule") script.scratch_path.join("constraints.txt").write( "%s#egg=singlemodule" % path_to_url(to_install) ) result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'constraints.txt', 'singlemodule') assert 'Running setup.py install for singlemodule' in result.stdout def test_constrained_to_url_install_same_url(script, data): to_install = data.src.join("singlemodule") constraints = path_to_url(to_install) + "#egg=singlemodule" script.scratch_path.join("constraints.txt").write(constraints) result = script.pip( 'install', '--no-index', '-f', data.find_links, '-c', script.scratch_path / 'constraints.txt', to_install) assert ('Running setup.py install for singlemodule' in result.stdout), str(result) @pytest.mark.network def test_double_install_spurious_hash_mismatch( script, tmpdir, data, common_wheels): """Make sure installing the same hashed sdist twice doesn't throw hash mismatch errors. Really, this is a test that we disable reads from the wheel cache in hash-checking mode. Locally, implicitly built wheels of sdists obviously have different hashes from the original archives. Comparing against those causes spurious mismatch errors. """ # Install wheel package, otherwise, it won't try to build wheels. script.pip('install', 'wheel', '--no-index', '-f', common_wheels) with requirements_file('simple==1.0 --hash=sha256:393043e672415891885c9a2a' '0929b1af95fb866d6ca016b42d2e6ce53619b653', tmpdir) as reqs_file: # Install a package (and build its wheel): result = script.pip_install_local( '--find-links', data.find_links, '-f', common_wheels, '-r', reqs_file.abspath, expect_error=False) assert 'Successfully installed simple-1.0' in str(result) # Uninstall it: script.pip('uninstall', '-y', 'simple', expect_error=False) # Then install it again. We should not hit a hash mismatch, and the # package should install happily. 
result = script.pip_install_local( '--find-links', data.find_links, '-f', common_wheels, '-r', reqs_file.abspath, expect_error=False) assert 'Successfully installed simple-1.0' in str(result) def test_install_with_extras_from_constraints(script, data): to_install = data.packages.join("LocalExtras") script.scratch_path.join("constraints.txt").write( "%s#egg=LocalExtras[bar]" % path_to_url(to_install) ) result = script.pip_install_local( '-c', script.scratch_path / 'constraints.txt', 'LocalExtras') assert script.site_packages / 'simple' in result.files_created def test_install_with_extras_from_install(script, data): to_install = data.packages.join("LocalExtras") script.scratch_path.join("constraints.txt").write( "%s#egg=LocalExtras" % path_to_url(to_install) ) result = script.pip_install_local( '-c', script.scratch_path / 'constraints.txt', 'LocalExtras[baz]') assert script.site_packages / 'singlemodule.py'in result.files_created def test_install_with_extras_joined(script, data): to_install = data.packages.join("LocalExtras") script.scratch_path.join("constraints.txt").write( "%s#egg=LocalExtras[bar]" % path_to_url(to_install) ) result = script.pip_install_local( '-c', script.scratch_path / 'constraints.txt', 'LocalExtras[baz]' ) assert script.site_packages / 'simple' in result.files_created assert script.site_packages / 'singlemodule.py'in result.files_created def test_install_with_extras_editable_joined(script, data): to_install = data.packages.join("LocalExtras") script.scratch_path.join("constraints.txt").write( "-e %s#egg=LocalExtras[bar]" % path_to_url(to_install) ) result = script.pip_install_local( '-c', script.scratch_path / 'constraints.txt', 'LocalExtras[baz]') assert script.site_packages / 'simple' in result.files_created assert script.site_packages / 'singlemodule.py'in result.files_created def test_install_distribution_full_union(script, data): to_install = data.packages.join("LocalExtras") result = script.pip_install_local( to_install, to_install + "[bar]", to_install + "[baz]") assert 'Running setup.py install for LocalExtras' in result.stdout assert script.site_packages / 'simple' in result.files_created assert script.site_packages / 'singlemodule.py' in result.files_created def test_install_distribution_duplicate_extras(script, data): to_install = data.packages.join("LocalExtras") package_name = to_install + "[bar]" with pytest.raises(AssertionError): result = script.pip_install_local(package_name, package_name) assert 'Double requirement given: %s' % package_name in result.stderr def test_install_distribution_union_with_constraints(script, data): to_install = data.packages.join("LocalExtras") script.scratch_path.join("constraints.txt").write( "%s[bar]" % to_install) result = script.pip_install_local( '-c', script.scratch_path / 'constraints.txt', to_install + '[baz]') assert 'Running setup.py install for LocalExtras' in result.stdout assert script.site_packages / 'singlemodule.py' in result.files_created def test_install_distribution_union_with_versions(script, data): to_install_001 = data.packages.join("LocalExtras") to_install_002 = data.packages.join("LocalExtras-0.0.2") result = script.pip_install_local( to_install_001 + "[bar]", to_install_002 + "[baz]") assert ("Successfully installed LocalExtras-0.0.1 simple-3.0 " + "singlemodule-0.0.1" in result.stdout) @pytest.mark.xfail def test_install_distribution_union_conflicting_extras(script, data): # LocalExtras requires simple==1.0, LocalExtras[bar] requires simple==2.0; # without a resolver, pip does not detect the 
conflict between simple==1.0 # and simple==2.0. Once a resolver is added, this conflict should be # detected. to_install = data.packages.join("LocalExtras-0.0.2") result = script.pip_install_local(to_install, to_install + "[bar]", expect_error=True) assert 'installed' not in result.stdout assert "Conflict" in result.stderr def test_install_unsupported_wheel_link_with_marker(script): script.scratch_path.join("with-marker.txt").write( textwrap.dedent("""\ %s; %s """) % ( 'https://github.com/a/b/c/asdf-1.5.2-cp27-none-xyz.whl', 'sys_platform == "xyz"' ) ) result = script.pip( 'install', '-r', script.scratch_path / 'with-marker.txt', expect_error=False, ) assert ("Ignoring asdf: markers 'sys_platform == \"xyz\"' don't match " "your environment") in result.stdout assert len(result.files_created) == 0 def test_install_unsupported_wheel_file(script, data): # Trying to install a local wheel with an incompatible version/type # should fail. script.scratch_path.join("wheel-file.txt").write(textwrap.dedent("""\ %s """ % data.packages.join("simple.dist-0.1-py1-none-invalid.whl"))) result = script.pip( 'install', '-r', script.scratch_path / 'wheel-file.txt', expect_error=True, expect_stderr=True, ) assert ("simple.dist-0.1-py1-none-invalid.whl is not a supported " + "wheel on this platform" in result.stderr) assert len(result.files_created) == 0 def test_install_options_local_to_package(script, data): """Make sure --install-options does not leak across packages. A requirements.txt file can have per-package --install-options; these should be isolated to just the package instead of leaking to subsequent packages. This needs to be a functional test because the bug was around cross-contamination at install time. """ home_simple = script.scratch_path.join("for-simple") test_simple = script.scratch.join("for-simple") home_simple.mkdir() reqs_file = script.scratch_path.join("reqs.txt") reqs_file.write( textwrap.dedent(""" simple --install-option='--home=%s' INITools """ % home_simple)) result = script.pip( 'install', '--no-index', '-f', data.find_links, '-r', reqs_file, expect_error=True, ) simple = test_simple / 'lib' / 'python' / 'simple' bad = test_simple / 'lib' / 'python' / 'initools' good = script.site_packages / 'initools' assert simple in result.files_created assert result.files_created[simple].dir assert bad not in result.files_created assert good in result.files_created assert result.files_created[good].dir
0.000144
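The tests above revolve around pip's constraints-file mechanism (`-c constraints.txt`) combined with `--no-index`/`-f` so resolution happens against local archives and direct URLs. Below is a minimal sketch, outside the test suite, of that same pattern driven through the public pip CLI; the package name, constraint URL, and find-links directory are placeholders.

import subprocess
import sys
from pathlib import Path

def install_with_constraint(package_name, constraint_line, find_links):
    # Write a one-line constraints file, e.g. "file:///tmp/singlemodule#egg=singlemodule",
    # then install the package by name while constraining it to that local URL.
    constraints = Path("constraints.txt")
    constraints.write_text(constraint_line + "\n")
    subprocess.check_call([
        sys.executable, "-m", "pip", "install",
        "--no-index",           # never reach out to PyPI, as in the tests above
        "-f", find_links,       # directory of local sdists/wheels
        "-c", str(constraints), # constrain resolution to the local URL
        package_name,
    ])

# install_with_constraint("singlemodule", "file:///tmp/singlemodule#egg=singlemodule", "/tmp/packages")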
#!/usr/bin/env python
# Simple linear regression with the TensorFlow 1.x graph API; under TensorFlow 2.x
# the placeholder/Session calls below live in tensorflow.compat.v1 (with v2
# behaviour disabled) rather than in the default namespace.

import tensorflow as tf
import numpy as np

trX = np.linspace(-1, 1, 101)
trY = 2 * trX + np.random.randn(*trX.shape) * 0.33  # a y value that is approximately linear, plus random noise

X = tf.placeholder("float")  # create symbolic variables
Y = tf.placeholder("float")


def model(X, w):
    return tf.multiply(X, w)  # linear regression is just X*w, so the model is a single multiply


w = tf.Variable(0.0, name="weights")  # create a shared variable (like theano.shared) for the weight
y_model = model(X, w)

cost = tf.square(Y - y_model)  # use squared error as the cost function

train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost)  # optimizer that minimizes cost to fit the line

# Launch the graph in a session
with tf.Session() as sess:
    # you need to initialize variables (in this case just variable w)
    tf.global_variables_initializer().run()

    for i in range(100):
        for (x, y) in zip(trX, trY):
            sess.run(train_op, feed_dict={X: x, Y: y})

    print(sess.run(w))  # should be close to 2
0.009149
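As a quick sanity check on the script above (not part of it), the slope that the per-sample gradient-descent loop converges to can be compared against the closed-form least-squares fit: np.polyfit on the same synthetic data should give a slope near 2 and an intercept near 0.

import numpy as np

trX = np.linspace(-1, 1, 101)
trY = 2 * trX + np.random.randn(*trX.shape) * 0.33  # same synthetic data as above
slope, intercept = np.polyfit(trX, trY, 1)          # closed-form least-squares fit
print(slope, intercept)                             # slope ~2, intercept ~0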
# encoding: utf-8 # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. # # Contact: Kyle Lahnakoski ([email protected]) # from __future__ import absolute_import, division, unicode_literals import os import platform import subprocess from mo_dots import set_default, to_data, Null, Data from mo_files import File from mo_future import text from mo_logs import Log, strings from mo_logs.exceptions import Except from mo_threads.lock import Lock from mo_threads.queues import Queue from mo_threads.signals import Signal from mo_threads.threads import THREAD_STOP, Thread from mo_threads.till import Till from mo_times import Timer DEBUG = False class Process(object): next_process_id = 0 def __init__(self, name, params, cwd=None, env=None, debug=False, shell=False, bufsize=-1): """ Spawns multiple threads to manage the stdin/stdout/stderr of the child process; communication is done via proper thread-safe queues of the same name. Since the process is managed and monitored by threads, the main thread is not blocked when the child process encounters problems :param name: name given to this process :param params: list of strings for program name and parameters :param cwd: current working directory :param env: enviroment variables :param debug: true to be verbose about stdin/stdout :param shell: true to run as command line :param bufsize: if you want to screw stuff up """ self.debug = debug or DEBUG self.process_id = Process.next_process_id Process.next_process_id += 1 self.name = name + " (" + text(self.process_id) + ")" self.service_stopped = Signal("stopped signal for " + strings.quote(name)) self.stdin = Queue("stdin for process " + strings.quote(name), silent=not self.debug) self.stdout = Queue("stdout for process " + strings.quote(name), silent=not self.debug) self.stderr = Queue("stderr for process " + strings.quote(name), silent=not self.debug) try: if cwd == None: cwd = os.getcwd() else: cwd = str(cwd) command = [str(p) for p in params] self.debug and Log.note("command: {{command}}", command=command) self.service = service = subprocess.Popen( [str(p) for p in params], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=bufsize, cwd=cwd, env={str(k): str(v) for k, v in set_default(env, os.environ).items()}, shell=shell ) self.please_stop = Signal() self.please_stop.then(self._kill) self.child_locker = Lock() self.children = [ Thread.run(self.name + " stdin", self._writer, service.stdin, self.stdin, please_stop=self.service_stopped, parent_thread=self), Thread.run(self.name + " stdout", self._reader, "stdout", service.stdout, self.stdout, please_stop=self.service_stopped, parent_thread=self), Thread.run(self.name + " stderr", self._reader, "stderr", service.stderr, self.stderr, please_stop=self.service_stopped, parent_thread=self), Thread.run(self.name + " waiter", self._monitor, parent_thread=self), ] except Exception as e: Log.error("Can not call", e) self.debug and Log.note("{{process}} START: {{command}}", process=self.name, command=" ".join(map(strings.quote, params))) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.join(raise_on_error=True) def stop(self): self.stdin.add(THREAD_STOP) # ONE MORE SEND self.please_stop.go() def join(self, raise_on_error=False): self.service_stopped.wait() with self.child_locker: child_threads, self.children = self.children, [] for c in child_threads: 
c.join() if raise_on_error and self.returncode != 0: Log.error( "{{process}} FAIL: returncode={{code}}\n{{stderr}}", process=self.name, code=self.service.returncode, stderr=list(self.stderr) ) return self def remove_child(self, child): with self.child_locker: try: self.children.remove(child) except Exception: pass @property def pid(self): return self.service.pid @property def returncode(self): return self.service.returncode def _monitor(self, please_stop): with Timer(self.name, verbose=self.debug): self.service.wait() self.debug and Log.note("{{process}} STOP: returncode={{returncode}}", process=self.name, returncode=self.service.returncode) self.service_stopped.go() please_stop.go() def _reader(self, name, pipe, receive, please_stop): try: while not please_stop and self.service.returncode is None: line = to_text(pipe.readline().rstrip()) if line: receive.add(line) self.debug and Log.note("{{process}} ({{name}}): {{line}}", name=name, process=self.name, line=line) else: (Till(seconds=1) | please_stop).wait() # GRAB A FEW MORE LINES max = 100 while max: try: line = to_text(pipe.readline().rstrip()) if line: max = 100 receive.add(line) self.debug and Log.note("{{process}} RESIDUE: ({{name}}): {{line}}", name=name, process=self.name, line=line) else: max -= 1 except Exception: break finally: pipe.close() receive.add(THREAD_STOP) self.debug and Log.note("{{process}} ({{name}} is closed)", name=name, process=self.name) receive.add(THREAD_STOP) def _writer(self, pipe, send, please_stop): while not please_stop: line = send.pop(till=please_stop) if line is THREAD_STOP: please_stop.go() break elif line is None: continue self.debug and Log.note("{{process}} (stdin): {{line}}", process=self.name, line=line.rstrip()) pipe.write(line.encode('utf8') + b"\n") pipe.flush() def _kill(self): try: self.service.kill() Log.note("Service was successfully terminated.") except Exception as e: ee = Except.wrap(e) if 'The operation completed successfully' in ee: return if 'No such process' in ee: return Log.warning("Failure to kill process {{process|quote}}", process=self.name, cause=ee) WINDOWS_ESCAPE_DCT = { u"%": u"%%", u"&": u"^&", u"\\": u"^\\", u"<": u"^<", u">": u"^>", u"^": u"^^", u"|": u"^|", u"\t": u"^\t", u"\n": u"^\n", u"\r": u"^\r", u" ": u"^ ", } PROMPT = "READY_FOR_MORE" def cmd_escape(value): if isinstance(value, File): return value.abspath return strings.quote(value) if "windows" in platform.system().lower(): def set_prompt(): return "prompt "+PROMPT+"$g" def cmd(): return "%windir%\\system32\\cmd.exe" def to_text(value): return value.decode("latin1") else: def set_prompt(): return "set prompt="+cmd_escape(PROMPT+">") def cmd(): return "bash" def to_text(value): return value.decode("latin1") class Command(object): """ FASTER Process CLASS - OPENS A COMMAND_LINE APP (CMD on windows) AND KEEPS IT OPEN FOR MULTIPLE COMMANDS EACH WORKING DIRECTORY WILL HAVE ITS OWN PROCESS, MULTIPLE PROCESSES WILL OPEN FOR THE SAME DIR IF MULTIPLE THREADS ARE REQUESTING Commands """ available_locker = Lock("cmd lock") available_process = {} def __init__(self, name, params, cwd=None, env=None, debug=False, shell=True, bufsize=-1): self.name = name self.key = ( cwd, Data(**(env or {})), # env WILL BE UPDATED BY CALLEE debug, shell ) self.stdout = Queue("stdout for "+name) self.stderr = Queue("stderr for "+name) with Command.available_locker: avail = Command.available_process.setdefault(self.key, []) if not avail: self.process = Process("command shell", [cmd()], cwd, env, debug, shell, bufsize) 
self.process.stdin.add(set_prompt()) self.process.stdin.add("echo %errorlevel%") _wait_for_start(self.process.stdout, Null) else: self.process = avail.pop() self.process.stdin.add(" ".join(cmd_escape(p) for p in params)) self.process.stdin.add("echo %errorlevel%") self.stdout_thread = Thread.run("", self._stream_relay, self.process.stdout, self.stdout) self.stderr_thread = Thread.run("", self._stream_relay, self.process.stderr, self.stderr) self.returncode = None def join(self, raise_on_error=False, till=None): try: try: # WAIT FOR COMMAND LINE RESPONSE ON stdout self.stdout_thread.join() except Exception as e: Log.error("unexpected problem processing stdout", cause=e) try: self.stderr_thread.please_stop.go() self.stderr_thread.join() except Exception as e: Log.error("unexpected problem processing stderr", cause=e) if raise_on_error and self.returncode != 0: Log.error( "{{process}} FAIL: returncode={{code}}\n{{stderr}}", process=self.name, code=self.returncode, stderr=list(self.stderr) ) return self finally: with Command.available_locker: Command.available_process[self.key].append(self.process) def _stream_relay(self, source, destination, please_stop=None): """ :param source: :param destination: :param error: Throw error if line shows up :param please_stop: :return: """ prompt_count = 0 prompt = PROMPT + ">" line_count = 0 while not please_stop: value = source.pop(till=please_stop) if value is None: destination.add(THREAD_STOP) return elif value is THREAD_STOP: destination.add(THREAD_STOP) return elif line_count==0 and "is not recognized as an internal or external command" in value: Log.error("Problem with command: {{desc}}", desc=value) elif value.startswith(prompt): if prompt_count: # GET THE ERROR LEVEL self.returncode = int(source.pop(till=please_stop)) destination.add(THREAD_STOP) return else: prompt_count += 1 else: line_count += 1 destination.add(value) def _wait_for_start(source, destination): prompt = PROMPT + ">" while True: value = source.pop() if value.startswith(prompt): # GET THE ERROR LEVEL returncode = int(source.pop()) destination.add(THREAD_STOP) return destination.add(value)
0.002395
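A rough usage sketch for the Process class above (not taken from the mo_threads documentation; the module path of the import is assumed): spawn a child process, drain its stdout queue until the THREAD_STOP sentinel appears, then join.

from mo_threads.threads import THREAD_STOP
from mo_threads.process import Process   # assumed module path for the class shown above

proc = Process("lister", ["ls", "-la"])   # the name is only used for logging/thread names
while True:
    line = proc.stdout.pop()              # blocks until the reader thread queues a line
    if line is THREAD_STOP:               # the reader adds this sentinel when the pipe closes
        break
    print(line)
proc.join(raise_on_error=True)            # raises, with captured stderr, if returncode != 0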
from __future__ import unicode_literals import codecs import glob import os from django.core.management.base import BaseCommand, CommandError from django.core.management.utils import find_command, popen_wrapper from django.utils._os import npath, upath def has_bom(fn): with open(fn, 'rb') as f: sample = f.read(4) return sample[:3] == b'\xef\xbb\xbf' or \ sample.startswith(codecs.BOM_UTF16_LE) or \ sample.startswith(codecs.BOM_UTF16_BE) def is_writable(path): # Known side effect: updating file access/modified time to current time if # it is writable. try: with open(path, 'a'): os.utime(path, None) except (IOError, OSError): return False return True class Command(BaseCommand): help = 'Compiles .po files to .mo files for use with builtin gettext support.' requires_system_checks = False leave_locale_alone = True program = 'msgfmt' program_options = ['--check-format'] def add_arguments(self, parser): parser.add_argument('--locale', '-l', dest='locale', action='append', default=[], help='Locale(s) to process (e.g. de_AT). Default is to process all. ' 'Can be used multiple times.') parser.add_argument('--exclude', '-x', dest='exclude', action='append', default=[], help='Locales to exclude. Default is none. Can be used multiple times.') def handle(self, **options): locale = options.get('locale') exclude = options.get('exclude') self.verbosity = int(options.get('verbosity')) if find_command(self.program) is None: raise CommandError("Can't find %s. Make sure you have GNU gettext " "tools 0.15 or newer installed." % self.program) basedirs = [os.path.join('conf', 'locale'), 'locale'] if os.environ.get('DJANGO_SETTINGS_MODULE'): from django.conf import settings basedirs.extend([upath(path) for path in settings.LOCALE_PATHS]) # Gather existing directories. basedirs = set(map(os.path.abspath, filter(os.path.isdir, basedirs))) if not basedirs: raise CommandError("This script should be run from the Django Git " "checkout or your project or app tree, or with " "the settings module specified.") # Build locale list all_locales = [] for basedir in basedirs: locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % basedir)) all_locales.extend(map(os.path.basename, locale_dirs)) # Account for excluded locales locales = locale or all_locales locales = set(locales) - set(exclude) for basedir in basedirs: if locales: dirs = [os.path.join(basedir, l, 'LC_MESSAGES') for l in locales] else: dirs = [basedir] locations = [] for ldir in dirs: for dirpath, dirnames, filenames in os.walk(ldir): locations.extend((dirpath, f) for f in filenames if f.endswith('.po')) if locations: self.compile_messages(locations) def compile_messages(self, locations): """ Locations is a list of tuples: [(directory, file), ...] """ for i, (dirpath, f) in enumerate(locations): if self.verbosity > 0: self.stdout.write('processing file %s in %s\n' % (f, dirpath)) po_path = os.path.join(dirpath, f) if has_bom(po_path): raise CommandError("The %s file has a BOM (Byte Order Mark). " "Django only supports .po files encoded in " "UTF-8 and without any BOM." % po_path) base_path = os.path.splitext(po_path)[0] # Check writability on first location if i == 0 and not is_writable(npath(base_path + '.mo')): self.stderr.write("The po files under %s are in a seemingly not writable location. " "mo files will not be updated/created." 
% dirpath) return args = [self.program] + self.program_options + ['-o', npath(base_path + '.mo'), npath(base_path + '.po')] output, errors, status = popen_wrapper(args) if status: if errors: msg = "Execution of %s failed: %s" % (self.program, errors) else: msg = "Execution of %s failed" % self.program raise CommandError(msg)
0.002987
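For context (a hedged sketch, not part of the file above): each .po file found is handed to `msgfmt --check-format -o <base>.mo <base>.po` via popen_wrapper. The command can also be driven programmatically through Django's call_command, using the locale/exclude options it registers.

import django
from django.core.management import call_command

django.setup()   # assumes DJANGO_SETTINGS_MODULE is configured
call_command("compilemessages", locale=["de", "fr"], exclude=["en"], verbosity=1)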
# coding=utf-8 # Copyright 2021 TF.Text Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF.Text is a TensorFlow library of text related ops, modules, and subgraphs. TF.Text is a TensorFlow library of text related ops, modules, and subgraphs. The library can perform the preprocessing regularly required by text-based models, and includes other features useful for sequence modeling not provided by core TensorFlow. See the README on GitHub for further documentation. http://github.com/tensorflow/text """ import os from setuptools import find_packages from setuptools import setup from setuptools.command.install import install from setuptools.dist import Distribution project_name = 'tensorflow-text' project_version = '2.6.0-rc0' class BinaryDistribution(Distribution): """This class is needed in order to create OS specific wheels.""" def is_pure(self): return False def has_ext_modules(self): return True class InstallPlatlib(install): """This is needed to set the library to platlib compliant.""" def finalize_options(self): """For more info; see http://github.com/google/or-tools/issues/616 .""" install.finalize_options(self) self.install_lib = self.install_platlib self.install_libbase = self.install_lib self.install_lib = os.path.join(self.install_lib, self.extra_dirs) DOCLINES = __doc__.split('\n') setup( name=project_name, version=project_version.replace('-', ''), description=DOCLINES[0], long_description='\n'.join(DOCLINES[2:]), author='Google Inc.', author_email='[email protected]', url='http://github.com/tensorflow/text', license='Apache 2.0', packages=find_packages(), include_package_data=True, zip_safe=False, cmdclass={'install': InstallPlatlib}, distclass=BinaryDistribution, install_requires=[ 'tensorflow>=2.6.0rc0, <2.7', 'tensorflow_hub>=0.8.0', ], extras_require={ 'tensorflow_cpu': ['tensorflow-cpu>=2.4.0rc0, <2.5',], 'tests': [ 'absl-py', 'pytest', 'tensorflow-datasets>=3.2.0', ], }, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3 :: Only', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', ], keywords='tensorflow text machine learning', )
0.002006
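The BinaryDistribution/InstallPlatlib pair above is what makes the resulting wheel platform-specific: is_pure() returning False and has_ext_modules() returning True push bdist_wheel to emit a platform tag instead of py3-none-any, and the install command is redirected to platlib. A minimal, hypothetical setup.py reusing just the distribution part of that pattern might look like this.

from setuptools import setup, find_packages
from setuptools.dist import Distribution

class BinaryDistribution(Distribution):
    """Force a platform-specific (non-pure) wheel even without ext_modules."""
    def is_pure(self):
        return False
    def has_ext_modules(self):
        return True

setup(
    name="my-native-package",        # hypothetical project name
    version="0.1.0",
    packages=find_packages(),
    include_package_data=True,       # ship prebuilt binaries declared as package data
    zip_safe=False,
    distclass=BinaryDistribution,
)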
from __future__ import print_function __author__ = 'Frank Sehnke, [email protected]' from pybrain.rl.environments import EpisodicTask from pybrain.rl.environments.ode.sensors import SpecificBodyPositionSensor from scipy import tanh, zeros, array, random, sqrt, asarray #Basic class for all ccrl tasks class CCRLTask(EpisodicTask): def __init__(self, env): EpisodicTask.__init__(self, env) #Overall maximal tourque - is multiplied with relative max tourque for individual joint. self.maxPower = 100.0 self.reward_history = [] self.count = 0 #timestep counter self.epiLen = 1500 #suggestet episodic length for normal Johnnie tasks self.incLearn = 0 #counts the task resets for incrementall learning self.env.FricMu = 20.0 #We need higher friction for CCRL self.env.dt = 0.002 #We also need more timly resolution # normalize standard sensors to (-1, 1) self.sensor_limits = [] #Angle sensors for i in range(self.env.actLen): self.sensor_limits.append((self.env.cLowList[i], self.env.cHighList[i])) # Joint velocity sensors for i in range(self.env.actLen): self.sensor_limits.append((-20, 20)) #Norm all actor dimensions to (-1, 1) self.actor_limits = [(-1, 1)] * env.actLen self.oldAction = zeros(env.actLen, float) self.dist = zeros(9, float) self.dif = array([0.0, 0.0, 0.0]) self.target = array([-6.5, 1.75, -10.5]) self.grepRew = 0.0 self.tableFlag = 0.0 self.env.addSensor(SpecificBodyPositionSensor(['objectP00'], "glasPos")) self.env.addSensor(SpecificBodyPositionSensor(['palmLeft'], "palmPos")) self.env.addSensor(SpecificBodyPositionSensor(['fingerLeft1'], "finger1Pos")) self.env.addSensor(SpecificBodyPositionSensor(['fingerLeft2'], "finger2Pos")) #we changed sensors so we need to update environments sensorLength variable self.env.obsLen = len(self.env.getSensors()) #normalization for the task spezific sensors for i in range(self.env.obsLen - 2 * self.env.actLen): self.sensor_limits.append((-4, 4)) self.actor_limits = None def getObservation(self): """ a filtered mapping to getSample of the underlying environment. 
""" sensors = self.env.getSensors() #Sensor hand to target object for i in range(3): self.dist[i] = ((sensors[self.env.obsLen - 9 + i] + sensors[self.env.obsLen - 6 + i] + sensors[self.env.obsLen - 3 + i]) / 3.0 - (sensors[self.env.obsLen - 12 + i] + self.dif[i])) * 4.0 #sensors[self.env.obsLen-12+i] #Sensor hand angle to horizontal plane X-Axis for i in range(3): self.dist[i + 3] = (sensors[self.env.obsLen - 3 + i] - sensors[self.env.obsLen - 6 + i]) * 5.0 #Sensor hand angle to horizontal plane Y-Axis for i in range(3): self.dist[i + 6] = ((sensors[self.env.obsLen - 3 + i] + sensors[self.env.obsLen - 6 + i]) / 2.0 - sensors[self.env.obsLen - 9 + i]) * 10.0 if self.sensor_limits: sensors = self.normalize(sensors) sens = [] for i in range(self.env.obsLen - 12): sens.append(sensors[i]) for i in range(9): sens.append(self.dist[i]) for i in self.oldAction: sens.append(i) return sens def performAction(self, action): #Filtered mapping towards performAction of the underlying environment #The standard CCRL task uses a PID controller to controll directly angles instead of forces #This makes most tasks much simpler to learn self.oldAction = action #Grasping as reflex depending on the distance to target - comment in for more easy grasping if abs(abs(self.dist[:3]).sum())<2.0: action[15]=1.0 #self.grepRew=action[15]*.01 else: action[15]=-1.0 #self.grepRew=action[15]*-.03 isJoints=array(self.env.getSensorByName('JointSensor')) #The joint angles isSpeeds=array(self.env.getSensorByName('JointVelocitySensor')) #The joint angular velocitys act=(action+1.0)/2.0*(self.env.cHighList-self.env.cLowList)+self.env.cLowList #norm output to action intervall action=tanh((act-isJoints-0.9*isSpeeds*self.env.tourqueList)*16.0)*self.maxPower*self.env.tourqueList #simple PID EpisodicTask.performAction(self, action) #self.env.performAction(action) def isFinished(self): #returns true if episode timesteps has reached episode length and resets the task if self.count > self.epiLen: self.res() return True else: self.count += 1 return False def res(self): #sets counter and history back, increases incremental counter self.count = 0 self.incLearn += 1 self.reward_history.append(self.getTotalReward()) self.tableFlag = 0.0 def getReward(self): #rewarded for approaching the object dis = sqrt((self.dist[0:3] ** 2).sum()) return (25.0 - dis) / float(self.epiLen) - float(self.env.tableSum) * 0.1 #Learn to grasp a glas at a fixed location class CCRLGlasTask(CCRLTask): def __init__(self, env): CCRLTask.__init__(self, env) self.dif = array([0.0, 0.0, 0.0]) self.epiLen = 1000 #suggestet episodic length for normal Johnnie tasks def isFinished(self): #returns true if episode timesteps has reached episode length and resets the task if self.count > self.epiLen: self.res() return True else: self.count += 1 return False def getReward(self): if self.env.glasSum >= 2: grip = 1000.0 else: grip = 0.0 if self.env.tableSum > 0: self.tableFlag = -1.0 else: tableFlag = 0.0 self.dist[3] = 0.0 self.dist[8] = 0.0 dis = 100.0/((self.dist[:3] ** 2).sum()+0.1) nig = 10.0/((self.dist[3:] ** 2).sum()+0.1) if self.env.stepCounter == self.epiLen: print(("Grip:", grip, "Dis:", dis, "Nig:", nig, "Table:", self.tableFlag)) return (10 + grip + nig + dis + self.tableFlag) / float(self.epiLen) #-dis #else: # return (25.0 - dis) / float(self.epiLen) + (grip / nig - float(self.env.tableSum)) * 0.1 #+self.grepRew (10.0-dis)/float(self.epiLen)+ #Learn to grasp a plate at a fixed location class CCRLPlateTask(CCRLTask): def __init__(self, env): CCRLTask.__init__(self, env) 
self.dif = array([0.0, 0.2, 0.8]) self.epiLen = 1000 #suggestet episodic length for normal Johnnie tasks def isFinished(self): #returns true if episode timesteps has reached episode length and resets the task if self.count > self.epiLen: self.res() return True else: if self.count == 1: self.pertGlasPos(0) self.count += 1 return False def pertGlasPos(self, num): if num == 0: self.env.pert = asarray([0.0, 0.0, 0.5]) def getReward(self): if self.env.glasSum >= 2: grip = 1.0 else: grip = 0.0 if self.env.tableSum > 0: self.tableFlag = 10.0 #self.dist[4]=0.0 #self.dist[8]=0.0 dis = sqrt((self.dist[0:3] ** 2).sum()) if self.count == self.epiLen: return 25.0 + grip - dis - self.tableFlag #/nig else: return (25.0 - dis) / float(self.epiLen) + (grip - float(self.env.tableSum)) * 0.1 #/nig -(1.0+self.oldAction[15]) #Learn to grasp a glas at 5 different locations class CCRLGlasVarTask(CCRLGlasTask): def __init__(self, env): CCRLGlasTask.__init__(self, env) self.epiLen = 5000 #suggestet episodic length for normal Johnnie tasks def isFinished(self): #returns true if episode timesteps has reached episode length and resets the task if self.count > self.epiLen: self.res() return True else: if self.count == 1: self.pertGlasPos(0) if self.count == self.epiLen / 5 + 1: self.env.reset() self.pertGlasPos(1) if self.count == 2 * self.epiLen / 5 + 1: self.env.reset() self.pertGlasPos(2) if self.count == 3 * self.epiLen / 5 + 1: self.env.reset() self.pertGlasPos(3) if self.count == 4 * self.epiLen / 5 + 1: self.env.reset() self.pertGlasPos(4) self.count += 1 return False def pertGlasPos(self, num): if num == 0: self.env.pert = asarray([1.0, 0.0, 0.5]) if num == 1: self.env.pert = asarray([-1.0, 0.0, 0.5]) if num == 2: self.env.pert = asarray([1.0, 0.0, 0.0]) if num == 3: self.env.pert = asarray([-1.0, 0.0, 0.0]) if num == 4: self.env.pert = asarray([0.0, 0.0, 0.25]) def getReward(self): if self.env.glasSum >= 2: grip = 1.0 else: grip = 0.0 if self.env.tableSum > 0: self.tableFlag = 10.0 self.dist[3] = 0.0 self.dist[8] = 0.0 dis = sqrt((self.dist ** 2).sum()) nig = (abs(self.dist[4]) + 1.0) if self.count == self.epiLen or self.count == self.epiLen / 5 or self.count == 2 * self.epiLen / 5 or self.count == 3 * self.epiLen / 5 or self.count == 4 * self.epiLen / 5: return 25.0 + grip / nig - dis - self.tableFlag #/nig else: return (25.0 - dis) / float(self.epiLen) + (grip / nig - float(self.env.tableSum)) * 0.1 #/nig #Learn to grasp a glas at random locations class CCRLGlasVarRandTask(CCRLGlasVarTask): def pertGlasPos(self, num): self.env.pert = asarray([random.random()*2.0 - 1.0, 0.0, random.random()*0.5 + 0.5]) #Some experimental stuff class CCRLPointTask(CCRLGlasVarTask): def __init__(self, env): CCRLGlasVarTask.__init__(self, env) self.epiLen = 1000 #suggestet episodic length for normal Johnnie tasks def isFinished(self): #returns true if episode timesteps has reached episode length and resets the task if self.count > self.epiLen: self.res() return True else: if self.count == 1: self.pertGlasPos(0) self.count += 1 return False def getObservation(self): """ a filtered mapping to getSample of the underlying environment. """ sensors = self.env.getSensors() sensSort = [] #Angle and angleVelocity for i in range(32): sensSort.append(sensors[i]) #Angles wanted (old action) for i in self.oldAction: sensSort.append(i) #Hand position for i in range(3): sensSort.append((sensors[38 + i] + sensors[41 + i]) / 2) #Hand orientation (Hack - make correkt!!!!) 
sensSort.append((sensors[38] - sensors[41]) / 2 - sensors[35]) #pitch sensSort.append((sensors[38 + 1] - sensors[41 + 1]) / 2 - sensors[35 + 1]) #yaw sensSort.append((sensors[38 + 1] - sensors[41 + 1])) #roll #Target position for i in range(3): sensSort.append(self.target[i]) #Target orientation for i in range(3): sensSort.append(0.0) #Object type (start with random) sensSort.append(float(random.randint(-1, 1))) #roll #normalisation if self.sensor_limits: sensors = self.normalize(sensors) sens = [] for i in range(32): sens.append(sensors[i]) for i in range(29): sens.append(sensSort[i + 32]) #calc dist to target self.dist = array([(sens[54] - sens[48]), (sens[55] - sens[49]), (sens[56] - sens[50]), sens[51], sens[52], sens[53], sens[15]]) return sens def pertGlasPos(self, num): if num == 0: self.target = asarray([0.0, 0.0, 1.0]) self.env.pert = self.target.copy() self.target = self.target.copy() + array([-6.5, 1.75, -10.5]) def getReward(self): dis = sqrt((self.dist ** 2).sum()) return (25.0 - dis) / float(self.epiLen) - float(self.env.tableSum) * 0.1 class CCRLPointVarTask(CCRLPointTask): def __init__(self, env): CCRLPointTask.__init__(self, env) self.epiLen = 2000 #suggestet episodic length for normal Johnnie tasks def isFinished(self): #returns true if episode timesteps has reached episode length and resets the task if self.count > self.epiLen: self.res() return True else: if self.count == 1: self.pertGlasPos(0) if self.count == self.epiLen / 2 + 1: self.env.reset() self.pertGlasPos(1) self.count += 1 return False def getObservation(self): """ a filtered mapping to getSample of the underlying environment. """ sensors = self.env.getSensors() sensSort = [] #Angle and angleVelocity for i in range(32): sensSort.append(sensors[i]) #Angles wanted (old action) for i in self.oldAction: sensSort.append(i) #Hand position for i in range(3): sensSort.append((sensors[38 + i] + sensors[41 + i]) / 2) #Hand orientation (Hack - make correkt!!!!) sensSort.append((sensors[38] - sensors[41]) / 2 - sensors[35]) #pitch sensSort.append((sensors[38 + 1] - sensors[41 + 1]) / 2 - sensors[35 + 1]) #yaw sensSort.append((sensors[38 + 1] - sensors[41 + 1])) #roll #Target position for i in range(3): sensSort.append(self.target[i]) #Target orientation for i in range(3): sensSort.append(0.0) #Object type (start with random) sensSort.append(float(random.randint(-1, 1))) #roll #normalisation if self.sensor_limits: sensors = self.normalize(sensors) sens = [] for i in range(32): sens.append(sensors[i]) for i in range(29): sens.append(sensSort[i + 32]) #calc dist to target self.dist = array([(sens[54] - sens[48]) * 10.0, (sens[55] - sens[49]) * 10.0, (sens[56] - sens[50]) * 10.0, sens[51], sens[52], sens[53], 1.0 + sens[15]]) return sens def pertGlasPos(self, num): if num == 0: self.target = asarray([1.0, 0.0, 1.0]) if num == 1: self.target = asarray([-1.0, 0.0, 1.0]) if num == 2: self.target = asarray([1.0, 0.0, 0.0]) if num == 3: self.target = asarray([-1.0, 0.0, 0.0]) if num == 4: self.target = asarray([0.0, 0.0, 0.5]) self.env.pert = self.target.copy() self.target = self.target.copy() + array([-6.5, 1.75, -10.5]) def getReward(self): dis = sqrt((self.dist ** 2).sum()) subEpi = self.epiLen / 2 if self.count == self.epiLen or self.count == subEpi: return (25.0 - dis) / 2.0 else: return (25.0 - dis) / float(self.epiLen) - float(self.env.tableSum) * 0.1
0.012033
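A standalone sketch (illustrative names, not part of pybrain) of the per-step reward shaping that CCRLTask.getReward applies above: reward closeness of the hand to the object, spread over the episode length, and penalize table contacts.

import numpy as np

def ccrl_step_reward(dist_vec, table_contacts, episode_len=1500):
    # dist_vec[:3] is the scaled hand-to-object offset computed in getObservation;
    # table_contacts mirrors env.tableSum in the task code above.
    dis = np.sqrt(np.sum(np.asarray(dist_vec[:3], dtype=float) ** 2))
    return (25.0 - dis) / float(episode_len) - float(table_contacts) * 0.1

# Example: close to the object, no table contact -> small positive shaping term.
# print(ccrl_step_reward([0.5, -0.2, 0.1], table_contacts=0))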