| repository_name (string, length 7-56) | func_path_in_repository (string, length 10-101) | func_name (string, length 12-78) | language (string, 1 value: ruby) | func_code_string (string, length 74-11.9k) | func_documentation_string (string, length 3-8.03k) | split_name (string, 1 value: train) | func_code_url (string, length 98-213) | enclosing_scope (string, length 42-98.2k) |
---|---|---|---|---|---|---|---|---|
kristianmandrup/roles_generic | lib/roles_generic/generic/user/implementation.rb | Roles::Generic::User.Implementation.roles | ruby | def roles
return [] if get_roles.nil?
x = [get_roles].flatten.map do |role|
role.respond_to?(:to_sym) ? role.to_sym : role
end
x.first.kind_of?(Set) ? x.first.to_a : x
end | query assigned roles | train | https://github.com/kristianmandrup/roles_generic/blob/94588ac58bcca1f44ace5695d1984da1bd98fe1a/lib/roles_generic/generic/user/implementation.rb#L112-L118 | module Implementation
include Roles::Generic::RoleUtil
def role_attribute
strategy_class.roles_attribute_name
end
# set a single role
def role= role
raise ArgumentError, '#role= takes a single role String or Symbol as the argument' if !role || role.kind_of?(Array)
self.roles = role
end
# add a single role
def add_role role
raise ArgumentError, '#add_role takes a single role String or Symbol as the argument' if !role || role.kind_of?(Array)
add_roles role
end
# remove a single role
def remove_role role
raise ArgumentError, '#remove_role takes a single role String or Symbol as the argument' if !role || role.kind_of?(Array)
remove_roles role
end
# exchange the listed role(s) with the role(s) given in the :with option
def exchange_roles *role_names
options = last_option role_names
raise ArgumentError, "Must take an options hash as last argument with a :with option signifying which role(s) to replace with" if !options || !options.kind_of?(Hash)
remove_roles(role_names.to_symbols)
with_roles = options[:with]
add_roles(with_roles)
end
def exchange_role role, options = {}
raise ArgumentError, '#exchange_role takes a single role String or Symbol as the first argument' if !role || role.kind_of?(Array)
raise ArgumentError, '#exchange_role takes an options hash with a :with option as the last argument' if !options || !options[:with]
if options[:with].kind_of?(Array) && self.class.role_strategy.multiplicity == :single
raise ArgumentError, '#exchange_role should only take a single role to exchange with for a Role strategy with multiplicity of one' if options[:with].size > 1
end
exchange_roles role, options
end
# is_in_group? :admin
def is_in_group? group
raise ArgumentError, 'Group id must be a String or Symbol' if !group.kind_of_label?
group_roles = self.class.role_groups[group]
# puts "group_roles: #{group_roles} for group: #{group}"
# puts "roles_list: #{roles_list}"
!(group_roles & roles_list).empty?
end
alias_method :is_member_of?, :is_in_group?
# is_in_groups? :editor, :admin,
def is_in_groups? *groups
groups = groups.flat_uniq
groups.all? {|group| is_in_group? group}
end
def is_in_any_group? *groups
groups = groups.flat_uniq
groups.any? {|group| is_in_group? group}
end
# check if all of the roles listed have been assigned to that user
def has_roles?(*roles_names)
compare_roles = extract_roles(roles_names.flat_uniq)
(compare_roles - roles_list).empty?
end
# check if any of the roles listed have been assigned to that user
def has_any_role?(*roles_names)
compare_roles = extract_roles(roles_names.flat_uniq)
(roles_list & compare_roles).not.empty?
end
# check if the given single role has been assigned
def has_role? role_name
raise ArgumentError, '#has_role? should take a single role String or Symbol as the argument' if !role_name || role_name.kind_of?(Array)
has_roles? role_name
end
def valid_role? role
strategy_class.valid_roles.include? role.to_sym
end
def valid_roles? *roles
roles.each do |role|
return false if !valid_role? role
end
true
end
def valid_roles
strategy_class.valid_roles
end
def admin?
is? :admin
end
# assign multiple roles
def roles=(*role_names)
role_names = role_names.flat_uniq
role_names = extract_roles(role_names)
return nil if role_names.empty?
set_roles(select_valid_roles role_names)
end
# query assigned roles
alias_method :has?, :has_role?
alias_method :is?, :has_roles?
def has_only_role? arg
raise ArgumentError, "Must take only a single argument that is a role name" if arg.send(:size) > 1 && arg.kind_of?(Array)
has_roles? [arg].flatten.first
end
alias_method :has_only?, :has_only_role?
alias_method :is_only?, :has_only_role?
protected
def set_role role
self.send("#{role_attribute}=", new_role(role))
end
alias_method :set_roles, :set_role
def get_role
r = self.send(role_attribute)
respond_to?(:present_role) ? present_role(r) : r
end
def get_roles
r = self.send(role_attribute)
respond_to?(:present_roles) ? present_roles(r) : r
end
def set_roles *roles
self.send("#{role_attribute}=", new_roles(roles))
end
def roles_diff *roles
self.roles_list - extract_roles(roles.flat_uniq)
end
def select_valid_roles *role_names
role_names = role_names.flat_uniq.select{|role| valid_role? role }
has_role_class? ? role_class.find_roles(role_names).to_a : role_names
end
def has_role_class?
self.respond_to?(:role_class)
end
end
|
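A minimal usage sketch of the `roles` query above; the `User` class and its generic role-strategy setup are assumptions of this example, not part of the record:

# hypothetical model wired up with Roles::Generic
user = User.new
user.roles = [:admin, 'editor']  # strings and symbols are both accepted
user.roles                       # => [:admin, :editor] -- normalized to
                                 #    symbols; a Set strategy unwraps to Array
user.has_role? :admin            # => true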
hashicorp/vagrant | lib/vagrant/box_collection.rb | Vagrant.BoxCollection.find | ruby | def find(name, providers, version)
providers = Array(providers)
# Build up the requirements we have
requirements = version.to_s.split(",").map do |v|
Gem::Requirement.new(v.strip)
end
with_collection_lock do
box_directory = @directory.join(dir_name(name))
if !box_directory.directory?
@logger.info("Box not found: #{name} (#{providers.join(", ")})")
return nil
end
# Keep a mapping of Gem::Version mangled versions => directories.
# ie. 0.1.0.pre.alpha.2 => 0.1.0-alpha.2
# This is so we can sort version numbers properly here, but still
# refer to the real directory names in path checks below and pass an
# unmangled version string to Box.new
version_dir_map = {}
versions = box_directory.children(true).map do |versiondir|
next if !versiondir.directory?
next if versiondir.basename.to_s.start_with?(".")
version = Gem::Version.new(versiondir.basename.to_s)
version_dir_map[version.to_s] = versiondir.basename.to_s
version
end.compact
# Traverse through versions with the latest version first
versions.sort.reverse.each do |v|
if !requirements.all? { |r| r.satisfied_by?(v) }
# Unsatisfied version requirements
next
end
versiondir = box_directory.join(version_dir_map[v.to_s])
providers.each do |provider|
provider_dir = versiondir.join(provider.to_s)
next if !provider_dir.directory?
@logger.info("Box found: #{name} (#{provider})")
metadata_url = nil
metadata_url_file = box_directory.join("metadata_url")
metadata_url = metadata_url_file.read if metadata_url_file.file?
if metadata_url && @hook
hook_env = @hook.call(
:authenticate_box_url, box_urls: [metadata_url])
metadata_url = hook_env[:box_urls].first
end
return Box.new(
name, provider, version_dir_map[v.to_s], provider_dir,
metadata_url: metadata_url,
)
end
end
end
nil
end | Find a box in the collection with the given name and provider.
@param [String] name Name of the box (logical name).
@param [Array] providers Providers that the box implements.
@param [String] version Version constraints to adhere to. Example:
"~> 1.0" or "= 1.0, ~> 1.1"
@return [Box] The box found, or `nil` if not found. | train | https://github.com/hashicorp/vagrant/blob/c22a145c59790c098f95d50141d9afb48e1ef55f/lib/vagrant/box_collection.rb#L272-L335 | class BoxCollection
TEMP_PREFIX = "vagrant-box-add-temp-".freeze
VAGRANT_SLASH = "-VAGRANTSLASH-".freeze
VAGRANT_COLON = "-VAGRANTCOLON-".freeze
# The directory where the boxes in this collection are stored.
#
# A box collection matches a very specific folder structure that Vagrant
# expects in order to easily manage and modify boxes. The folder structure
# is the following:
#
# COLLECTION_ROOT/BOX_NAME/PROVIDER/metadata.json
#
# Where:
#
# * COLLECTION_ROOT - This is the root of the box collection, and is
# the directory given to the initializer.
# * BOX_NAME - The name of the box. This is a logical name given by
# the user of Vagrant.
# * PROVIDER - The provider that the box was built for (VirtualBox,
# VMware, etc.).
# * metadata.json - A simple JSON file that at the bare minimum
# contains a "provider" key that matches the provider for the
# box. This metadata JSON, however, can contain anything.
#
# @return [Pathname]
attr_reader :directory
# Initializes the collection.
#
# @param [Pathname] directory The directory that contains the collection
# of boxes.
def initialize(directory, options=nil)
options ||= {}
@directory = directory
@hook = options[:hook]
@lock = Monitor.new
@temp_root = options[:temp_dir_root]
@logger = Log4r::Logger.new("vagrant::box_collection")
end
# This adds a new box to the system.
#
# There are some exceptional cases:
# * BoxAlreadyExists - The box you're attempting to add already exists.
# * BoxProviderDoesntMatch - If the given box provider doesn't match the
# actual box provider in the untarred box.
# * BoxUnpackageFailure - An invalid tar file.
#
# Preconditions:
# * File given in `path` must exist.
#
# @param [Pathname] path Path to the box file on disk.
# @param [String] name Logical name for the box.
# @param [String] version The version of this box.
# @param [Array<String>] providers The providers that this box can
# be a part of. This will be verified with the `metadata.json` and is
# meant as a basic check. If this isn't given, then whatever provider
# the box represents will be added.
# @param [Boolean] force If true, any existing box with the same name
# and provider will be replaced.
def add(path, name, version, **opts)
providers = opts[:providers]
providers = Array(providers) if providers
provider = nil
# A helper to check if a box exists. We store this in a variable
# since we call it multiple times.
check_box_exists = lambda do |box_formats|
box = find(name, box_formats, version)
next if !box
if !opts[:force]
@logger.error(
"Box already exists, can't add: #{name} v#{version} #{box_formats.join(", ")}")
raise Errors::BoxAlreadyExists,
name: name,
provider: box_formats.join(", "),
version: version
end
# We're forcing, so just delete the old box
@logger.info(
"Box already exists, but forcing so removing: " +
"#{name} v#{version} #{box_formats.join(", ")}")
box.destroy!
end
with_collection_lock do
log_provider = providers ? providers.join(", ") : "any provider"
@logger.debug("Adding box: #{name} (#{log_provider}) from #{path}")
# Verify the box doesn't exist early if we're given a provider. This
# can potentially speed things up considerably since we don't need
# to unpack any files.
check_box_exists.call(providers) if providers
# Create a temporary directory since we're not sure at this point if
# the box we're unpackaging already exists (if no provider was given)
with_temp_dir do |temp_dir|
# Extract the box into a temporary directory.
@logger.debug("Unpacking box into temporary directory: #{temp_dir}")
result = Util::Subprocess.execute(
"bsdtar", "--no-same-owner", "--no-same-permissions", "-v", "-x", "-m", "-s", "|\\\\\|/|", "-C", temp_dir.to_s, "-f", path.to_s)
if result.exit_code != 0
raise Errors::BoxUnpackageFailure,
output: result.stderr.to_s
end
# If we get a V1 box, we want to update it in place
if v1_box?(temp_dir)
@logger.debug("Added box is a V1 box. Upgrading in place.")
temp_dir = v1_upgrade(temp_dir)
end
# We re-wrap ourselves in the safety net in case we upgraded.
# If we didn't upgrade, then this is still safe because the
# helper will only delete the directory if it exists
with_temp_dir(temp_dir) do |final_temp_dir|
# Get an instance of the box we just added before it is finalized
# in the system so we can inspect and use its metadata.
box = Box.new(name, nil, version, final_temp_dir)
# Get the provider, since we'll need that to at the least add it
# to the system or check that it matches what is given to us.
box_provider = box.metadata["provider"]
if providers
found = providers.find { |p| p.to_sym == box_provider.to_sym }
if !found
@logger.error("Added box provider doesnt match expected: #{log_provider}")
raise Errors::BoxProviderDoesntMatch,
expected: log_provider, actual: box_provider
end
else
# Verify the box doesn't already exist
check_box_exists.call([box_provider])
end
# We weren't given a provider, so store this one.
provider = box_provider.to_sym
# Create the directory for this box, not including the provider
root_box_dir = @directory.join(dir_name(name))
box_dir = root_box_dir.join(version)
box_dir.mkpath
@logger.debug("Box directory: #{box_dir}")
# This is the final directory we'll move it to
final_dir = box_dir.join(provider.to_s)
if final_dir.exist?
@logger.debug("Removing existing provider directory...")
final_dir.rmtree
end
# Move to final destination
final_dir.mkpath
# Recursively move individual files from the temporary directory
# to the final location. We do this instead of moving the entire
# directory to avoid issues on Windows. [GH-1424]
copy_pairs = [[final_temp_dir, final_dir]]
while !copy_pairs.empty?
from, to = copy_pairs.shift
from.children(true).each do |f|
dest = to.join(f.basename)
# We don't copy entire directories, so create the
# directory and then add to our list to copy.
if f.directory?
dest.mkpath
copy_pairs << [f, dest]
next
end
# Copy the single file
@logger.debug("Moving: #{f} => #{dest}")
FileUtils.mv(f, dest)
end
end
if opts[:metadata_url]
root_box_dir.join("metadata_url").open("w") do |f|
f.write(opts[:metadata_url])
end
end
end
end
end
# Return the box
find(name, provider, version)
end
# This returns an array of all the boxes on the system, given by
# their name and their provider.
#
# @return [Array] Array of `[name, version, provider]` of the boxes
# installed on this system.
def all
results = []
with_collection_lock do
@logger.debug("Finding all boxes in: #{@directory}")
@directory.children(true).each do |child|
# Ignore non-directories, since files are not interesting to
# us in our folder structure.
next if !child.directory?
box_name = undir_name(child.basename.to_s)
# Otherwise, traverse the subdirectories and see what versions
# we have.
child.children(true).each do |versiondir|
next if !versiondir.directory?
next if versiondir.basename.to_s.start_with?(".")
version = versiondir.basename.to_s
versiondir.children(true).each do |provider|
# Ensure version of box is correct before continuing
if !Gem::Version.correct?(version)
ui = Vagrant::UI::Prefixed.new(Vagrant::UI::Colored.new, "vagrant")
ui.warn(I18n.t("vagrant.box_version_malformed",
version: version, box_name: box_name))
@logger.debug("Invalid version #{version} for box #{box_name}")
next
end
# Verify this is a potentially valid box. If it looks
# correct enough then include it.
if provider.directory? && provider.join("metadata.json").file?
provider_name = provider.basename.to_s.to_sym
@logger.debug("Box: #{box_name} (#{provider_name}, #{version})")
results << [box_name, version, provider_name]
else
@logger.debug("Invalid box #{box_name}, ignoring: #{provider}")
end
end
end
end
end
# Sort the list to group like providers and properly ordered versions
results.sort_by! do |box_result|
[box_result[0], box_result[2], Gem::Version.new(box_result[1])]
end
results
end
# Find a box in the collection with the given name and provider.
#
# @param [String] name Name of the box (logical name).
# @param [Array] providers Providers that the box implements.
# @param [String] version Version constraints to adhere to. Example:
# "~> 1.0" or "= 1.0, ~> 1.1"
# @return [Box] The box found, or `nil` if not found.
# This upgrades a v1.1 - v1.4 box directory structure up to a v1.5
# directory structure. This will raise exceptions if it fails in any
# way.
def upgrade_v1_1_v1_5
with_collection_lock do
temp_dir = Pathname.new(Dir.mktmpdir(TEMP_PREFIX, @temp_root))
@directory.children(true).each do |boxdir|
# Ignore all non-directories because they can't be boxes
next if !boxdir.directory?
box_name = boxdir.basename.to_s
# If it is a v1 box, then we need to upgrade it first
if v1_box?(boxdir)
upgrade_dir = v1_upgrade(boxdir)
FileUtils.mv(upgrade_dir, boxdir.join("virtualbox"))
end
# Create the directory for this box
new_box_dir = temp_dir.join(dir_name(box_name), "0")
new_box_dir.mkpath
# Go through each provider and move it
boxdir.children(true).each do |providerdir|
FileUtils.cp_r(providerdir, new_box_dir.join(providerdir.basename))
end
end
# Move the folder into place
@directory.rmtree
FileUtils.mv(temp_dir.to_s, @directory.to_s)
end
end
# Cleans the directory for a box by removing the folders that are
# empty.
def clean(name)
return false if exists?(name)
path = File.join(directory, dir_name(name))
FileUtils.rm_rf(path)
end
protected
# Returns the directory name for the box of the given name.
#
# @param [String] name
# @return [String]
def dir_name(name)
name = name.dup
name.gsub!(":", VAGRANT_COLON) if Util::Platform.windows?
name.gsub!("/", VAGRANT_SLASH)
name
end
# Returns the directory name for the box cleaned up
def undir_name(name)
name = name.dup
name.gsub!(VAGRANT_COLON, ":")
name.gsub!(VAGRANT_SLASH, "/")
name
end
# This checks if the given directory represents a V1 box on the
# system.
#
# @param [Pathname] dir Directory where the box is unpacked.
# @return [Boolean]
def v1_box?(dir)
# We detect a V1 box given by whether there is a "box.ovf" which
# is a heuristic but is pretty accurate.
dir.join("box.ovf").file?
end
# This upgrades the V1 box contained unpacked in the given directory
# and returns the directory of the upgraded version. This is
# _destructive_ to the contents of the old directory. That is, the
# contents of the old V1 box will be destroyed or moved.
#
# Preconditions:
# * `dir` is a valid V1 box. Verify with {#v1_box?}
#
# @param [Pathname] dir Directory where the V1 box is unpacked.
# @return [Pathname] Path to the unpackaged V2 box.
def v1_upgrade(dir)
@logger.debug("Upgrading box in directory: #{dir}")
temp_dir = Pathname.new(Dir.mktmpdir(TEMP_PREFIX, @temp_root))
@logger.debug("Temporary directory for upgrading: #{temp_dir}")
# Move all the things into the temporary directory
dir.children(true).each do |child|
# Don't move the temp_dir
next if child == temp_dir
# Move every other directory into the temporary directory
@logger.debug("Copying to upgrade directory: #{child}")
FileUtils.mv(child, temp_dir.join(child.basename))
end
# If there is no metadata.json file, make one, since this is how
# we determine if the box is a V2 box.
metadata_file = temp_dir.join("metadata.json")
if !metadata_file.file?
metadata_file.open("w") do |f|
f.write(JSON.generate({
provider: "virtualbox"
}))
end
end
# Return the temporary directory
temp_dir
end
# This locks the region given by the block with a lock on this
# collection.
def with_collection_lock
@lock.synchronize do
return yield
end
end
# This is a helper that makes sure that our temporary directories
# are cleaned up no matter what.
#
# @param [String] dir Path to a temporary directory
# @return [Object] The result of whatever the yield is
def with_temp_dir(dir=nil)
dir ||= Dir.mktmpdir(TEMP_PREFIX, @temp_root)
dir = Pathname.new(dir)
yield dir
ensure
FileUtils.rm_rf(dir.to_s)
end
# Checks if a box with a given name exists.
def exists?(box_name)
all.any? { |box| box.first.eql?(box_name) }
end
end
|
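A hedged usage sketch of `BoxCollection#find` above; `env` standing in for a configured Vagrant environment and the box name are assumptions of this example:

collection = env.boxes  # a Vagrant::BoxCollection
box = collection.find("hashicorp/precise64", [:virtualbox], ">= 0")
if box
  puts box.name          # the logical box name
else
  puts "not installed"   # find returns nil when no version/provider matches
end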
genaromadrid/email_checker | lib/email_checker/domain.rb | EmailChecker.Domain.mx_servers | ruby | def mx_servers
return @mx_servers if @mx_servers
@mx_servers = []
mx_records.each do |mx|
@mx_servers.push(preference: mx.preference, address: mx.exchange.to_s)
end
@mx_servers
end | The servers that this domain's MX records point at.
@return [Array<Hash>] Array of type { preference: 1, address: '127.0.0.1' } | train | https://github.com/genaromadrid/email_checker/blob/34cae07ddf5bb86efff030d062e420d5aa15486a/lib/email_checker/domain.rb#L67-L74 | class Domain
# @return [String] the provided domain name.
attr_reader :domain
# Returns a new instance of Domain
#
# @param domain [String] The domain name.
#
# @example EmailChecker::Domain.new('google.com')
def initialize(domain)
@domain = domain
end
# Checks if the domain exists and has valid MX and A records.
#
# @return [Boolean]
def valid?
return false unless @domain
Timeout.timeout(SERVER_TIMEOUT) do
return true if valid_mx_records?
return true if a_records?
end
rescue Timeout::Error, Errno::ECONNREFUSED
false
end
# Check if the domain has valid MX records and can receive emails.
# The MX server exists and it has valid A records.
#
# @return [Boolean]
def valid_mx_records?
mx_servers.each do |server|
exchange_a_records = dns.getresources(server[:address], Resolv::DNS::Resource::IN::A)
return true if exchange_a_records.any?
end
false
end
# Validates that the domain has at least 1 A record.
# Check if the domain exists.
#
# @return [Boolean]
def a_records?
a_records.any?
end
# The A records for the domain.
#
# @return [Array<Resolv::DNS::Resource::IN::A>]
def a_records
@a_records ||= dns.getresources(@domain, Resolv::DNS::Resource::IN::A)
end
# The MX records of the domain.
#
# @return [Array<Resolv::DNS::Resource::IN::MX>]
def mx_records
@mx_records ||= dns.getresources(@domain, Resolv::DNS::Resource::IN::MX).sort_by(&:preference)
end
# The servers that this domain's MX records point at.
#
# @return [Array<Hash>] Array of type { preference: 1, address: '127.0.0.1' }
private
def dns
@dns ||= Resolv::DNS.new
end
end
|
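A short usage sketch of the `Domain` API above, with `google.com` purely as an illustrative input:

domain = EmailChecker::Domain.new('google.com')
domain.valid?      # => true when MX or A records resolve within the timeout
domain.mx_servers  # e.g. [{ preference: 10, address: "smtp.google.com." }, ...]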
dennisreimann/masq | app/controllers/masq/server_controller.rb | Masq.ServerController.ensure_valid_checkid_request | ruby | def ensure_valid_checkid_request
self.openid_request = checkid_request
if !openid_request.is_a?(OpenID::Server::CheckIDRequest)
redirect_to root_path, :alert => t(:identity_verification_request_invalid)
elsif !allow_verification?
flash[:notice] = logged_in? && !pape_requirements_met?(auth_time) ?
t(:service_provider_requires_reauthentication_last_login_too_long_ago) :
t(:login_to_verify_identity)
session[:return_to] = proceed_path
redirect_to login_path
end
end | Use this as before_filter for every CheckID request based action.
Loads the current openid request and cancels if none can be found.
The user has to log in if he has not yet verified his ownership of
the identifier. | train | https://github.com/dennisreimann/masq/blob/bc6b6d84fe06811b9de19e7863c53c6bfad201fe/app/controllers/masq/server_controller.rb#L157-L168 | class ServerController < BaseController
# CSRF-protection must be skipped, because incoming
# OpenID requests lack an authenticity token
skip_before_filter :verify_authenticity_token
# Error handling
rescue_from OpenID::Server::ProtocolError, :with => :render_openid_error
# Actions other than index require a logged in user
before_filter :login_required, :except => [:index, :cancel, :seatbelt_config, :seatbelt_login_state]
before_filter :ensure_valid_checkid_request, :except => [:index, :cancel, :seatbelt_config, :seatbelt_login_state]
after_filter :clear_checkid_request, :only => [:cancel, :complete]
# These methods are used to display information about the request to the user
helper_method :sreg_request, :ax_fetch_request, :ax_store_request
# This is the server endpoint which handles all incoming OpenID requests.
# Associate and CheckAuth requests are answered directly - functionality
# therefor is provided by the ruby-openid gem. Handling of CheckId requests
# dependents on the users login state (see handle_checkid_request).
# Yadis requests return information about this endpoint.
def index
clear_checkid_request
respond_to do |format|
format.html do
if openid_request.is_a?(OpenID::Server::CheckIDRequest)
handle_checkid_request
elsif openid_request
handle_non_checkid_request
else
render :text => t(:this_is_openid_not_a_human_ressource)
end
end
format.xrds
end
end
# This action decides how to process the current request and serves as
# dispatcher and re-entry in case the request could not be processed
# directly (for instance if the user had to log in first).
# When the user has already trusted the relying party, the request will
# be answered based on the users release policy. If the request is immediate
# (relying party wants no user interaction, used e.g. for ajax requests)
# the request can only be answered if no further information (like simple
# registration data) is requested. Otherwise the user will be redirected
# to the decision page.
def proceed
identity = identifier(current_account)
if @site = current_account.sites.find_by_url(checkid_request.trust_root)
resp = checkid_request.answer(true, nil, identity)
resp = add_sreg(resp, @site.sreg_properties) if sreg_request
resp = add_ax(resp, @site.ax_properties) if ax_fetch_request
resp = add_pape(resp, auth_policies, auth_level, auth_time)
render_response(resp)
elsif checkid_request.immediate && (sreg_request || ax_store_request || ax_fetch_request)
render_response(checkid_request.answer(false))
elsif checkid_request.immediate
render_response(checkid_request.answer(true, nil, identity))
else
redirect_to decide_path
end
end
# Displays the decision page on that the user can confirm the request and
# choose which data should be transfered to the relying party.
def decide
@site = current_account.sites.find_or_initialize_by_url(checkid_request.trust_root)
@site.persona = current_account.personas.find(params[:persona_id] || :first) if sreg_request || ax_store_request || ax_fetch_request
end
# This action is called by submitting the decision form, the information entered by
# the user is used to answer the request. If the user decides to always trust the
# relying party, a new site according to the release policies the will be created.
def complete
if params[:cancel]
cancel
else
resp = checkid_request.answer(true, nil, identifier(current_account))
if params[:always]
@site = current_account.sites.find_or_create_by_persona_id_and_url(params[:site][:persona_id], params[:site][:url])
@site.update_attributes(params[:site])
elsif sreg_request || ax_fetch_request
@site = current_account.sites.find_or_initialize_by_persona_id_and_url(params[:site][:persona_id], params[:site][:url])
@site.attributes = params[:site]
elsif ax_store_request
@site = current_account.sites.find_or_initialize_by_persona_id_and_url(params[:site][:persona_id], params[:site][:url])
not_supported, not_accepted, accepted = [], [], []
ax_store_request.data.each do |type_uri, values|
if property = Persona.attribute_name_for_type_uri(type_uri)
store_attribute = params[:site][:ax_store][property.to_sym]
if store_attribute && !store_attribute[:value].blank?
@site.persona.update_attribute(property, values.first)
accepted << type_uri
else
not_accepted << type_uri
end
else
not_supported << type_uri
end
end
ax_store_response = (accepted.count > 0) ? OpenID::AX::StoreResponse.new : OpenID::AX::StoreResponse.new(false, "None of the attributes were accepted.")
resp.add_extension(ax_store_response)
end
resp = add_pape(resp, auth_policies, auth_level, auth_time)
resp = add_sreg(resp, @site.sreg_properties) if sreg_request && @site.sreg_properties
resp = add_ax(resp, @site.ax_properties) if ax_fetch_request && @site.ax_properties
render_response(resp)
end
end
# Cancels the current OpenID request
def cancel
redirect_to checkid_request.cancel_url
end
protected
# Decides how to process an incoming checkid request. If the user is
# already logged in he will be forwarded to the proceed action. If
# the user is not logged in and the request is immediate, the request
# cannot be answered successfully. In case the user is not logged in,
# the request will be stored and the user is asked to log in.
def handle_checkid_request
if allow_verification?
save_checkid_request
redirect_to proceed_path
elsif openid_request.immediate
render_response(openid_request.answer(false))
else
reset_session
request = save_checkid_request
session[:return_to] = proceed_path
redirect_to( request.from_trusted_domain? ? login_path : safe_login_path )
end
end
# Stores the current OpenID request.
# Returns the OpenIdRequest
def save_checkid_request
clear_checkid_request
request = OpenIdRequest.create!(:parameters => openid_params)
session[:request_token] = request.token
request
end
# Deletes the old request when a new one comes in.
def clear_checkid_request
unless session[:request_token].blank?
OpenIdRequest.destroy_all :token => session[:request_token]
session[:request_token] = nil
end
end
# Use this as before_filter for every CheckID request based action.
# Loads the current openid request and cancels if none can be found.
# The user has to log in if he has not yet verified his ownership of
# the identifier.
# The user must be logged in, he must be the owner of the claimed identifier
# and the PAPE requirements must be met if applicable.
def allow_verification?
logged_in? && correct_identifier? && pape_requirements_met?(auth_time)
end
# Is the user allowed to verify the claimed identifier? The user
# must be logged in, so that we know his identifier or the identifier
# has to be selected by the server (id_select).
def correct_identifier?
(openid_request.identity == identifier(current_account) || openid_request.id_select)
end
# Clears the stored request and answers
def render_response(resp)
clear_checkid_request
render_openid_response(resp)
end
# Transforms the parameters from the form to valid AX response values
def transform_ax_data(parameters)
data = {}
parameters.each_pair do |key, details|
if details['value']
data["type.#{key}"] = details['type']
data["value.#{key}"] = details['value']
end
end
data
end
# Renders the exception message as text output
def render_openid_error(exception)
error = case exception
when OpenID::Server::MalformedTrustRoot then "Malformed trust root '#{exception.to_s}'"
else exception.to_s
end
render :text => h("Invalid OpenID request: #{error}"), :status => 500
end
private
# The NIST Assurance Level, see:
# http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-01.html#anchor12
def auth_level
if Masq::Engine.config.masq['use_ssl']
current_account.last_authenticated_with_yubikey? ? 3 : 2
else
0
end
end
def auth_time
current_account.last_authenticated_at
end
def auth_policies
current_account.last_authenticated_with_yubikey? ?
[OpenID::PAPE::AUTH_MULTI_FACTOR, OpenID::PAPE::AUTH_PHISHING_RESISTANT] :
[]
end
end
|
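A sketch of how the guard above is wired in, mirroring the before_filter declarations in the controller; the subclass and action names are illustrative:

class MyProviderController < Masq::ServerController
  # every CheckID-based action must pass the guard first
  before_filter :ensure_valid_checkid_request, :except => [:index, :cancel]
end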
spiegela/MultiBitField | lib/multi_bit_field.rb | MultiBitField.InstanceMethods.get_bits_for | ruby | def get_bits_for(column_name, field)
return nil if self[column_name].nil?
only = self.class.only_mask_for column_name, field
inc = self.class.increment_mask_for column_name, field
(self[column_name] & only)/inc
end | :nodoc: | train | https://github.com/spiegela/MultiBitField/blob/53674ba73caea8d871510d02271b71edaa1335f1/lib/multi_bit_field.rb#L236-L241 | module InstanceMethods
# Sets one or more bitfields to 0 within a column
#
# +reset_bitfield :column, :fields
#
# @example
# user.reset_bitfield :column, :daily, :monthly
#
# @param [ Symbol ] column name of the column these fields are in
# @param [ Symbol ] field(s) name of the field(s) to reset
def reset_bitfields column_name, *fields
mask = self.class.reset_mask_for column_name, *fields
self[column_name] = self[column_name] & mask
save
end
alias :reset_bitfield :reset_bitfields
# Increases one or more bitfields by 1 value
#
# +increment_bitfield :column, :fields
#
# @example
# user.increment_bitfield :column, :daily, :monthly
#
# @param [ Symbol ] column name of the column these fields are in
# @param [ Symbol ] field(s) name of the field(s) to reset
def increment_bitfields column_name, *fields
mask = self.class.increment_mask_for column_name, *fields
self[column_name] = self[column_name] += mask
save
end
alias :increment_bitfield :increment_bitfields
private
# :nodoc:
# :nodoc:
def set_bits_for(column_name, field, value)
if self.class.range_for(column_name, field).sum{|i| 2 ** i} < value
raise ArgumentError, "Value: #{value} too large for bitfield filum"
end
self[column_name] ||= 0
reset_mask = self.class.reset_mask_for(column_name, field)
inc = self.class.increment_mask_for(column_name, field)
self[column_name] = (self[column_name] & reset_mask) | (value * inc)
end
end
|
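A minimal sketch of the public bitfield API that wraps the private get_bits_for/set_bits_for helpers above; the `user` record and its `:counters` bitfield column are assumptions of this example:

user.increment_bitfield :counters, :daily, :monthly  # +1 on both fields
user.reset_bitfield     :counters, :daily            # zero the :daily bits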
spikegrobstein/capnotify | lib/capnotify/component.rb | Capnotify.Component.render_content | ruby | def render_content(format)
begin
ERB.new( File.open( template_path_for(format) ).read, nil, '%<>' ).result(self.get_binding)
rescue TemplateUndefined
''
end
end | FIXME: this should probably leverage Procs for rendering of different types, maybe?
that would give a lot of power to a developer who wants a custom format for a plugin (eg XML or JSON)
Render the content in the given format using the right built-in template. Returns the content as a string.
In the event that there is not a valid template, return an empty string. | train | https://github.com/spikegrobstein/capnotify/blob/b21ea876ae2a04e8090206a687436565051f0e08/lib/capnotify/component.rb#L52-L58 | class Component
class TemplateUndefined < StandardError; end
attr_accessor :header, :name
# the class(s) for this component (as a string)
attr_accessor :css_class, :custom_css
# a block that will configure this instance lazily
attr_reader :builder
attr_accessor :template_path, :renderers
attr_accessor :config
def initialize(name, options={}, &block)
@name = name.to_sym
# default stuff
@template_path = File.join( File.dirname(__FILE__), 'templates' )
@renderers = {
:html => '_component.html.erb',
:txt => '_component.txt.erb'
}
@header = options[:header]
@css_class = options[:css_class] || 'section'
@custom_css = options[:custom_css]
if block_given?
@builder = block
end
end
# assign the content as new_content
def content=(new_content)
@content = new_content
end
def content
@content
end
# FIXME: this should probably leverage Procs for rendering of different types, maybe?
# that would give a lot of power to a developer who wants a custom format for a plugin (eg XML or JSON)
# Render the content in the given format using the right built-in template. Returns the content as a string.
# In the event that there is not a valid template, return an empty string.
# return the binding for this object
# this is needed when embedding ERB templates in each other
def get_binding
binding
end
# set the template path for this particular instance
# the template path is the path to the parent directory of a renderer ERB template
def template_path_for(format)
raise TemplateUndefined, "Template for #{ format } is missing!" if @renderers[format].nil?
File.join( @template_path, @renderers[format] )
end
# create renderers
# given a key for the format, provide the name of the ERB template to use to render relative to the template path
def render_for(renderers={})
@renderers = @renderers.merge(renderers)
end
# call @builder with self as a param if @builder is present
# ensure builder is nil
# then return self
def build!(config)
@builder.call(self) unless @builder.nil?
@builder = nil
@config = config
return self
end
end
|
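A usage sketch of `Capnotify::Component` and the `render_content` dispatch above; the component name, content, and `config` object are illustrative:

component = Capnotify::Component.new(:summary, :header => 'Summary') do |c|
  c.content = 'Deployed revision abc123'
end
component.build!(config)         # runs the builder block and stores config
component.render_content(:html)  # renders the bundled _component.html.erb
component.render_content(:json)  # no renderer registered => returns ''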
chaintope/bitcoinrb | lib/bitcoin/script/script.rb | Bitcoin.Script.subscript_codeseparator | ruby | def subscript_codeseparator(separator_index)
buf = []
process_separator_index = 0
chunks.each{|chunk|
buf << chunk if process_separator_index == separator_index
if chunk.ord == OP_CODESEPARATOR && process_separator_index < separator_index
process_separator_index += 1
end
}
buf.join
end | Returns the script that remains after deleting everything before the OP_CODESEPARATOR selected by separator_index. | train | https://github.com/chaintope/bitcoinrb/blob/39396e4c9815214d6b0ab694fa8326978a7f5438/lib/bitcoin/script/script.rb#L462-L472 | class Script
include Bitcoin::Opcodes
attr_accessor :chunks
def initialize
@chunks = []
end
# generate P2PKH script
def self.to_p2pkh(pubkey_hash)
new << OP_DUP << OP_HASH160 << pubkey_hash << OP_EQUALVERIFY << OP_CHECKSIG
end
# generate P2WPKH script
def self.to_p2wpkh(pubkey_hash)
new << WITNESS_VERSION << pubkey_hash
end
# generate m of n multisig p2sh script
# @param [String] m the number of signatures required for multisig
# @param [Array] pubkeys array of public keys that compose multisig
# @return [Script, Script] first element is p2sh script, second one is redeem script.
def self.to_p2sh_multisig_script(m, pubkeys)
redeem_script = to_multisig_script(m, pubkeys)
[redeem_script.to_p2sh, redeem_script]
end
# generate p2sh script.
# @param [String] script_hash script hash for P2SH
# @return [Script] P2SH script
def self.to_p2sh(script_hash)
Script.new << OP_HASH160 << script_hash << OP_EQUAL
end
# generate p2sh script with this as a redeem script
# @return [Script] P2SH script
def to_p2sh
Script.to_p2sh(to_hash160)
end
def get_multisig_pubkeys
num = Bitcoin::Opcodes.opcode_to_small_int(chunks[-2].bth.to_i(16))
(1..num).map{ |i| chunks[i].pushed_data }
end
# generate m of n multisig script
# @param [String] m the number of signatures required for multisig
# @param [Array] pubkeys array of public keys that compose multisig
# @return [Script] multisig script.
def self.to_multisig_script(m, pubkeys)
new << m << pubkeys << pubkeys.size << OP_CHECKMULTISIG
end
# generate p2wsh script for +redeem_script+
# @param [Script] redeem_script target redeem script
# @param [Script] p2wsh script
def self.to_p2wsh(redeem_script)
new << WITNESS_VERSION << redeem_script.to_sha256
end
# generate script from string.
def self.from_string(string)
script = new
string.split(' ').each do |v|
opcode = Opcodes.name_to_opcode(v)
if opcode
script << (v =~ /^\d/ && Opcodes.small_int_to_opcode(v.ord) ? v.ord : opcode)
else
script << (v =~ /^[0-9]+$/ ? v.to_i : v)
end
end
script
end
# generate script from addr.
# @param [String] addr address.
# @return [Bitcoin::Script] parsed script.
def self.parse_from_addr(addr)
begin
segwit_addr = Bech32::SegwitAddr.new(addr)
raise 'Invalid hrp.' unless Bitcoin.chain_params.bech32_hrp == segwit_addr.hrp
Bitcoin::Script.parse_from_payload(segwit_addr.to_script_pubkey.htb)
rescue Exception => e
hex, addr_version = Bitcoin.decode_base58_address(addr)
case addr_version
when Bitcoin.chain_params.address_version
Bitcoin::Script.to_p2pkh(hex)
when Bitcoin.chain_params.p2sh_version
Bitcoin::Script.to_p2sh(hex)
else
raise e
end
end
end
def self.parse_from_payload(payload)
s = new
buf = StringIO.new(payload)
until buf.eof?
opcode = buf.read(1)
if opcode.pushdata?
pushcode = opcode.ord
packed_size = nil
len = case pushcode
when OP_PUSHDATA1
packed_size = buf.read(1)
packed_size.unpack('C').first
when OP_PUSHDATA2
packed_size = buf.read(2)
packed_size.unpack('v').first
when OP_PUSHDATA4
packed_size = buf.read(4)
packed_size.unpack('V').first
else
pushcode if pushcode < OP_PUSHDATA1
end
if len
s.chunks << [len].pack('C') if buf.eof?
unless buf.eof?
chunk = (packed_size ? (opcode + packed_size) : (opcode)) + buf.read(len)
s.chunks << chunk
end
end
else
if Opcodes.defined?(opcode.ord)
s << opcode.ord
else
s.chunks << (opcode + buf.read) # If opcode is invalid, put all remaining data in last chunk.
end
end
end
s
end
def to_payload
chunks.join
end
def to_hex
to_payload.bth
end
def empty?
chunks.size == 0
end
def addresses
return [p2pkh_addr] if p2pkh?
return [p2sh_addr] if p2sh?
return [bech32_addr] if witness_program?
return get_multisig_pubkeys.map{|pubkey| Bitcoin::Key.new(pubkey: pubkey.bth).to_p2pkh} if multisig?
[]
end
# check whether standard script.
def standard?
p2pkh? | p2sh? | p2wpkh? | p2wsh? | multisig? | standard_op_return?
end
# whether this script is a P2PKH format script.
def p2pkh?
return false unless chunks.size == 5
[OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG] ==
(chunks[0..1]+ chunks[3..4]).map(&:ord) && chunks[2].bytesize == 21
end
# whether this script is a P2WPKH format script.
def p2wpkh?
return false unless chunks.size == 2
chunks[0].ord == WITNESS_VERSION && chunks[1].bytesize == 21
end
def p2wsh?
return false unless chunks.size == 2
chunks[0].ord == WITNESS_VERSION && chunks[1].bytesize == 33
end
def p2sh?
return false unless chunks.size == 3
OP_HASH160 == chunks[0].ord && OP_EQUAL == chunks[2].ord && chunks[1].bytesize == 21
end
def multisig?
return false if chunks.size < 4 || chunks.last.ord != OP_CHECKMULTISIG
pubkey_count = Opcodes.opcode_to_small_int(chunks[-2].opcode)
sig_count = Opcodes.opcode_to_small_int(chunks[0].opcode)
return false unless pubkey_count || sig_count
sig_count <= pubkey_count
end
def op_return?
chunks.size >= 1 && chunks[0].ord == OP_RETURN
end
def standard_op_return?
op_return? && size <= MAX_OP_RETURN_RELAY &&
(chunks.size == 1 || chunks[1].opcode <= OP_16)
end
def op_return_data
return nil unless op_return?
return nil if chunks.size == 1
chunks[1].pushed_data
end
# whether data push only script which dose not include other opcode
def push_only?
chunks.each do |c|
return false if !c.opcode.nil? && c.opcode > OP_16
end
true
end
# A witness program is any valid Script that consists of a 1-byte push opcode followed by a data push between 2 and 40 bytes.
def witness_program?
return false if size < 4 || size > 42 || chunks.size < 2
opcode = chunks[0].opcode
return false if opcode != OP_0 && (opcode < OP_1 || opcode > OP_16)
return false unless chunks[1].pushdata?
if size == (chunks[1][0].unpack('C').first + 2)
program_size = chunks[1].pushed_data.bytesize
return program_size >= 2 && program_size <= 40
end
false
end
# get witness commitment
def witness_commitment
return nil if !op_return? || op_return_data.bytesize < 36
buf = StringIO.new(op_return_data)
return nil unless buf.read(4).bth == WITNESS_COMMITMENT_HEADER
buf.read(32).bth
end
# If this script is witness program, return its script code,
# otherwise returns the self payload. ScriptInterpreter does not use this.
def to_script_code(skip_separator_index = 0)
payload = to_payload
if p2wpkh?
payload = Script.to_p2pkh(chunks[1].pushed_data.bth).to_payload
elsif skip_separator_index > 0
payload = subscript_codeseparator(skip_separator_index)
end
Bitcoin.pack_var_string(payload)
end
# get witness version and witness program
def witness_data
version = opcode_to_small_int(chunks[0].opcode)
program = chunks[1].pushed_data
[version, program]
end
# append object to payload
def <<(obj)
if obj.is_a?(Integer)
push_int(obj)
elsif obj.is_a?(String)
append_data(obj)
elsif obj.is_a?(Array)
obj.each { |o| self.<< o}
self
end
end
# push integer to stack.
def push_int(n)
begin
append_opcode(n)
rescue ArgumentError
append_data(Script.encode_number(n))
end
self
end
# append opcode to payload
# @param [Integer] opcode append opcode which defined by Bitcoin::Opcodes
# @return [Script] return self
def append_opcode(opcode)
opcode = Opcodes.small_int_to_opcode(opcode) if -1 <= opcode && opcode <= 16
raise ArgumentError, "specified invalid opcode #{opcode}." unless Opcodes.defined?(opcode)
chunks << opcode.chr
self
end
# append data to payload with pushdata opcode
# @param [String] data append data. this data is not binary
# @return [Script] return self
def append_data(data)
data = Encoding::ASCII_8BIT == data.encoding ? data : data.htb
chunks << Bitcoin::Script.pack_pushdata(data)
self
end
# Check the item is in the chunk of the script.
def include?(item)
chunk_item = if item.is_a?(Integer)
item.chr
elsif item.is_a?(String)
data = Encoding::ASCII_8BIT == item.encoding ? item : item.htb
Bitcoin::Script.pack_pushdata(data)
end
return false unless chunk_item
chunks.include?(chunk_item)
end
def to_s
chunks.map { |c|
case c
when Integer
opcode_to_name(c)
when String
if c.pushdata?
v = Opcodes.opcode_to_small_int(c.ord)
if v
v
else
data = c.pushed_data
if data.bytesize <= 4
Script.decode_number(data.bth) # for scriptnum
else
data.bth
end
end
else
opcode = Opcodes.opcode_to_name(c.ord)
opcode ? opcode : 'OP_UNKNOWN [error]'
end
end
}.join(' ')
end
# generate sha-256 hash for payload
def to_sha256
Bitcoin.sha256(to_payload).bth
end
# generate hash160 hash for payload
def to_hash160
Bitcoin.hash160(to_payload.bth)
end
# script size
def size
to_payload.bytesize
end
# execute script interpreter using this script for development.
def run
Bitcoin::ScriptInterpreter.eval(Bitcoin::Script.new, self.dup)
end
# encode int value to script number hex.
# The stacks hold byte vectors.
# When used as numbers, byte vectors are interpreted as little-endian variable-length integers
# with the most significant bit determining the sign of the integer.
# Thus 0x81 represents -1. 0x80 is another representation of zero (so called negative 0).
# Positive 0 is represented by a null-length vector.
# Byte vectors are interpreted as Booleans where False is represented by any representation of zero,
# and True is represented by any representation of non-zero.
def self.encode_number(i)
return '' if i == 0
negative = i < 0
hex = i.abs.to_even_length_hex
hex = '0' + hex unless (hex.length % 2).zero?
v = hex.htb.reverse # change endian
v = v << (negative ? 0x80 : 0x00) unless (v[-1].unpack('C').first & 0x80) == 0
v[-1] = [v[-1].unpack('C').first | 0x80].pack('C') if negative
v.bth
end
# decode script number hex to int value
def self.decode_number(s)
v = s.htb.reverse
return 0 if v.length.zero?
mbs = v[0].unpack('C').first
v[0] = [mbs - 0x80].pack('C') unless (mbs & 0x80) == 0
result = v.bth.to_i(16)
result = -result unless (mbs & 0x80) == 0
result
end
# binary +data+ convert pushdata which contains data length and append PUSHDATA opcode if necessary.
def self.pack_pushdata(data)
size = data.bytesize
header = if size < OP_PUSHDATA1
[size].pack('C')
elsif size < 0xff
[OP_PUSHDATA1, size].pack('CC')
elsif size < 0xffff
[OP_PUSHDATA2, size].pack('Cv')
elsif size < 0xffffffff
[OP_PUSHDATA4, size].pack('CV')
else
raise ArgumentError, 'data size is too big.'
end
header + data
end
# subscript this script to the specified range.
def subscript(*args)
s = self.class.new
s.chunks = chunks[*args]
s
end
# removes chunks matching subscript byte-for-byte and returns as a new object.
def find_and_delete(subscript)
raise ArgumentError, 'subscript must be Bitcoin::Script' unless subscript.is_a?(Script)
return self if subscript.chunks.empty?
buf = []
i = 0
result = Script.new
chunks.each do |chunk|
sub_chunk = subscript.chunks[i]
if chunk.start_with?(sub_chunk)
if chunk == sub_chunk
buf << chunk
i += 1
(i = 0; buf.clear) if i == subscript.chunks.size # matched the whole subscript
else # matched the part of head
i = 0
tmp = chunk.dup
tmp.slice!(sub_chunk)
result.chunks << tmp
end
else
result.chunks << buf.join unless buf.empty?
if buf.first == chunk
i = 1
buf = [chunk]
else
i = 0
result.chunks << chunk
end
end
end
result
end
# remove all occurences of opcode. Typically it's OP_CODESEPARATOR.
def delete_opcode(opcode)
@chunks = chunks.select{|chunk| chunk.ord != opcode}
self
end
# Returns the script that remains after deleting everything before the OP_CODESEPARATOR selected by separator_index.
def ==(other)
return false unless other
chunks == other.chunks
end
def type
return 'pubkeyhash' if p2pkh?
return 'scripthash' if p2sh?
return 'multisig' if multisig?
return 'witness_v0_keyhash' if p2wpkh?
return 'witness_v0_scripthash' if p2wsh?
'nonstandard'
end
def to_h
h = {asm: to_s, hex: to_payload.bth, type: type}
addrs = addresses
unless addrs.empty?
h[:req_sigs] = multisig? ? Bitcoin::Opcodes.opcode_to_small_int(chunks[0].bth.to_i(16)) :addrs.size
h[:addresses] = addrs
end
h
end
private
# generate p2pkh address. if script dose not p2pkh, return nil.
def p2pkh_addr
return nil unless p2pkh?
hash160 = chunks[2].pushed_data.bth
return nil unless hash160.htb.bytesize == 20
Bitcoin.encode_base58_address(hash160, Bitcoin.chain_params.address_version)
end
# generate p2wpkh address. if script dose not p2wpkh, return nil.
def p2wpkh_addr
p2wpkh? ? bech32_addr : nil
end
# generate p2sh address. if script dose not p2sh, return nil.
def p2sh_addr
return nil unless p2sh?
hash160 = chunks[1].pushed_data.bth
return nil unless hash160.htb.bytesize == 20
Bitcoin.encode_base58_address(hash160, Bitcoin.chain_params.p2sh_version)
end
# generate p2wsh address. if script dose not p2wsh, return nil.
def p2wsh_addr
p2wsh? ? bech32_addr : nil
end
# return bech32 address for payload
def bech32_addr
segwit_addr = Bech32::SegwitAddr.new
segwit_addr.hrp = Bitcoin.chain_params.bech32_hrp
segwit_addr.script_pubkey = to_payload.bth
segwit_addr.addr
end
end
|
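A worked sketch of `subscript_codeseparator` above: with separator_index 1, every chunk up to and including the first OP_CODESEPARATOR is dropped.

script = Bitcoin::Script.from_string('OP_1 OP_CODESEPARATOR OP_2 OP_CHECKSIG')
script.subscript_codeseparator(1)  # payload of "OP_2 OP_CHECKSIG"
script.subscript_codeseparator(0)  # the full payload, nothing skipped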
sds/haml-lint | lib/haml_lint/configuration.rb | HamlLint.Configuration.ensure_linter_include_exclude_arrays_exist | ruby | def ensure_linter_include_exclude_arrays_exist
@hash['linters'].each_key do |linter_name|
%w[include exclude].each do |option|
linter_config = @hash['linters'][linter_name]
linter_config[option] = Array(linter_config[option])
end
end
end | Ensure `include` and `exclude` options for linters are arrays
(since users can specify a single string glob pattern for convenience) | train | https://github.com/sds/haml-lint/blob/024c773667e54cf88db938c2b368977005d70ee8/lib/haml_lint/configuration.rb#L116-L123 | class Configuration
# Internal hash storing the configuration.
attr_reader :hash
# Creates a configuration from the given options hash.
#
# @param options [Hash]
def initialize(options, file = nil)
@hash = options
@config_dir = file ? File.dirname(file) : nil
resolve_requires
validate
end
# Access the configuration as if it were a hash.
#
# @param key [String]
# @return [Array,Hash,Number,String]
def [](key)
@hash[key]
end
# Compares this configuration with another.
#
# @param other [HamlLint::Configuration]
# @return [true,false] whether the given configuration is equivalent
def ==(other)
super || @hash == other.hash
end
# Returns a non-modifiable configuration for the specified linter.
#
# @param linter [HamlLint::Linter,Class]
def for_linter(linter)
linter_name =
case linter
when Class
linter.name.split('::').last
when HamlLint::Linter
linter.name
end
@hash['linters'].fetch(linter_name, {}).dup.freeze
end
# Merges the given configuration with this one, returning a new
# {Configuration}. The provided configuration will either add to or replace
# any options defined in this configuration.
#
# @param config [HamlLint::Configuration]
def merge(config)
self.class.new(smart_merge(@hash, config.hash))
end
private
# Merge two hashes such that nested hashes are merged rather than replaced.
#
# @param parent [Hash]
# @param child [Hash]
# @return [Hash]
def smart_merge(parent, child)
parent.merge(child) do |_key, old, new|
case old
when Hash
smart_merge(old, new)
else
new
end
end
end
# Requires any extra linters / files specified in the configuration.
# Strings starting with a "." are treated as relative paths
def resolve_requires
relative_require_dir = @config_dir || Dir.pwd
Array(@hash['require']).each do |r|
if r.start_with?('.')
require File.join(relative_require_dir, r)
else
require r
end
end
end
# Validates the configuration for any invalid options, normalizing it where
# possible.
def validate
ensure_exclude_option_array_exists
ensure_linter_section_exists
ensure_linter_include_exclude_arrays_exist
ensure_linter_severity_valid
end
# Ensures the `exclude` global option is an array.
def ensure_exclude_option_array_exists
@hash['exclude'] = Array(@hash['exclude'])
end
# Ensures the `linters` configuration section exists.
def ensure_linter_section_exists
@hash['linters'] ||= {}
end
# Ensure `include` and `exclude` options for linters are arrays
# (since users can specify a single string glob pattern for convenience)
def ensure_linter_severity_valid
@hash['linters'].each do |linter_name, linter_config|
severity = linter_config['severity']
unless [nil, 'warning', 'error'].include?(severity)
raise HamlLint::Exceptions::ConfigurationError,
"Invalid severity '#{severity}' specified for #{linter_name}"
end
end
end
end
|
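A small sketch of the include/exclude normalization above: a single glob string given for a linter is wrapped into an array during validation.

config = HamlLint::Configuration.new(
  'linters' => { 'LineLength' => { 'include' => 'app/views/**/*.haml' } }
)
config['linters']['LineLength']['include']  # => ["app/views/**/*.haml"]
config['linters']['LineLength']['exclude']  # => []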
dotless-de/vagrant-vbguest | lib/vagrant-vbguest/command.rb | VagrantVbguest.Command.execute | ruby | def execute
options = {
:_method => :run,
:_rebootable => true,
:auto_reboot => false
}
opts = OptionParser.new do |opts|
opts.banner = "Usage: vagrant vbguest [vm-name] "\
"[--do start|rebuild|install] "\
"[--status] "\
"[-f|--force] "\
"[-b|--auto-reboot] "\
"[-R|--no-remote] "\
"[--iso VBoxGuestAdditions.iso] "\
"[--no-cleanup]"
opts.separator ""
opts.on("--do COMMAND", [:start, :rebuild, :install], "Manually `start`, `rebuild` or `install` GuestAdditions.") do |command|
options[:_method] = command
options[:force] = true
end
opts.on("--status", "Print current GuestAdditions status and exit.") do
options[:_method] = :status
options[:_rebootable] = false
end
opts.on("-f", "--force", "Whether to force the installation. (Implied by --do start|rebuild|install)") do
options[:force] = true
end
opts.on("--auto-reboot", "-b", "Allow rebooting the VM after installation. (when GuestAdditions won't start)") do
options[:auto_reboot] = true
end
opts.on("--no-remote", "-R", "Do not attempt do download the iso file from a webserver") do
options[:no_remote] = true
end
opts.on("--iso file_or_uri", "Full path or URI to the VBoxGuestAdditions.iso") do |file_or_uri|
options[:iso_path] = file_or_uri
end
opts.on("--no-cleanup", "Do not run cleanup tasks after installation. (for debugging)") do
options[:no_cleanup] = true
end
build_start_options(opts, options)
end
argv = parse_options(opts)
return if !argv
if argv.empty?
with_target_vms(nil) { |vm| execute_on_vm(vm, options) }
else
argv.each do |vm_name|
with_target_vms(vm_name) { |vm| execute_on_vm(vm, options) }
end
end
end | Runs the vbguest installer on the VMs that are represented
by this environment. | train | https://github.com/dotless-de/vagrant-vbguest/blob/934fd22864c811c951c020cfcfc5c2ef9d79d5ef/lib/vagrant-vbguest/command.rb#L12-L74 | class Command < Vagrant.plugin("2", :command)
include VagrantPlugins::CommandUp::StartMixins
include VagrantVbguest::Helpers::Rebootable
# Runs the vbguest installer on the VMs that are represented
# by this environment.
# Show description when `vagrant list-commands` is triggered
def self.synopsis
"plugin: vagrant-vbguest: install VirtualBox Guest Additions to the machine"
end
protected
# Executes a task on a specific VM.
#
# @param vm [Vagrant::VM]
# @param options [Hash] Parsed options from the command line
def execute_on_vm(vm, options)
check_runable_on(vm)
options = options.clone
_method = options.delete(:_method)
_rebootable = options.delete(:_rebootable)
options = vm.config.vbguest.to_hash.merge(options)
machine = VagrantVbguest::Machine.new(vm, options)
status = machine.state
vm.env.ui.send((:ok == status ? :success : :warn), I18n.t("vagrant_vbguest.status.#{status}", machine.info))
if _method != :status
machine.send(_method)
end
reboot!(vm, options) if _rebootable && machine.reboot?
rescue VagrantVbguest::Installer::NoInstallerFoundError => e
vm.env.ui.error e.message
end
def check_runable_on(vm)
raise Vagrant::Errors::VMNotCreatedError if vm.state.id == :not_created
raise Vagrant::Errors::VMInaccessible if vm.state.id == :inaccessible
raise Vagrant::Errors::VMNotRunningError if vm.state.id != :running
raise VagrantVbguest::NoVirtualBoxMachineError if vm.provider.class != VagrantPlugins::ProviderVirtualBox::Provider
end
end
|
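Illustrative invocations matching the option parser above, taken directly from its usage banner; `web` is a placeholder VM name:

vagrant vbguest --status
vagrant vbguest web --do rebuild --auto-reboot
vagrant vbguest --iso ~/Downloads/VBoxGuestAdditions.iso --no-cleanup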
documentcloud/cloud-crowd | lib/cloud_crowd/action.rb | CloudCrowd.Action.` | ruby | def `(command)
result = super(command)
exit_code = $?.to_i
raise Error::CommandFailed.new(result, exit_code) unless exit_code == 0
result
end | Actions have a backticks command that raises a CommandFailed exception
on failure, so that processing doesn't just blithely continue. | train | https://github.com/documentcloud/cloud-crowd/blob/a66172eabc6cb526b27be2bb821e2ea4258c82d4/lib/cloud_crowd/action.rb#L74-L79 | class Action
FILE_URL = /\Afile:\/\//
attr_reader :input, :input_path, :file_name, :options, :work_directory
# Initializing an Action sets up all of the read-only variables that
# form the bulk of the API for action subclasses. (Paths to read from and
# write to). It creates the +work_directory+ and moves into it.
# If we're not merging multiple results, it downloads the input file into
# the +work_directory+ before starting.
def initialize(status, input, options, store)
@input, @options, @store = input, options, store
@job_id, @work_unit_id = options['job_id'], options['work_unit_id']
@work_directory = File.expand_path(File.join(@store.temp_storage_path, local_storage_prefix))
FileUtils.mkdir_p(@work_directory) unless File.exists?(@work_directory)
parse_input
download_input
end
# Each Action subclass must implement a +process+ method, overriding this.
def process
raise NotImplementedError, "CloudCrowd::Actions must override 'process' with their own processing code."
end
# Download a file to the specified path.
def download(url, path)
if url.match(FILE_URL)
FileUtils.cp(url.sub(FILE_URL, ''), path)
else
File.open(path, 'w+') do |file|
Net::HTTP.get_response(URI(url)) do |response|
response.read_body do |chunk|
file.write chunk
end
end
end
end
path
end
# Takes a local filesystem path, saves the file to S3, and returns the
# public (or authenticated) url on S3 where the file can be accessed.
def save(file_path)
save_path = File.join(remote_storage_prefix, File.basename(file_path))
@store.save(file_path, save_path)
end
# After the Action has finished, we remove the work directory and return
# to the root directory (where workers run by default).
def cleanup_work_directory
FileUtils.rm_r(@work_directory) if File.exists?(@work_directory)
end
# Actions have a backticks command that raises a CommandFailed exception
# on failure, so that processing doesn't just blithely continue.
private
# Convert an unsafe URL into a filesystem-friendly filename.
def safe_filename(url)
url = url.sub(/\?.*\Z/, '')
ext = File.extname(url)
name = URI.unescape(File.basename(url)).gsub(/[^a-zA-Z0-9_\-.]/, '-').gsub(/-+/, '-')
File.basename(name, ext).gsub('.', '-') + ext
end
# The directory prefix to use for remote storage.
# [action]/job_[job_id]
def remote_storage_prefix
@remote_storage_prefix ||= Inflector.underscore(self.class) +
"/job_#{@job_id}" + (@work_unit_id ? "/unit_#{@work_unit_id}" : '')
end
# The directory prefix to use for local storage.
# [action]/unit_[work_unit_id]
def local_storage_prefix
@local_storage_prefix ||= Inflector.underscore(self.class) +
(@work_unit_id ? "/unit_#{@work_unit_id}" : '')
end
# If we think that the input is JSON, replace it with the parsed form.
# It would be great if the JSON module had an is_json? method.
def parse_input
return unless ['[', '{'].include? @input[0..0]
@input = JSON.parse(@input) rescue @input
end
def input_is_url?
!URI.parse(@input).scheme.nil? rescue false
end
# If the input is a URL, download the file before beginning processing.
def download_input
return unless input_is_url?
Dir.chdir(@work_directory) do
@input_path = File.join(@work_directory, safe_filename(@input))
@file_name = File.basename(@input_path, File.extname(@input_path))
download(@input, @input_path)
end
end
end
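To make the lifecycle concrete, here is a hypothetical Action subclass: the input file is downloaded during initialize, process does the work, and the overridden backticks abort the work unit when the external command exits non-zero (pdftotext is only an example command):

class ExtractText < CloudCrowd::Action
  def process
    output = File.join(work_directory, "#{file_name}.txt")
    # Raises CloudCrowd::Error::CommandFailed on a non-zero exit code.
    `pdftotext #{input_path} #{output}`
    save(output)
  end
end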
|
sandipransing/rails_tiny_mce | plugins/paperclip/shoulda_macros/paperclip.rb | Paperclip.Shoulda.stub_paperclip_s3 | ruby | def stub_paperclip_s3(model, attachment, extension)
definition = model.gsub(" ", "_").classify.constantize.
attachment_definitions[attachment.to_sym]
path = "http://s3.amazonaws.com/:id/#{definition[:path]}"
path.gsub!(/:([^\/\.]+)/) do |match|
"([^\/\.]+)"
end
begin
FakeWeb.register_uri(:put, Regexp.new(path), :body => "OK")
rescue NameError
raise NameError, "the stub_paperclip_s3 shoulda macro requires the fakeweb gem."
end
end | Stubs the HTTP PUT for an attachment using S3 storage.
@example
stub_paperclip_s3('user', 'avatar', 'png') | train | https://github.com/sandipransing/rails_tiny_mce/blob/4e91040e62784061aa7cca37fd8a95a87df379ce/plugins/paperclip/shoulda_macros/paperclip.rb#L68-L82 | module Shoulda
include Matchers
# This will test whether you have defined your attachment correctly by
# checking that all the required fields exist after the definition of the
# attachment.
def should_have_attached_file name
klass = self.name.gsub(/Test$/, '').constantize
matcher = have_attached_file name
should matcher.description do
assert_accepts(matcher, klass)
end
end
# Tests for validations on the presence of the attachment.
def should_validate_attachment_presence name
klass = self.name.gsub(/Test$/, '').constantize
matcher = validate_attachment_presence name
should matcher.description do
assert_accepts(matcher, klass)
end
end
# Tests that you have content_type validations specified. There are two
# options, :valid and :invalid. Both accept an array of strings. The
# strings should be a list of content types which will pass and fail
# validation, respectively.
def should_validate_attachment_content_type name, options = {}
klass = self.name.gsub(/Test$/, '').constantize
valid = [options[:valid]].flatten
invalid = [options[:invalid]].flatten
matcher = validate_attachment_content_type(name).allowing(valid).rejecting(invalid)
should matcher.description do
assert_accepts(matcher, klass)
end
end
# Tests to ensure that you have file size validations turned on. You
# can pass the same options to this that you can to
# validate_attachment_file_size - :less_than, :greater_than, and :in.
# :less_than checks that a file is less than a certain size, :greater_than
# checks that a file is more than a certain size, and :in takes a Range or
# Array which specifies the lower and upper limits of the file size.
def should_validate_attachment_size name, options = {}
klass = self.name.gsub(/Test$/, '').constantize
min = options[:greater_than] || (options[:in] && options[:in].first) || 0
max = options[:less_than] || (options[:in] && options[:in].last) || (1.0/0)
range = (min..max)
matcher = validate_attachment_size(name).in(range)
should matcher.description do
assert_accepts(matcher, klass)
end
end
# Stubs the HTTP PUT for an attachment using S3 storage.
#
# @example
# stub_paperclip_s3('user', 'avatar', 'png')
# Stub S3 and return a file for attachment. Best with Factory Girl.
# Uses a strict directory convention:
#
# features/support/paperclip
#
# This method is used by the Paperclip-provided Cucumber step:
#
# When I attach a "demo_tape" "mp3" file to a "band" on S3
#
# @example
# Factory.define :band_with_demo_tape, :parent => :band do |band|
# band.demo_tape { band.paperclip_fixture("band", "demo_tape", "png") }
# end
def paperclip_fixture(model, attachment, extension)
stub_paperclip_s3(model, attachment, extension)
base_path = File.join(File.dirname(__FILE__), "..", "..",
"features", "support", "paperclip")
File.new(File.join(base_path, model, "#{attachment}.#{extension}"))
end
end
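Taken together, the macros read like this in a Shoulda-style test case. The User model and its avatar attachment are assumed, and the test class name must end in "Test" so the macros can derive the model class:

class UserTest < Test::Unit::TestCase
  should_have_attached_file :avatar
  should_validate_attachment_presence :avatar
  should_validate_attachment_content_type :avatar,
    :valid => ['image/png', 'image/jpeg'],
    :invalid => ['text/plain']
  should_validate_attachment_size :avatar, :less_than => 2.megabytes
end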
|
barkerest/incline | lib/incline/user_manager.rb | Incline.UserManager.authenticate | ruby | def authenticate(email, password, client_ip)
return nil unless Incline::EmailValidator.valid?(email)
email = email.downcase
# If an engine is registered for the email domain, then use it.
engine = get_auth_engine(email)
if engine
return engine.authenticate(email, password, client_ip)
end
# Otherwise we will be using the database.
user = User.find_by(email: email)
if user
# user must be enabled and the password must match.
unless user.enabled?
add_failure_to user, '(DB) account disabled', client_ip
return nil
end
if user.authenticate(password)
add_success_to user, '(DB)', client_ip
return user
else
add_failure_to user, '(DB) invalid password', client_ip
return nil
end
end
add_failure_to email, 'invalid email', client_ip
nil
end | Creates a new user manager.
The user manager itself takes no options, however options will be passed to
any registered authentication engines when they are instantiated.
The options can be used to pre-register engines and provide configuration for them.
The engines will have specific configurations, but the UserManager class recognizes
the 'engines' key.
{
:engines => {
'example.com' => {
:engine => MySuperAuthEngine.new(...)
},
'example.org' => {
:engine => 'incline_ldap/auth_engine',
:config => {
:host => 'ldap.example.org',
:port => 636,
:base_dn => 'DC=ldap,DC=example,DC=org'
}
}
}
}
When an 'engines' key is processed, the configuration options for the engines are pulled
from the subkeys. Once the processing of the 'engines' key is complete, it will be removed
from the options hash so any engines registered in the future will not receive the extra options.
Attempts to authenticate the user and returns the model on success. | train | https://github.com/barkerest/incline/blob/1ff08db7aa8ab7f86b223268b700bc67d15bb8aa/lib/incline/user_manager.rb#L81-L109 | class UserManager < AuthEngineBase
##
# Creates a new user manager.
#
# The user manager itself takes no options, however options will be passed to
# any registered authentication engines when they are instantiated.
#
# The options can be used to pre-register engines and provide configuration for them.
# The engines will have specific configurations, but the UserManager class recognizes
# the 'engines' key.
#
# {
# :engines => {
# 'example.com' => {
# :engine => MySuperAuthEngine.new(...)
# },
# 'example.org' => {
# :engine => 'incline_ldap/auth_engine',
# :config => {
# :host => 'ldap.example.org',
# :port => 636,
# :base_dn => 'DC=ldap,DC=example,DC=org'
# }
# }
# }
# }
#
# When an 'engines' key is processed, the configuration options for the engines are pulled
# from the subkeys. Once the processing of the 'engines' key is complete, it will be removed
# from the options hash so any engines registered in the future will not receive the extra options.
def initialize(options = {})
@options = (options || {}).deep_symbolize_keys
Incline::User.ensure_admin_exists!
if @options[:engines].is_a?(::Hash)
@options[:engines].each do |domain_name, domain_config|
if domain_config[:engine].blank?
::Incline::Log::info "Domain #{domain_name} is missing an engine definition and will not be registered."
elsif domain_config[:engine].is_a?(::Incline::AuthEngineBase)
::Incline::Log::info "Using supplied auth engine for #{domain_name}."
register_auth_engine domain_config[:engine], domain_name
else
engine =
begin
domain_config[:engine].to_s.classify.constantize
rescue NameError
nil
end
if engine
engine = engine.new(domain_config[:config] || {})
if engine.is_a?(::Incline::AuthEngineBase)
::Incline::Log::info "Using newly created auth engine for #{domain_name}."
register_auth_engine engine, domain_name
else
::Incline::Log::warn "Object created for #{domain_name} does not inherit from Incline::AuthEngineBase."
end
else
::Incline::Log::warn "Failed to create auth engine for #{domain_name}."
end
end
end
end
@options.delete(:engines)
end
##
# Attempts to authenticate the user and returns the model on success.
##
# The begin_external_authentication method takes a request object to determine if it should process a login
# or return nil. If it decides to process authentication, it should return a URL to redirect to.
def begin_external_authentication(request)
# We don't have an email domain to work from.
# Instead, we'll call each engine's begin_external_authentication method.
# If one of them returns a URL, then we return that value and skip further processing.
auth_engines.each do |dom,engine|
unless engine.nil?
url = engine.begin_external_authentication(request)
return url unless url.blank?
end
end
nil
end
##
# The end_external_authentication method takes a request object to determine if it should process a logout
# or return nil. If it decides to process authentication, it should return a URL to redirect to.
def end_external_authentication(request)
# We don't have an email domain to work from.
# Instead, we'll call each engine's end_external_authentication method.
# If one of them returns a URL, then we return that value and skip further processing.
auth_engines.each do |dom,engine|
unless engine.nil?
url = engine.end_external_authentication(request)
return url unless url.blank?
end
end
nil
end
##
# Attempts to authenticate the user and returns the model on success.
def self.authenticate(email, password, client_ip)
default.authenticate email, password, client_ip
end
##
# Returns a URL if an external login is to be used, or nil to use local authentication.
def self.begin_external_authentication(request)
default.begin_external_authentication request
end
##
# Returns a URL if an external logout is to be used, or nil to use local authentication.
def self.end_external_authentication(request)
default.end_external_authentication request
end
##
# Registers an authentication engine for one or more domains.
#
# The +engine+ passed in should take an options hash as the only argument to +initialize+
# and should provide an +authenticate+ method that takes the +email+, +password+, and
# +client_ip+. You can optionally define a +begin_external_authentication+ method that
# takes the current +request+ as the only parameter.
#
# The +authenticate+ method of the engine should return an Incline::User object on success or nil on failure.
# The +begin_external_authentication+ method of the engine should return a URL to redirect to on success
# or nil on failure.
#
# class MyAuthEngine
# def initialize(options = {})
# ...
# end
#
# def authenticate(email, password, client_ip)
# ...
# end
#
# def begin_external_authentication(request)
# ...
# end
# end
#
# Incline::UserManager.register_auth_engine(MyAuthEngine, 'example.com', 'example.net', 'example.org')
#
def register_auth_engine(engine, *domains)
unless engine.nil?
unless engine.is_a?(::Incline::AuthEngineBase)
raise ArgumentError, "The 'engine' parameter must be an instance of an auth engine or a class defining an auth engine." unless engine.is_a?(::Class)
engine = engine.new(@options)
raise ArgumentError, "The 'engine' parameter must be an instance of an auth engine or a class defining an auth engine." unless engine.is_a?(::Incline::AuthEngineBase)
end
end
domains.map do |dom|
dom = dom.to_s.downcase.strip
raise ArgumentError, "The domain #{dom.inspect} does not appear to be a valid domain." unless dom =~ /\A[a-z0-9]+(?:[-.][a-z0-9]+)*\.[a-z]+\Z/
dom
end.each do |dom|
auth_engines[dom] = engine
end
end
##
# Registers an authentication engine for one or more domains.
#
# The +engine+ passed in should take an options hash as the only argument to +initialize+
# and should provide an +authenticate+ method that takes the +email+, +password+, and
# +client_ip+.
#
# The +authenticate+ method of the engine should return an Incline::User object on success or nil on failure.
def self.register_auth_engine(engine, *domains)
default.register_auth_engine(engine, *domains)
end
##
# Clears any registered authentication engine for one or more domains.
def clear_auth_engine(*domains)
register_auth_engine(nil, *domains)
end
##
# Clears any registered authentication engine for one or more domains.
def self.clear_auth_engine(*domains)
default.clear_auth_engine(*domains)
end
private
def auth_engines
@auth_engines ||= { }
end
def get_auth_engine(email)
dom = email.partition('@')[2].downcase
auth_engines[dom]
end
def self.auth_config
@auth_config ||=
begin
cfg = Rails.root.join('config','auth.yml')
if File.exist?(cfg)
cfg = YAML.load(ERB.new(File.read(cfg)).result)
if cfg.is_a?(::Hash)
cfg = cfg[Rails.env]
(cfg || {}).symbolize_keys
else
{}
end
else
{}
end
end
end
def self.default
@default ||= UserManager.new(auth_config)
end
end
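A minimal sketch of plugging in a custom engine and authenticating through it; the engine class, domain, and credentials below are all made up:

class MyLdapEngine < Incline::AuthEngineBase
  def authenticate(email, password, client_ip)
    # Look the user up against LDAP here; return an Incline::User or nil.
  end
end

Incline::UserManager.register_auth_engine(MyLdapEngine, 'example.org')
user = Incline::UserManager.authenticate('jane@example.org', 'secret', '10.0.0.5')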
|
hashicorp/vagrant | lib/vagrant/environment.rb | Vagrant.Environment.host | ruby | def host
return @host if defined?(@host)
# Determine the host class to use. ":detect" is an old Vagrant config
# that shouldn't be valid anymore, but we respect it here by assuming
# its old behavior. No need to deprecate this because I think it is
# fairly harmless.
host_klass = vagrantfile.config.vagrant.host
host_klass = nil if host_klass == :detect
begin
@host = Host.new(
host_klass,
Vagrant.plugin("2").manager.hosts,
Vagrant.plugin("2").manager.host_capabilities,
self)
rescue Errors::CapabilityHostNotDetected
# If the auto-detect failed, then we create a brand new host
# with no capabilities and use that. This should almost never happen
# since Vagrant works on most host OS's now, so this is a "slow path"
klass = Class.new(Vagrant.plugin("2", :host)) do
def detect?(env); true; end
end
hosts = { generic: [klass, nil] }
host_caps = {}
@host = Host.new(:generic, hosts, host_caps, self)
rescue Errors::CapabilityHostExplicitNotDetected => e
raise Errors::HostExplicitNotDetected, e.extra_data
end
end | Returns the host object associated with this environment.
@return [Class] | train | https://github.com/hashicorp/vagrant/blob/c22a145c59790c098f95d50141d9afb48e1ef55f/lib/vagrant/environment.rb#L533-L564 | class Environment
# This is the current version that this version of Vagrant is
# compatible with in the home directory.
#
# @return [String]
CURRENT_SETUP_VERSION = "1.5"
DEFAULT_LOCAL_DATA = ".vagrant"
# The `cwd` that this environment represents
attr_reader :cwd
# The persistent data directory where global data can be stored. It
# is up to the creator of the data in this directory to properly
# remove it when it is no longer needed.
#
# @return [Pathname]
attr_reader :data_dir
# The valid name for a Vagrantfile for this environment.
attr_reader :vagrantfile_name
# The {UI} object to communicate with the outside world.
attr_reader :ui
# This is the UI class to use when creating new UIs.
attr_reader :ui_class
# The directory to the "home" folder that Vagrant will use to store
# global state.
attr_reader :home_path
# The directory to the directory where local, environment-specific
# data is stored.
attr_reader :local_data_path
# The directory where temporary files for Vagrant go.
attr_reader :tmp_path
# File where command line aliases go.
attr_reader :aliases_path
# The directory where boxes are stored.
attr_reader :boxes_path
# The path where the plugins are stored (gems)
attr_reader :gems_path
# The path to the default private key
attr_reader :default_private_key_path
# Initializes a new environment with the given options. The options
# is a hash where the main available key is `cwd`, which defines where
# the environment represents. There are other options available but
# they shouldn't be used in general. If `cwd` is nil, then it defaults
# to the `Dir.pwd` (which is the cwd of the executing process).
def initialize(opts=nil)
opts = {
cwd: nil,
home_path: nil,
local_data_path: nil,
ui_class: nil,
vagrantfile_name: nil,
}.merge(opts || {})
# Set the default working directory to look for the vagrantfile
opts[:cwd] ||= ENV["VAGRANT_CWD"] if ENV.key?("VAGRANT_CWD")
opts[:cwd] ||= Dir.pwd
opts[:cwd] = Pathname.new(opts[:cwd])
if !opts[:cwd].directory?
raise Errors::EnvironmentNonExistentCWD, cwd: opts[:cwd].to_s
end
opts[:cwd] = opts[:cwd].expand_path
# Set the default ui class
opts[:ui_class] ||= UI::Silent
# Set the Vagrantfile name up. We append "Vagrantfile" and "vagrantfile" so that
# those continue to work as well, but anything custom will take precedence.
opts[:vagrantfile_name] ||= ENV["VAGRANT_VAGRANTFILE"] if \
ENV.key?("VAGRANT_VAGRANTFILE")
opts[:vagrantfile_name] = [opts[:vagrantfile_name]] if \
opts[:vagrantfile_name] && !opts[:vagrantfile_name].is_a?(Array)
# Set instance variables for all the configuration parameters.
@cwd = opts[:cwd]
@home_path = opts[:home_path]
@vagrantfile_name = opts[:vagrantfile_name]
@ui = opts[:ui_class].new
@ui_class = opts[:ui_class]
# This is the batch lock, that enforces that only one {BatchAction}
# runs at a time from {#batch}.
@batch_lock = Mutex.new
@locks = {}
@logger = Log4r::Logger.new("vagrant::environment")
@logger.info("Environment initialized (#{self})")
@logger.info(" - cwd: #{cwd}")
# Setup the home directory
@home_path ||= Vagrant.user_data_path
@home_path = Util::Platform.fs_real_path(@home_path)
@boxes_path = @home_path.join("boxes")
@data_dir = @home_path.join("data")
@gems_path = Vagrant::Bundler.instance.plugin_gem_path
@tmp_path = @home_path.join("tmp")
@machine_index_dir = @data_dir.join("machine-index")
@aliases_path = Pathname.new(ENV["VAGRANT_ALIAS_FILE"]).expand_path if ENV.key?("VAGRANT_ALIAS_FILE")
@aliases_path ||= @home_path.join("aliases")
# Prepare the directories
setup_home_path
# Setup the local data directory. If a configuration path is given,
# it is expanded relative to the root path. Otherwise, we use the
# default (which is also expanded relative to the root path).
if !root_path.nil?
if !ENV["VAGRANT_DOTFILE_PATH"].to_s.empty? && !opts[:child]
opts[:local_data_path] ||= Pathname.new(File.expand_path(ENV["VAGRANT_DOTFILE_PATH"], root_path))
else
opts[:local_data_path] ||= root_path.join(DEFAULT_LOCAL_DATA)
end
end
if opts[:local_data_path]
@local_data_path = Pathname.new(File.expand_path(opts[:local_data_path], @cwd))
end
@logger.debug("Effective local data path: #{@local_data_path}")
# If we have a root path, load the ".vagrantplugins" file.
if root_path
plugins_file = root_path.join(".vagrantplugins")
if plugins_file.file?
@logger.info("Loading plugins file: #{plugins_file}")
load plugins_file
end
end
setup_local_data_path
# Setup the default private key
@default_private_key_path = @home_path.join("insecure_private_key")
copy_insecure_private_key
# Initialize localized plugins
plugins = Vagrant::Plugin::Manager.instance.localize!(self)
# Load any environment local plugins
Vagrant::Plugin::Manager.instance.load_plugins(plugins)
# Initialize globalize plugins
plugins = Vagrant::Plugin::Manager.instance.globalize!
# Load any global plugins
Vagrant::Plugin::Manager.instance.load_plugins(plugins)
if !vagrantfile.config.vagrant.plugins.empty?
plugins = process_configured_plugins
end
# Call the hooks that does not require configurations to be loaded
# by using a "clean" action runner
hook(:environment_plugins_loaded, runner: Action::Runner.new(env: self))
# Call the environment load hooks
hook(:environment_load, runner: Action::Runner.new(env: self))
end
# Return a human-friendly string for pretty printed or inspected
# instances.
#
# @return [String]
def inspect
"#<#{self.class}: #{@cwd}>".encode('external')
end
# Action runner for executing actions in the context of this environment.
#
# @return [Action::Runner]
def action_runner
@action_runner ||= Action::Runner.new do
{
action_runner: action_runner,
box_collection: boxes,
hook: method(:hook),
host: host,
machine_index: machine_index,
gems_path: gems_path,
home_path: home_path,
root_path: root_path,
tmp_path: tmp_path,
ui: @ui,
env: self
}
end
end
# Returns a list of machines that this environment is currently
# managing that physically have been created.
#
# An "active" machine is a machine that Vagrant manages that has
# been created. The machine itself may be in any state such as running,
# suspended, etc. but if a machine is "active" then it exists.
#
# Note that the machines in this array may no longer be present in
# the Vagrantfile of this environment. In this case the machine can
# be considered an "orphan." Determining which machines are orphans
# and which aren't is not currently a supported feature, but will
# be in a future version.
#
# @return [Array<String, Symbol>]
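#
# @example Pairing each active machine name with its provider (illustrative)
#   env.active_machines.each do |name, provider|
#     env.machine(name, provider)
#   end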
def active_machines
# We have no active machines if we have no data path
return [] if !@local_data_path
machine_folder = @local_data_path.join("machines")
# If the machine folder is not a directory then we just return
# an empty array since no active machines exist.
return [] if !machine_folder.directory?
# Traverse the machines folder accumulate a result
result = []
machine_folder.children(true).each do |name_folder|
# If this isn't a directory then it isn't a machine
next if !name_folder.directory?
name = name_folder.basename.to_s.to_sym
name_folder.children(true).each do |provider_folder|
# If this isn't a directory then it isn't a provider
next if !provider_folder.directory?
# If this machine doesn't have an ID, then ignore
next if !provider_folder.join("id").file?
provider = provider_folder.basename.to_s.to_sym
result << [name, provider]
end
end
# Return the results
result
end
# This creates a new batch action, yielding it, and then running it
# once the block is called.
#
# This handles the case where batch actions are disabled by the
# VAGRANT_NO_PARALLEL environmental variable.
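#
# A sketch of typical use (BatchAction#action taking (machine, action) is
# assumed here):
#
# @example
#   env.batch do |b|
#     env.active_machines.each do |name, provider|
#       b.action(env.machine(name, provider), :up)
#     end
#   end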
def batch(parallel=true)
parallel = false if ENV["VAGRANT_NO_PARALLEL"]
@batch_lock.synchronize do
BatchAction.new(parallel).tap do |b|
# Yield it so that the caller can setup actions
yield b
# And run it!
b.run
end
end
end
# Makes a call to the CLI with the given arguments as if they
# came from the real command line (sometimes they do!). An example:
#
# env.cli("package", "--vagrantfile", "Vagrantfile")
#
def cli(*args)
CLI.new(args.flatten, self).execute
end
# This returns the provider name for the default provider for this
# environment.
#
# @return [Symbol] Name of the default provider.
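#
# @example Resolving a provider while excluding one (provider name is illustrative)
#   env.default_provider(exclude: [:hyperv], force_default: false)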
def default_provider(**opts)
opts[:exclude] = Set.new(opts[:exclude]) if opts[:exclude]
opts[:force_default] = true if !opts.key?(:force_default)
opts[:check_usable] = true if !opts.key?(:check_usable)
# Implement the algorithm from
# https://www.vagrantup.com/docs/providers/basic_usage.html#default-provider
# with additional steps 2.5 and 3.5 from
# https://bugzilla.redhat.com/show_bug.cgi?id=1444492
# to allow system-configured provider priorities.
#
# 1. The --provider flag on a vagrant up is chosen above all else, if it is
# present.
#
# (Step 1 is done by the caller; this method is only called if --provider
# wasn't given.)
#
# 2. If the VAGRANT_DEFAULT_PROVIDER environmental variable is set, it
# takes next priority and will be the provider chosen.
default = ENV["VAGRANT_DEFAULT_PROVIDER"].to_s
if default.empty?
default = nil
else
default = default.to_sym
@logger.debug("Default provider: `#{default}`")
end
# If we're forcing the default, just short-circuit and return
# that (the default behavior)
if default && opts[:force_default]
@logger.debug("Using forced default provider: `#{default}`")
return default
end
# Determine the config to use to look for provider definitions. By
# default it is the global but if we're targeting a specific machine,
# then look there.
root_config = vagrantfile.config
if opts[:machine]
machine_info = vagrantfile.machine_config(opts[:machine], nil, nil, nil)
root_config = machine_info[:config]
end
# Get the list of providers within our configuration, in order.
config = root_config.vm.__providers
# Get the list of usable providers with their internally-declared
# priorities.
usable = []
Vagrant.plugin("2").manager.providers.each do |key, data|
impl = data[0]
popts = data[1]
# Skip excluded providers
next if opts[:exclude] && opts[:exclude].include?(key)
# Skip providers that can't be defaulted, unless they're in our
# config, in which case someone made our decision for us.
if !config.include?(key)
next if popts.key?(:defaultable) && !popts[:defaultable]
end
# Skip providers that aren't usable.
next if opts[:check_usable] && !impl.usable?(false)
# Each provider sets its own priority, defaulting to 5 so we can trust
# it's always set.
usable << [popts[:priority], key]
end
@logger.debug("Initial usable provider list: #{usable}")
# Sort the usable providers by priority. Higher numbers are higher
# priority, otherwise alpha sort.
usable = usable.sort {|a, b| a[0] == b[0] ? a[1] <=> b[1] : b[0] <=> a[0]}
.map {|prio, key| key}
@logger.debug("Priority sorted usable provider list: #{usable}")
# If we're not forcing the default, but it's usable and hasn't been
# otherwise excluded, return it now.
if usable.include?(default)
@logger.debug("Using default provider `#{default}` as it was found in usable list.")
return default
end
# 2.5. Vagrant will go through all of the config.vm.provider calls in the
# Vagrantfile and try each in order. It will choose the first
# provider that is usable and listed in VAGRANT_PREFERRED_PROVIDERS.
preferred = ENV.fetch('VAGRANT_PREFERRED_PROVIDERS', '')
.split(',')
.map {|s| s.strip}
.select {|s| !s.empty?}
.map {|s| s.to_sym}
@logger.debug("Preferred provider list: #{preferred}")
config.each do |key|
if usable.include?(key) && preferred.include?(key)
@logger.debug("Using preferred provider `#{key}` detected in configuration and usable.")
return key
end
end
# 3. Vagrant will go through all of the config.vm.provider calls in the
# Vagrantfile and try each in order. It will choose the first provider
# that is usable. For example, if you configure Hyper-V, it will never
# be chosen on Mac this way. It must be both configured and usable.
config.each do |key|
if usable.include?(key)
@logger.debug("Using provider `#{key}` detected in configuration and usable.")
return key
end
end
# 3.5. Vagrant will go through VAGRANT_PREFERRED_PROVIDERS and find the
# first plugin that reports it is usable.
preferred.each do |key|
if usable.include?(key)
@logger.debug("Using preferred provider `#{key}` found in usable list.")
return key
end
end
# 4. Vagrant will go through all installed provider plugins (including the
# ones that come with Vagrant), and find the first plugin that reports
# it is usable. There is a priority system here: systems that are known
# better have a higher priority than systems that are worse. For
# example, if you have the VMware provider installed, it will always
# take priority over VirtualBox.
if !usable.empty?
@logger.debug("Using provider `#{usable[0]}` as it is the highest priority in the usable list.")
return usable[0]
end
# 5. If Vagrant still has not found any usable providers, it will error.
# No providers available is a critical error for Vagrant.
raise Errors::NoDefaultProvider
end
# Returns whether or not we know how to install the provider with
# the given name.
#
# @return [Boolean]
def can_install_provider?(name)
host.capability?(provider_install_key(name))
end
# Installs the provider with the given name.
#
# This will raise an exception if we don't know how to install the
# provider with the given name. You should guard this call with
# `can_install_provider?` for added safety.
#
# An exception will be raised if there are any failures installing
# the provider.
def install_provider(name)
host.capability(provider_install_key(name))
end
# Returns the collection of boxes for the environment.
#
# @return [BoxCollection]
def boxes
@_boxes ||= BoxCollection.new(
boxes_path,
hook: method(:hook),
temp_dir_root: tmp_path)
end
# Returns the {Config::Loader} that can be used to load Vagrantfiles
# given the settings of this environment.
#
# @return [Config::Loader]
def config_loader
return @config_loader if @config_loader
home_vagrantfile = nil
root_vagrantfile = nil
home_vagrantfile = find_vagrantfile(home_path) if home_path
if root_path
root_vagrantfile = find_vagrantfile(root_path, @vagrantfile_name)
end
@config_loader = Config::Loader.new(
Config::VERSIONS, Config::VERSIONS_ORDER)
@config_loader.set(:home, home_vagrantfile) if home_vagrantfile
@config_loader.set(:root, root_vagrantfile) if root_vagrantfile
@config_loader
end
# Loads another environment for the given Vagrantfile, sharing as much
# useful state from this Environment as possible (such as UI and paths).
# Any initialization options can be overridden using the opts hash.
#
# @param [String] vagrantfile Path to a Vagrantfile
# @return [Environment]
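#
# @example Loading a nested environment (path is illustrative)
#   child = env.environment("sub/Vagrantfile", ui_class: env.ui_class)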
def environment(vagrantfile, **opts)
path = File.expand_path(vagrantfile, root_path)
file = File.basename(path)
path = File.dirname(path)
Util::SilenceWarnings.silence! do
Environment.new({
child: true,
cwd: path,
home_path: home_path,
ui_class: ui_class,
vagrantfile_name: file,
}.merge(opts))
end
end
# This defines a hook point where plugin action hooks that are registered
# against the given name will be run in the context of this environment.
#
# @param [Symbol] name Name of the hook.
# @param [Action::Runner] action_runner A custom action runner for running hooks.
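#
# @example Triggering a custom hook point (name is illustrative)
#   env.hook(:my_plugin_setup)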
def hook(name, opts=nil)
@logger.info("Running hook: #{name}")
opts ||= {}
opts[:callable] ||= Action::Builder.new
opts[:runner] ||= action_runner
opts[:action_name] = name
opts[:env] = self
opts.delete(:runner).run(opts.delete(:callable), opts)
end
# Returns the host object associated with this environment.
#
# @return [Class]
# This acquires a process-level lock with the given name.
#
# The lock file is held within the data directory of this environment,
# so make sure that all environments that are locking are sharing
# the same data directory.
#
# This will raise Errors::EnvironmentLockedError if the lock can't
# be obtained.
#
# @param [String] name Name of the lock, since multiple locks can
# be held at one time.
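#
# @example Waiting for a busy lock instead of raising (lock name is illustrative)
#   env.lock("boxes", retry: true) do
#     # critical section
#   end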
def lock(name="global", **opts)
f = nil
# If we don't have a block, then locking is useless, so ignore it
return if !block_given?
# This allows multiple locks in the same process to be nested
return yield if @locks[name] || opts[:noop]
# The path to this lock
lock_path = data_dir.join("lock.#{name}.lock")
@logger.debug("Attempting to acquire process-lock: #{name}")
lock("dotlock", noop: name == "dotlock", retry: true) do
f = File.open(lock_path, "w+")
end
# The file locking fails only if it returns "false." If it
# succeeds it returns a 0, so we must explicitly check for
# the proper error case.
while f.flock(File::LOCK_EX | File::LOCK_NB) === false
@logger.warn("Process-lock in use: #{name}")
if !opts[:retry]
raise Errors::EnvironmentLockedError,
name: name
end
sleep 0.2
end
@logger.info("Acquired process lock: #{name}")
result = nil
begin
# Mark that we have a lock
@locks[name] = true
result = yield
ensure
# We need to make sure that no matter what this is always
# reset to false so we don't think we have a lock when we
# actually don't.
@locks.delete(name)
@logger.info("Released process lock: #{name}")
end
# Clean up the lock file, this requires another lock
if name != "dotlock"
lock("dotlock", retry: true) do
f.close
begin
File.delete(lock_path)
rescue
@logger.error(
"Failed to delete lock file #{lock_path} - some other thread " +
"might be trying to acquire it. ignoring this error")
end
end
end
# Return the result
return result
ensure
begin
f.close if f
rescue IOError
end
end
# This executes the push with the given name, raising any exceptions that
# occur.
#
# Precondition: the push is not nil and exists.
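#
# @example Running a push (the :staging push must be defined in the Vagrantfile)
#   env.push(:staging)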
def push(name)
@logger.info("Getting push: #{name}")
name = name.to_sym
pushes = self.vagrantfile.config.push.__compiled_pushes
if !pushes.key?(name)
raise Vagrant::Errors::PushStrategyNotDefined,
name: name,
pushes: pushes.keys
end
strategy, config = pushes[name]
push_registry = Vagrant.plugin("2").manager.pushes
klass, _ = push_registry.get(strategy)
if klass.nil?
raise Vagrant::Errors::PushStrategyNotLoaded,
name: strategy,
pushes: push_registry.keys
end
klass.new(self, config).push
end
# The list of pushes defined in this Vagrantfile.
#
# @return [Array<Symbol>]
def pushes
self.vagrantfile.config.push.__compiled_pushes.keys
end
# This returns a machine with the proper provider for this environment.
# The machine named by `name` must be in this environment.
#
# @param [Symbol] name Name of the machine (as configured in the
# Vagrantfile).
# @param [Symbol] provider The provider that this machine should be
# backed by.
# @param [Boolean] refresh If true, then if there is a cached version
# it is reloaded.
# @return [Machine]
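#
# @example Cached lookup, then a forced reload (names are illustrative)
#   env.machine(:default, :virtualbox)        # cached after the first call
#   env.machine(:default, :virtualbox, true)  # true busts the cache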
def machine(name, provider, refresh=false)
@logger.info("Getting machine: #{name} (#{provider})")
# Compose the cache key of the name and provider, and return from
# the cache if we have that.
cache_key = [name, provider]
@machines ||= {}
if refresh
@logger.info("Refreshing machine (busting cache): #{name} (#{provider})")
@machines.delete(cache_key)
end
if @machines.key?(cache_key)
@logger.info("Returning cached machine: #{name} (#{provider})")
return @machines[cache_key]
end
@logger.info("Uncached load of machine.")
# Determine the machine data directory and pass it to the machine.
machine_data_path = @local_data_path.join(
"machines/#{name}/#{provider}")
# Create the machine and cache it for future calls. This will also
# return the machine from this method.
@machines[cache_key] = vagrantfile.machine(
name, provider, boxes, machine_data_path, self)
end
# The {MachineIndex} to store information about the machines.
#
# @return [MachineIndex]
def machine_index
@machine_index ||= MachineIndex.new(@machine_index_dir)
end
# This returns a list of the configured machines for this environment.
# Each of the names returned by this method is valid to be used with
# the {#machine} method.
#
# @return [Array<Symbol>] Configured machine names.
def machine_names
vagrantfile.machine_names
end
# This returns the name of the machine that is the "primary." In the
# case of a single-machine environment, this is just the single machine
# name. In the case of a multi-machine environment, then this can
# potentially be nil if no primary machine is specified.
#
# @return [Symbol]
def primary_machine_name
vagrantfile.primary_machine_name
end
# The root path is the path where the top-most (loaded last)
# Vagrantfile resides. It can be considered the project root for
# this environment.
#
# @return [String]
def root_path
return @root_path if defined?(@root_path)
root_finder = lambda do |path|
# Note: To remain compatible with Ruby 1.8, we have to use
# a `find` here instead of an `each`.
vf = find_vagrantfile(path, @vagrantfile_name)
return path if vf
return nil if path.root? || !File.exist?(path)
root_finder.call(path.parent)
end
@root_path = root_finder.call(cwd)
end
# Unload the environment, running completion hooks. The environment
# should not be used after this (but CAN be, technically). It is
# recommended to always immediately set the variable to `nil` after
# running this so you can't accidentally run any more methods. Example:
#
# env.unload
# env = nil
#
def unload
hook(:environment_unload)
end
# Represents the default Vagrantfile, or the Vagrantfile that is
# in the working directory or a parent of the working directory
# of this environment.
#
# The existence of this function is primarily a convenience. There
# is nothing stopping you from instantiating your own {Vagrantfile}
# and loading machines in any way you see fit. Typical behavior of
# Vagrant, however, loads this Vagrantfile.
#
# This Vagrantfile is comprised of two major sources: the Vagrantfile
# in the user's home directory as well as the "root" Vagrantfile or
# the Vagrantfile in the working directory (or parent).
#
# @return [Vagrantfile]
def vagrantfile
@vagrantfile ||= Vagrantfile.new(config_loader, [:home, :root])
end
#---------------------------------------------------------------
# Load Methods
#---------------------------------------------------------------
# This sets the `@home_path` variable properly.
#
# @return [Pathname]
def setup_home_path
@logger.info("Home path: #{@home_path}")
# Setup the list of child directories that need to be created if they
# don't already exist.
dirs = [
@home_path,
@home_path.join("rgloader"),
@boxes_path,
@data_dir,
@gems_path,
@tmp_path,
@machine_index_dir,
]
# Go through each required directory, creating it if it doesn't exist
dirs.each do |dir|
next if File.directory?(dir)
begin
@logger.info("Creating: #{dir}")
FileUtils.mkdir_p(dir)
rescue Errno::EACCES
raise Errors::HomeDirectoryNotAccessible, home_path: @home_path.to_s
end
end
# Attempt to write into the home directory to verify we can
begin
# Append a random suffix to avoid race conditions if Vagrant
# is running in parallel with other Vagrant processes.
suffix = (0...32).map { (65 + rand(26)).chr }.join
path = @home_path.join("perm_test_#{suffix}")
path.open("w") do |f|
f.write("hello")
end
path.unlink
rescue Errno::EACCES
raise Errors::HomeDirectoryNotAccessible, home_path: @home_path.to_s
end
# Create the version file that we use to track the structure of
# the home directory. If we have an old version, we need to explicitly
# upgrade it. Otherwise, we just mark that its the current version.
version_file = @home_path.join("setup_version")
if version_file.file?
version = version_file.read.chomp
if version > CURRENT_SETUP_VERSION
raise Errors::HomeDirectoryLaterVersion
end
case version
when CURRENT_SETUP_VERSION
# We're already good, at the latest version.
when "1.1"
# We need to update our directory structure
upgrade_home_path_v1_1
# Delete the version file so we put our latest version in
version_file.delete
else
raise Errors::HomeDirectoryUnknownVersion,
path: @home_path.to_s,
version: version
end
end
if !version_file.file?
@logger.debug(
"Creating home directory version file: #{CURRENT_SETUP_VERSION}")
version_file.open("w") do |f|
f.write(CURRENT_SETUP_VERSION)
end
end
# Create the rgloader/loader file so we can use encoded files.
loader_file = @home_path.join("rgloader", "loader.rb")
if !loader_file.file?
source_loader = Vagrant.source_root.join("templates/rgloader.rb")
FileUtils.cp(source_loader.to_s, loader_file.to_s)
end
end
# This creates the local data directory and show an error if it
# couldn't properly be created.
def setup_local_data_path(force=false)
if @local_data_path.nil?
@logger.warn("No local data path is set. Local data cannot be stored.")
return
end
@logger.info("Local data path: #{@local_data_path}")
# If the local data path is a file, then we are probably seeing an
# old (V1) "dotfile." In this case, we upgrade it. The upgrade process
# will remove the old data file if it is successful.
if @local_data_path.file?
upgrade_v1_dotfile(@local_data_path)
end
# If we don't have a root path, we don't setup anything
return if !force && root_path.nil?
begin
@logger.debug("Creating: #{@local_data_path}")
FileUtils.mkdir_p(@local_data_path)
# Create the rgloader/loader file so we can use encoded files.
loader_file = @local_data_path.join("rgloader", "loader.rb")
if !loader_file.file?
source_loader = Vagrant.source_root.join("templates/rgloader.rb")
FileUtils.mkdir_p(@local_data_path.join("rgloader").to_s)
FileUtils.cp(source_loader.to_s, loader_file.to_s)
end
rescue Errno::EACCES
raise Errors::LocalDataDirectoryNotAccessible,
local_data_path: @local_data_path.to_s
end
end
protected
# Check for any local plugins defined within the Vagrantfile. If
# found, validate they are available. If they are not available,
# request to install them, or raise an exception
#
# @return [Hash] plugin list for loading
def process_configured_plugins
return if !Vagrant.plugins_enabled?
errors = vagrantfile.config.vagrant.validate(nil)
if !errors["vagrant"].empty?
raise Errors::ConfigInvalid,
errors: Util::TemplateRenderer.render(
"config/validation_failed",
errors: errors)
end
# Check if defined plugins are installed
installed = Plugin::Manager.instance.installed_plugins
needs_install = []
config_plugins = vagrantfile.config.vagrant.plugins
config_plugins.each do |name, info|
if !installed[name]
needs_install << name
end
end
if !needs_install.empty?
ui.warn(I18n.t("vagrant.plugins.local.uninstalled_plugins",
plugins: needs_install.sort.join(", ")))
if !Vagrant.auto_install_local_plugins?
answer = nil
until ["y", "n"].include?(answer)
answer = ui.ask(I18n.t("vagrant.plugins.local.request_plugin_install") + " [N]: ")
answer = answer.strip.downcase
answer = "n" if answer.to_s.empty?
end
if answer == "n"
raise Errors::PluginMissingLocalError,
plugins: needs_install.sort.join(", ")
end
end
needs_install.each do |name|
pconfig = Util::HashWithIndifferentAccess.new(config_plugins[name])
ui.info(I18n.t("vagrant.commands.plugin.installing", name: name))
options = {sources: Vagrant::Bundler::DEFAULT_GEM_SOURCES.dup, env_local: true}
options[:sources] = pconfig[:sources] if pconfig[:sources]
options[:require] = pconfig[:entry_point] if pconfig[:entry_point]
options[:version] = pconfig[:version] if pconfig[:version]
spec = Plugin::Manager.instance.install_plugin(name, options)
ui.info(I18n.t("vagrant.commands.plugin.installed",
name: spec.name, version: spec.version.to_s))
end
ui.info("\n")
# Force halt after installation and require command to be run again. This
# will proper load any new locally installed plugins which are now available.
ui.warn(I18n.t("vagrant.plugins.local.install_rerun_command"))
exit(-1)
end
Vagrant::Plugin::Manager.instance.local_file.installed_plugins
end
# This method copies the private key into the home directory if it
# doesn't already exist.
#
# This must be done because `ssh` requires that the key is chmod
# 0600, but if Vagrant is installed as a separate user, then the
# effective uid won't be able to read the key. So the key is copied
# to the home directory and chmod 0600.
def copy_insecure_private_key
if !@default_private_key_path.exist?
@logger.info("Copying private key to home directory")
source = File.expand_path("keys/vagrant", Vagrant.source_root)
destination = @default_private_key_path
begin
FileUtils.cp(source, destination)
rescue Errno::EACCES
raise Errors::CopyPrivateKeyFailed,
source: source,
destination: destination
end
end
if !Util::Platform.windows?
# On Windows, permissions don't matter as much, so don't worry
# about doing chmod.
if Util::FileMode.from_octal(@default_private_key_path.stat.mode) != "600"
@logger.info("Changing permissions on private key to 0600")
@default_private_key_path.chmod(0600)
end
end
end
# Finds the Vagrantfile in the given directory.
#
# @param [Pathname] search_path Path to search in.
# @return [Pathname]
def find_vagrantfile(search_path, filenames=nil)
filenames ||= ["Vagrantfile", "vagrantfile"]
filenames.each do |vagrantfile|
current_path = search_path.join(vagrantfile)
return current_path if current_path.file?
end
nil
end
# Returns the key used for the host capability for provider installs
# of the given name.
def provider_install_key(name)
"provider_install_#{name}".to_sym
end
# This upgrades a home directory that was in the v1.1 format to the
# v1.5 format. It will raise exceptions if anything fails.
def upgrade_home_path_v1_1
if !ENV["VAGRANT_UPGRADE_SILENT_1_5"]
@ui.ask(I18n.t("vagrant.upgrading_home_path_v1_5"))
end
collection = BoxCollection.new(
@home_path.join("boxes"), temp_dir_root: tmp_path)
collection.upgrade_v1_1_v1_5
end
# This upgrades a Vagrant 1.0.x "dotfile" to the new V2 format.
#
# This is a destructive process. Once the upgrade is complete, the
# old dotfile is removed, and the environment becomes incompatible for
# Vagrant 1.0 environments.
#
# @param [Pathname] path The path to the dotfile
def upgrade_v1_dotfile(path)
@logger.info("Upgrading V1 dotfile to V2 directory structure...")
# First, verify the file isn't empty. If it is an empty file, we
# just delete it and go on with life.
contents = path.read.strip
if contents.strip == ""
@logger.info("V1 dotfile was empty. Removing and moving on.")
path.delete
return
end
# Otherwise, verify there is valid JSON in here since a Vagrant
# environment would always ensure valid JSON. This is a sanity check
# to make sure we don't nuke a dotfile that is not ours...
@logger.debug("Attempting to parse JSON of V1 file")
json_data = nil
begin
json_data = JSON.parse(contents)
@logger.debug("JSON parsed successfully. Things are okay.")
rescue JSON::ParserError
# The file could've been tampered with since Vagrant 1.0.x is
# supposed to ensure that the contents are valid JSON. Show an error.
raise Errors::DotfileUpgradeJSONError,
state_file: path.to_s
end
# Alright, let's upgrade this guy to the new structure. Start by
# backing up the old dotfile.
backup_file = path.dirname.join(".vagrant.v1.#{Time.now.to_i}")
@logger.info("Renaming old dotfile to: #{backup_file}")
path.rename(backup_file)
# Now, we create the actual local data directory. This should succeed
# this time since we renamed the old conflicting V1.
setup_local_data_path(true)
if json_data["active"]
@logger.debug("Upgrading to V2 style for each active VM")
json_data["active"].each do |name, id|
@logger.info("Upgrading dotfile: #{name} (#{id})")
# Create the machine configuration directory
directory = @local_data_path.join("machines/#{name}/virtualbox")
FileUtils.mkdir_p(directory)
# Write the ID file
directory.join("id").open("w+") do |f|
f.write(id)
end
end
end
# Upgrade complete! Let the user know
@ui.info(I18n.t("vagrant.general.upgraded_v1_dotfile",
backup_path: backup_file.to_s))
end
end
|
spiegela/MultiBitField | lib/multi_bit_field.rb | MultiBitField.ClassMethods.increment_mask_for | ruby | def increment_mask_for column_name, *fields
column = @@bitfields[column_name]
raise ArgumentError, "Unknown column for bitfield: #{column_name}" if column.nil?
fields = column.keys if fields.empty?
fields.sum do |field_name|
raise ArgumentError, "Unknown field: #{field_name} for column #{column_name}" if column[field_name].nil?
2 ** (bitfield_max(column_name) - column[field_name].last)
end
end | Returns an "increment mask" for a list of fields
+increment_mask_for :fields
@example
User.increment_mask_for :counter, :daily
@param [ Symbol ] column name of the column these fields are in
@param [ Symbol ] field(s) name of the field(s) for the mask | train | https://github.com/spiegela/MultiBitField/blob/53674ba73caea8d871510d02271b71edaa1335f1/lib/multi_bit_field.rb#L103-L111 | module ClassMethods
# alias :reset_bitfields :reset_bitfield
# Assign bitfields to a column
#
# +has_bit_field :column, :fields
#
# @example
# class User < ActiveRecord::Base
# has_bit_field :counter, :daily => 0..4, :weekly => 5..9, :monthly => 10..14
# end
#
# @param [ Symbol ] column Integer attribute to store bitfields
# @param [ Hash ] fields Specify the bitfield name, and the columns
# of the bitstring assigned to it
def has_bit_field column, fields
bitfield_setup! column, fields
fields.each do |field_name, filum|
class_eval <<-EVAL
def #{field_name}
get_bits_for(:#{column}, :#{field_name})
end
def #{field_name}=(value)
set_bits_for(:#{column}, :#{field_name}, value)
end
EVAL
end
end
# Returns the highest bit index used by the bitfield column
#
# +bitfield_max :column
#
# @example
# User.bitfield_max :counter
#
# @param [ Symbol ] column_name column name that stores the bitfield integer
#
def bitfield_max column_name
@@bitfields[column_name].values.sum.max
end
# Returns the field names for the bitfield column
#
# +bitfields
#
# @example
# User.bitfields :counter
#
# @param [ Symbol ] column_name column name that stores the bitfield integer
#
def bitfields column_name
@@bitfields[column_name].keys
end
# Returns the bit range assigned to a field within a column
#
# +range_for
#
# @param [ Symbol ] column_name column name that stores the bitfield integer
# @param [ Symbol ] field_name name of the field whose bit range is returned
#
def range_for column_name, field_name
column = @@bitfields[column_name]
raise ArgumentError, "Unknown column for bitfield: #{column_name}" if column.nil?
return column[field_name] if column[field_name]
raise ArgumentError, "Unknown field: #{field_name} for column #{column_name}"
end
# Returns a "reset mask" for a list of fields
#
# +reset_mask_for :fields
#
# @example
# User.reset_mask_for :counter, :daily
#
# @param [ Symbol ] column name of the column these fields are in
# @param [ Symbol ] field(s) name of the field(s) for the mask
def reset_mask_for column_name, *fields
fields = bitfields(column_name) if fields.empty?
max = bitfield_max(column_name)
(0..max).sum{|i| 2 ** i} - only_mask_for(column_name, *fields)
end
# Returns an "increment mask" for a list of fields
#
# +increment_mask_for :fields
#
# @example
# User.increment_mask_for :counter, :daily
#
# @param [ Symbol ] column name of the column these fields are in
# @param [ Symbol ] field(s) name of the field(s) for the mask
# Returns an "only mask" for a list of fields
#
# +only_mask_for :fields
#
# @example
# User.only_mask_for :counter, :daily
#
# @param [ Symbol ] column name of the column these fields are in
# @param [ Symbol ] field(s) name of the field(s) for the mask
def only_mask_for column_name, *fields
column = @@bitfields[column_name]
raise ArgumentError, "Unknown column for bitfield: #{column_name}" if column.nil?
fields = column.keys if fields.empty?
max = bitfield_max(column_name)
column.sum do |field_name, range|
fields.include?(field_name) ? range.invert(max).sum{|i| 2 ** i} : 0
end
# fields.inject("0" * (bitfield_max(column_name) + 1)) do |mask, field_name|
# raise ArugmentError, "Unknown field: #{field_name} for column #{column_name}" if column[field_name].nil?
# range = column[field_name]
# mask[range] = "1" * range.count
# mask
# end.to_i(2)
end
# Sets one or more bitfields to 0 within a column
#
# +reset_bitfield :column, :fields
#
# @example
# User.reset_bitfield :counter, :daily, :monthly
#
# @param [ Symbol ] column name of the column these fields are in
# @param [ Symbol ] field(s) name of the field(s) to reset
def reset_bitfields column_name, *fields
mask = reset_mask_for column_name, *fields
update_all "#{column_name} = #{column_name} & #{mask}"
end
alias :reset_bitfield :reset_bitfields
# Increases one or more bitfields by 1 value
#
# +increment_bitfield :column, :fields
#
# @example
# User.increment_bitfield :counter, :daily, :monthly
#
# @param [ Symbol ] column name of the column these fields are in
# @param [ Symbol ] field(s) name of the field(s) to increment
def increment_bitfields column_name, *fields
mask = increment_mask_for column_name, *fields
update_all "#{column_name} = #{column_name} + #{mask}"
end
alias :increment_bitfield :increment_bitfields
# Counts resources grouped by a bitfield
#
# +count_by :column, :fields
#
# @example
# User.count_by :counter, :monthly
#
# @param [ Symbol ] column name of the column these fields are in
# @param [ Symbol ] field name of the field to group by
def count_by column_name, field
inc = increment_mask_for column_name, field
only = only_mask_for column_name, field
# Create super-special-bitfield-grouping-query w/ AREL
sql = arel_table.
project("count(#{primary_key}) as #{field}_count, (#{column_name} & #{only})/#{inc} as #{field}").
group(field).to_sql
connection.send :select, sql, 'AREL' # Execute the query
end
private
def bitfield_setup! column, fields
if defined?(@@bitfields)
@@bitfields[column] = fields
else
@@bitfields = { column => fields }
end
end
end
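End to end, the mixin reads like this; the model and field layout are illustrative:

class User < ActiveRecord::Base
  has_bit_field :counter, :daily => 0..4, :weekly => 5..9, :monthly => 10..14
end

User.increment_bitfields :counter, :daily, :weekly  # adds 1 to both counters
User.reset_bitfield :counter, :daily                # zeroes the :daily bits
User.count_by :counter, :monthly                    # rows grouped by :monthly value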
|
af83/desi | lib/desi/index_manager.rb | Desi.IndexManager.list | ruby | def list(pattern = '.*')
pattern = Regexp.new(pattern || '.*')
@outputter.puts "Indices from host #{@host} matching the pattern #{pattern.inspect}\n\n" if @verbose
list = indices(pattern).sort
list.each {|i| @outputter.puts i.inspect } if @verbose
list
end | Initializes a Desi::IndexManager instance
@param [#to_hash] opts Hash of extra opts
@option opts [#to_s] :host ('http://localhost:9200') Host to manage indices for
@option opts [Boolean] :verbose (nil) Whether to output the actions' result
on STDOUT
@option opts [#new] :http_client_factory (Desi::HttpClient) HTTP transport class
to use
@note The +:http_client_factory+ should return an instance that responds
to #get and #delete
@return [void]
@api public
List index names for the specified cluster
You can restrict the list using a regular expression pattern. (The default
pattern being +/.*/+, all indices will be returned if you do not
specify anything.)
@param [#to_s] pattern ('.*') Regexp pattern used to restrict the selection
@return [Array<String>] List of index names of the ES cluster
@note This method will also output its result on STDOUT if +@verbose+ is
true
@example List all indices whose name begins with "foo"
Desi::IndexManager.new.list('^foo') #=> ["foo1", "foo2", "foo3"]
@api public | train | https://github.com/af83/desi/blob/30c51ce3b484765bd8911baf2fb83a85809cc81c/lib/desi/index_manager.rb#L100-L108 | class IndexManager
class Index
attr_reader :name, :number_of_documents, :aliases, :state, :number_of_documents
def initialize(name, state_data, status_data)
@name = name
@number_of_documents = status_data["docs"]["num_docs"] if status_data && status_data["docs"]
@aliases = []
if state_data
@aliases = state_data['aliases']
@state = state_data['state']
end
end
def to_s
name
end
def inspect
"#{name} (#{number_of_docs_label})#{aliases_label}"
end
def aliased?
!(aliases.nil? || aliases.empty?)
end
def <=>(other)
name <=> other.name
end
def open?
state == "open"
end
def closed?
state == "close"
end
private
def number_of_docs_label
closed? ? 'CLOSED' : "#{number_of_documents} docs"
end
def aliases_label
aliased? ? ". Aliases: #{aliases.join(', ')}" : nil
end
end
# Initializes a Desi::IndexManager instance
#
# @param [#to_hash] opts Hash of extra opts
#
# @option opts [#to_s] :host ('http://localhost:9200') Host to manage indices for
# @option opts [Boolean] :verbose (nil) Whether to output the actions' result
# on STDOUT
# @option opts [#new] :http_client_factory (Desi::HttpClient) HTTP transport class
# to use
#
# @note The +:http_client_factory+ should return an instance that responds
# to #get and #delete
# @return [void]
#
# @api public
def initialize(opts = {})
@host = to_uri(opts.fetch(:host) { Desi.configuration.server })
@verbose = opts[:verbose]
@outputter = opts.fetch(:outputter, Kernel)
@client = opts.fetch(:http_client_factory, Desi::HttpClient).new(@host)
end
# List index names for the specified cluster
#
# You can restrict the list using a regular expression pattern. (The default
# pattern being +/.*/+, all indices will be returned if you do not
# specify anything.)
#
# @param [#to_s] pattern ('.*') Regexp pattern used to restrict the selection
# @return [Array<String>] List of index names of the ES cluster
#
# @note This method will also output its result on STDOUT if +@verbose+ is
# true
#
# @example List all indices whose name begins with "foo"
# Desi::IndexManager.new.list('^foo') #=> ["foo1", "foo2", "foo3"]
#
# @api public
# Delete all indices matching the specified pattern
#
# @param [#to_s] pattern Regexp pattern used to restrict the selection
# @return [void]
#
# @note No confirmation is needed, so beware!
#
# @note This method will also output its result on STDOUT if +@verbose+ is
# true
#
# @example Delete all indices whose name begins with "test"
# Desi::IndexManager.new.delete!('^test') #=> nil
#
# @api public
def delete!(pattern)
warn "You must provide a pattern" and exit if pattern.nil?
@outputter.puts "The following indices from host #{@host} are now deleted" if @verbose
indices(Regexp.new(pattern)).each do |index|
@client.delete("/#{index}")
@outputter.puts " * #{index.inspect}" if @verbose
end
end
# Close all indices matching the specified pattern
#
# @param [#to_s] pattern Regexp pattern used to restrict the selection
# @return [void]
#
# @note No confirmation is needed, so beware!
#
# @note This method will also output its result on STDOUT if +@verbose+ is
# true
#
# @example Close all indices whose name begins with "test"
# Desi::IndexManager.new.close!('^test') #=> nil
#
# @api public
def close!(pattern)
warn "You must provide a pattern" and exit if pattern.nil?
@outputter.puts "The following indices from host #{@host} are now closed" if @verbose
indices(Regexp.new(pattern)).each do |index|
@client.post("/#{index}/_close")
@outputter.puts " * #{index.inspect}" if @verbose
end
end
# Empty (remove all records) from indices matching the specified pattern
#
# @param [#to_s] pattern Regexp pattern used to restrict the selection
# @return [void]
#
# @note No confirmation is needed, so beware!
#
# @note This method will also output its result on STDOUT if +@verbose+ is
# true
#
# @example Empty all indices whose name begins with "log"
# Desi::IndexManager.new.empty!('^log') #=> nil
#
# @api public
def empty!(pattern)
warn "You must provide a pattern" and exit if pattern.nil?
@outputter.puts "The following indices from host #{@host} are now emptied" if @verbose
indices(Regexp.new(pattern)).each do |index|
@client.delete("/#{index}/_query?q=*")
@outputter.puts " * #{index}" if @verbose
end
end
private
def indices(pattern)
cluster_state = JSON.parse(@client.get('/_cluster/state').body)
status = JSON.parse(@client.get('/_status').body)
cluster_state["metadata"]["indices"].map {|k, v|
if k =~ pattern
Index.new(k, v, status['indices'][k])
end
}.compact
end
def to_uri(host_string)
scheme, host, port = ['http', 'localhost', 9200]
%r{(?<scheme>(https?|))(?:\:\/\/|)(?<host>[^:]*?):?(?<port>\d*)/?$}.match(host_string.to_s) do |m|
scheme = m[:scheme] unless m[:scheme].empty?
host = m[:host] unless m[:host].empty?
port = m[:port] unless m[:port].empty?
end
"#{scheme}://#{host}:#{port}"
end
end
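
A hedged usage sketch (not part of the original source): driving IndexManager against a local Elasticsearch node. The host URL and the index names in the comments are illustrative assumptions.

require 'desi'

manager = Desi::IndexManager.new(host: 'http://localhost:9200', verbose: true)

# List indices whose names start with "logstash-", then close last month's.
manager.list('^logstash-')            # => e.g. ["logstash-2014.01.01", "logstash-2014.01.02"]
manager.close!('^logstash-2014\.01')  # closes each matching index, printing it when verbose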
|
koraktor/metior | lib/metior/adapter/grit/repository.rb | Metior::Adapter::Grit.Repository.load_line_stats | ruby | def load_line_stats(ids)
if ids.is_a? Range
if ids.first == ''
range = ids.last
else
range = '%s..%s' % [ids.first, ids.last]
end
options = { :numstat => true, :timeout => false }
output = @grit_repo.git.native :log, options, range
commit_stats = ::Grit::CommitStats.list_from_string @grit_repo, output
else
commit_stats = []
ids.each_slice(500) do |id_slice|
options = { :numstat => true, :timeout => false }
output = @grit_repo.git.native :log, options, *id_slice
commit_stats += ::Grit::CommitStats.list_from_string @grit_repo, output
end
end
Hash[commit_stats.map do |stats|
[stats.first, [stats.last.additions, stats.last.deletions]]
end]
end | Loads the line stats for the commits given by a set of commit IDs
@param [Array<String>] ids The IDs of the commits to load line stats for
@return [Hash<String, Array<Fixnum>>] An array of two numbers (line
additions and deletions) for each of the given commit IDs | train | https://github.com/koraktor/metior/blob/02da0f330774c91e1a7325a5a7edbe696f389f95/lib/metior/adapter/grit/repository.rb#L65-L88 | class Repository < Metior::Repository
# Creates a new Git repository based on the given path
#
# This creates a new `Grit::Repo` instance to interface with the
# repository.
#
# @param [String] path The file system path of the repository
def initialize(path)
super path
@grit_repo = ::Grit::Repo.new(path)
end
# Returns the current branch of the repository
#
# This is the target ref of Git's HEAD, i.e. the currently checked out
# branch. For a detached HEAD this may also be the commit ID of the checked
# out commit.
#
# @see Grit::Repo#head
def current_branch
branch = @grit_repo.head
return branch.name unless branch.nil?
commit = @grit_repo.commit('HEAD')
commit.id unless commit.nil?
end
# Returns the unique identifier for the commit the given reference – like a
# branch name – is pointing to
#
# Returns the given ref name immediately if it is a full SHA1 commit ID.
#
# @param [String] ref A symbolic reference name
# @return [String] The SHA1 ID of the commit the reference is pointing to
def id_for_ref(ref)
return ref if ref.match(/[0-9a-f]{40}/)
unless @refs.key? ref
options = { :timeout => false }
sha = @grit_repo.git.native(:rev_parse, options, "#{ref}^{}")
@refs[ref] = sha.rstrip
end
@refs[ref]
end
# Loads the line stats for the commits given by a set of commit IDs
#
# @param [Array<String>] ids The IDs of the commits to load line stats for
  # @return [Hash<String, Array<Fixnum>>] An array of two numbers (line
# additions and deletions) for each of the given commit IDs
# Retrieves a raw commit object for the given commit ID
#
# @param [String] id The ID of the commit
# @return [Grit::Commit] The commit object
# @see Grit::Repo#commit
def raw_commit(id)
@grit_repo.commit(id)
end
private
# Loads all branches and the corresponding commit IDs of this repository
#
# @return [Hash<String, String>] The names of all branches and the
# corresponding commit IDs
# @see Grit::Repo#branches
def load_branches
Hash[@grit_repo.branches.map { |b| [b.name, b.commit.id] }]
end
# This method uses Grit to load all commits from the given commit range
#
# Because of some Grit internal limitations, the commits have to be loaded
# in batches of up to 300 commits.
#
# @note Grit will choke on huge repositories, like Homebrew or the Linux
# kernel. You will have to raise the timeout limit using
# `Grit.git_timeout=`.
# @param [String, Range] range The range of commits for which the commits
# should be loaded. This may be given as a string
# (`'master..development'`), a range (`'master'..'development'`)
# or as a single ref (`'master'`). A single ref name means all
# commits reachable from that ref.
# @return [Grit::Commit, nil] The base commit of the requested range or
# `nil` if the the range starts at the beginning of the history
# @return [Array<Grit::Commit>] All commits in the given commit range
# @see Grit::Repo#commits
def load_commits(range)
if range.first == ''
base_commit = nil
range = range.last
else
base_commit = @grit_repo.commit(range.first)
range = '%s..%s' % [range.first, range.last]
end
options = { :pretty => 'raw', :timeout => false }
output = @grit_repo.git.native :rev_list, options, range
commits = ::Grit::Commit.list_from_string @grit_repo, output
[base_commit, commits]
end
# Loads both the name and description of the project contained in the
# repository from the description file in `GIT_DIR`. The first line of that
# file is used as the project's name, the remaining text is used as a
# description of the project.
#
# @see #description
# @see #name
# @see Grit::Repo#name
def load_name_and_description
description = @grit_repo.description
if description.start_with? 'Unnamed repository'
@name = ''
@description = ''
else
description = description.lines.to_a
@name = description.shift.strip
@description = description.join("\n").strip
end
end
alias_method :load_description, :load_name_and_description
alias_method :load_name, :load_name_and_description
# Loads all tags and the corresponding commit IDs of this repository
#
# @return [Hash<String, String>] The names of all tags and the
# corresponding commit IDs
# @see Grit::Repo#tags
def load_tags
Hash[@grit_repo.tags.map { |b| [b.name, b.commit.id] }]
end
end
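
A hedged usage sketch (not from the source): exercising #load_line_stats with a ref Range. The repository path and ref names are hypothetical; as noted above, Grit.git_timeout= may need raising for huge repositories.

require 'metior'

repo = Metior::Adapter::Grit::Repository.new('/path/to/repo')

# A Range maps to a single `git log --numstat rev1..rev2` call; an Array of
# commit IDs is instead processed in slices of 500 to keep the native
# command line within length limits.
repo.load_line_stats('v1.0.0'..'master').each do |sha, (additions, deletions)|
  puts format('%s +%d -%d', sha[0, 7], additions, deletions)
end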
|
amatsuda/rfd | lib/rfd.rb | Rfd.Controller.move_cursor | ruby | def move_cursor(row = nil)
if row
if (prev_item = items[current_row])
main.draw_item prev_item
end
page = row / max_items
switch_page page if page != current_page
main.activate_pane row / maxy
@current_row = row
else
@current_row = 0
end
item = items[current_row]
main.draw_item item, current: true
main.display current_page
header_l.draw_current_file_info item
@current_row
end | Move the cursor to the specified row.
The main window and the headers will be updated to reflect the displayed files and directories.
The row number can be out of range of the current page. | train | https://github.com/amatsuda/rfd/blob/403c0bc0ff0a9da1d21220b479d5a42008512b78/lib/rfd.rb#L151-L170 | class Controller
include Rfd::Commands
attr_reader :header_l, :header_r, :main, :command_line, :items, :displayed_items, :current_row, :current_page, :current_dir, :current_zip
# :nodoc:
def initialize
@main = MainWindow.new
@header_l = HeaderLeftWindow.new
@header_r = HeaderRightWindow.new
@command_line = CommandLineWindow.new
@debug = DebugWindow.new if ENV['DEBUG']
@direction, @dir_history, @last_command, @times, @yanked_items = nil, [], nil, nil, nil
end
# The main loop.
def run
loop do
begin
number_pressed = false
ret = case (c = Curses.getch)
when 10, 13 # enter, return
enter
when 27 # ESC
q
when ' ' # space
space
when 127 # DEL
del
when Curses::KEY_DOWN
j
when Curses::KEY_UP
k
when Curses::KEY_LEFT
h
when Curses::KEY_RIGHT
l
when Curses::KEY_CTRL_A..Curses::KEY_CTRL_Z
chr = ((c - 1 + 65) ^ 0b0100000).chr
public_send "ctrl_#{chr}" if respond_to?("ctrl_#{chr}")
when ?0..?9
public_send c
number_pressed = true
when ?!..?~
if respond_to? c
public_send c
else
debug "key: #{c}" if ENV['DEBUG']
end
when Curses::KEY_MOUSE
if (mouse_event = Curses.getmouse)
case mouse_event.bstate
when Curses::BUTTON1_CLICKED
click y: mouse_event.y, x: mouse_event.x
when Curses::BUTTON1_DOUBLE_CLICKED
double_click y: mouse_event.y, x: mouse_event.x
end
end
else
debug "key: #{c}" if ENV['DEBUG']
end
Curses.doupdate if ret
@times = nil unless number_pressed
rescue StopIteration
raise
rescue => e
command_line.show_error e.to_s
raise if ENV['DEBUG']
end
end
ensure
Curses.close_screen
end
# Change the number of columns in the main window.
def spawn_panes(num)
main.number_of_panes = num
@current_row = @current_page = 0
end
# Number of times to repeat the next command.
def times
(@times || 1).to_i
end
  # The file or directory the cursor is on.
def current_item
items[current_row]
end
# * marked files and directories.
def marked_items
items.select(&:marked?)
end
# Marked files and directories or Array(the current file or directory).
#
# . and .. will not be included.
def selected_items
((m = marked_items).any? ? m : Array(current_item)).reject {|i| %w(. ..).include? i.name}
end
  # Move the cursor to the specified row.
  #
  # The main window and the headers will be updated to reflect the displayed files and directories.
# The row number can be out of range of the current page.
# Change the current directory.
def cd(dir = '~', pushd: true)
dir = load_item path: expand_path(dir) unless dir.is_a? Item
unless dir.zip?
Dir.chdir dir
@current_zip = nil
else
@current_zip = dir
end
@dir_history << current_dir if current_dir && pushd
@current_dir, @current_page, @current_row = dir, 0, nil
main.activate_pane 0
ls
@current_dir
end
# cd to the previous directory.
def popd
cd @dir_history.pop, pushd: false if @dir_history.any?
end
# Fetch files from current directory.
  # Then update each window to reflect the newest information.
def ls
fetch_items_from_filesystem_or_zip
sort_items_according_to_current_direction
@current_page ||= 0
draw_items
move_cursor (current_row ? [current_row, items.size - 1].min : nil)
draw_marked_items
draw_total_items
true
end
# Sort the whole files and directories in the current directory, then refresh the screen.
#
# ==== Parameters
# * +direction+ - Sort order in a String.
# nil : order by name
# r : reverse order by name
# s, S : order by file size
# sr, Sr: reverse order by file size
# t : order by mtime
# tr : reverse order by mtime
# c : order by ctime
# cr : reverse order by ctime
# u : order by atime
# ur : reverse order by atime
# e : order by extname
# er : reverse order by extname
def sort(direction = nil)
@direction, @current_page = direction, 0
sort_items_according_to_current_direction
switch_page 0
move_cursor 0
end
# Change the file permission of the selected files and directories.
#
# ==== Parameters
# * +mode+ - Unix chmod string (e.g. +w, g-r, 755, 0644)
def chmod(mode = nil)
return unless mode
begin
Integer mode
mode = Integer mode.size == 3 ? "0#{mode}" : mode
rescue ArgumentError
end
FileUtils.chmod mode, selected_items.map(&:path)
ls
end
# Change the file owner of the selected files and directories.
#
# ==== Parameters
# * +user_and_group+ - user name and group name separated by : (e.g. alice, nobody:nobody, :admin)
def chown(user_and_group)
return unless user_and_group
user, group = user_and_group.split(':').map {|s| s == '' ? nil : s}
FileUtils.chown user, group, selected_items.map(&:path)
ls
end
# Fetch files from current directory or current .zip file.
def fetch_items_from_filesystem_or_zip
unless in_zip?
@items = Dir.foreach(current_dir).map {|fn|
load_item dir: current_dir, name: fn
}.to_a.partition {|i| %w(. ..).include? i.name}.flatten
else
@items = [load_item(dir: current_dir, name: '.', stat: File.stat(current_dir)),
load_item(dir: current_dir, name: '..', stat: File.stat(File.dirname(current_dir)))]
zf = Zip::File.new current_dir
zf.each {|entry|
next if entry.name_is_directory?
stat = zf.file.stat entry.name
@items << load_item(dir: current_dir, name: entry.name, stat: stat)
}
end
end
  # Focus on the first file or directory whose name starts with the given String.
def find(str)
index = items.index {|i| i.index > current_row && i.name.start_with?(str)} || items.index {|i| i.name.start_with? str}
move_cursor index if index
end
  # Focus on the last file or directory whose name starts with the given String.
def find_reverse(str)
index = items.reverse.index {|i| i.index < current_row && i.name.start_with?(str)} || items.reverse.index {|i| i.name.start_with? str}
move_cursor items.size - index - 1 if index
end
# Height of the currently active pane.
def maxy
main.maxy
end
# Number of files or directories that the current main window can show in a page.
def max_items
main.max_items
end
# Update the main window with the loaded files and directories. Also update the header.
def draw_items
main.newpad items
@displayed_items = items[current_page * max_items, max_items]
main.display current_page
header_l.draw_path_and_page_number path: current_dir.path, current: current_page + 1, total: total_pages
end
# Sort the loaded files and directories in already given sort order.
def sort_items_according_to_current_direction
case @direction
when nil
@items = items.shift(2) + items.partition(&:directory?).flat_map(&:sort)
when 'r'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort.reverse}
when 'S', 's'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by {|i| -i.size}}
when 'Sr', 'sr'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:size)}
when 't'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort {|x, y| y.mtime <=> x.mtime}}
when 'tr'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:mtime)}
when 'c'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort {|x, y| y.ctime <=> x.ctime}}
when 'cr'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:ctime)}
when 'u'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort {|x, y| y.atime <=> x.atime}}
when 'ur'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:atime)}
when 'e'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort {|x, y| y.extname <=> x.extname}}
when 'er'
@items = items.shift(2) + items.partition(&:directory?).flat_map {|arr| arr.sort_by(&:extname)}
end
items.each.with_index {|item, index| item.index = index}
end
# Search files and directories from the current directory, and update the screen.
#
  # * +pattern+ - Search pattern against file names, as a Ruby Regexp string.
  #
  # === Example
  #
  # a : Search files that contain the letter "a" in their file name
# .*\.pdf$ : Search PDF files
def grep(pattern = '.*')
regexp = Regexp.new(pattern)
fetch_items_from_filesystem_or_zip
@items = items.shift(2) + items.select {|i| i.name =~ regexp}
sort_items_according_to_current_direction
draw_items
draw_total_items
switch_page 0
move_cursor 0
end
# Copy selected files and directories to the destination.
def cp(dest)
unless in_zip?
src = (m = marked_items).any? ? m.map(&:path) : current_item
FileUtils.cp_r src, expand_path(dest)
else
      raise 'Copying multiple items in .zip is not supported.' if selected_items.size > 1
Zip::File.open(current_zip) do |zip|
entry = zip.find_entry(selected_items.first.name).dup
entry.name, entry.name_length = dest, dest.size
zip.instance_variable_get(:@entry_set) << entry
end
end
ls
end
# Move selected files and directories to the destination.
def mv(dest)
unless in_zip?
src = (m = marked_items).any? ? m.map(&:path) : current_item
FileUtils.mv src, expand_path(dest)
else
      raise 'Moving multiple items in .zip is not supported.' if selected_items.size > 1
rename "#{selected_items.first.name}/#{dest}"
end
ls
end
# Rename selected files and directories.
#
# ==== Parameters
  # * +pattern+ - new filename, or a slash-separated Regexp-like string
def rename(pattern)
from, to = pattern.sub(/^\//, '').sub(/\/$/, '').split '/'
if to.nil?
from, to = current_item.name, from
else
from = Regexp.new from
end
unless in_zip?
selected_items.each do |item|
name = item.name.gsub from, to
FileUtils.mv item, current_dir.join(name) if item.name != name
end
else
Zip::File.open(current_zip) do |zip|
selected_items.each do |item|
name = item.name.gsub from, to
zip.rename item.name, name
end
end
end
ls
end
# Soft delete selected files and directories.
#
  # If the OS is not OSX, this performs the same as the `delete` command.
def trash
unless in_zip?
if osx?
FileUtils.mv selected_items.map(&:path), File.expand_path('~/.Trash/')
else
#TODO support other OS
FileUtils.rm_rf selected_items.map(&:path)
end
else
      return unless ask %Q[Trashing zip entries is not supported. The files will be deleted instead. Are you sure you want to proceed? (y/n)]
delete
end
@current_row -= selected_items.count {|i| i.index <= current_row}
ls
end
# Delete selected files and directories.
def delete
unless in_zip?
FileUtils.rm_rf selected_items.map(&:path)
else
Zip::File.open(current_zip) do |zip|
zip.select {|e| selected_items.map(&:name).include? e.to_s}.each do |entry|
if entry.name_is_directory?
zip.dir.delete entry.to_s
else
zip.file.delete entry.to_s
end
end
end
end
@current_row -= selected_items.count {|i| i.index <= current_row}
ls
end
# Create a new directory.
def mkdir(dir)
unless in_zip?
FileUtils.mkdir_p current_dir.join(dir)
else
Zip::File.open(current_zip) do |zip|
zip.dir.mkdir dir
end
end
ls
end
# Create a new empty file.
def touch(filename)
unless in_zip?
FileUtils.touch current_dir.join(filename)
else
Zip::File.open(current_zip) do |zip|
# zip.file.open(filename, 'w') {|_f| } #HAXX this code creates an unneeded temporary file
zip.instance_variable_get(:@entry_set) << Zip::Entry.new(current_zip, filename)
end
end
ls
end
# Create a symlink to the current file or directory.
def symlink(name)
FileUtils.ln_s current_item, name
ls
end
# Yank selected file / directory names.
def yank
@yanked_items = selected_items
end
# Paste yanked files / directories here.
def paste
if @yanked_items
if current_item.directory?
FileUtils.cp_r @yanked_items.map(&:path), current_item
else
@yanked_items.each do |item|
if items.include? item
i = 1
while i += 1
new_item = load_item dir: current_dir, name: "#{item.basename}_#{i}#{item.extname}", stat: item.stat
break unless File.exist? new_item.path
end
FileUtils.cp_r item, new_item
else
FileUtils.cp_r item, current_dir
end
end
end
ls
end
end
# Copy selected files and directories' path into clipboard on OSX.
def clipboard
IO.popen('pbcopy', 'w') {|f| f << selected_items.map(&:path).join(' ')} if osx?
end
# Archive selected files and directories into a .zip file.
def zip(zipfile_name)
return unless zipfile_name
zipfile_name += '.zip' unless zipfile_name.end_with? '.zip'
Zip::File.open(zipfile_name, Zip::File::CREATE) do |zipfile|
selected_items.each do |item|
next if item.symlink?
if item.directory?
Dir[item.join('**/**')].each do |file|
zipfile.add file.sub("#{current_dir}/", ''), file
end
else
zipfile.add item.name, item
end
end
end
ls
end
# Unarchive .zip and .tar.gz files within selected files and directories into current_directory.
def unarchive
unless in_zip?
zips, gzs = selected_items.partition(&:zip?).tap {|z, others| break [z, *others.partition(&:gz?)]}
zips.each do |item|
FileUtils.mkdir_p current_dir.join(item.basename)
Zip::File.open(item) do |zip|
zip.each do |entry|
FileUtils.mkdir_p File.join(item.basename, File.dirname(entry.to_s))
zip.extract(entry, File.join(item.basename, entry.to_s)) { true }
end
end
end
gzs.each do |item|
Zlib::GzipReader.open(item) do |gz|
Gem::Package::TarReader.new(gz) do |tar|
dest_dir = current_dir.join (gz.orig_name || item.basename).sub(/\.tar$/, '')
tar.each do |entry|
dest = nil
if entry.full_name == '././@LongLink'
dest = File.join dest_dir, entry.read.strip
next
end
dest ||= File.join dest_dir, entry.full_name
if entry.directory?
FileUtils.mkdir_p dest, :mode => entry.header.mode
elsif entry.file?
FileUtils.mkdir_p dest_dir
File.open(dest, 'wb') {|f| f.print entry.read}
FileUtils.chmod entry.header.mode, dest
elsif entry.header.typeflag == '2' # symlink
File.symlink entry.header.linkname, dest
end
unless Dir.exist? dest_dir
FileUtils.mkdir_p dest_dir
File.open(File.join(dest_dir, gz.orig_name || item.basename), 'wb') {|f| f.print gz.read}
end
end
end
end
end
else
Zip::File.open(current_zip) do |zip|
zip.select {|e| selected_items.map(&:name).include? e.to_s}.each do |entry|
FileUtils.mkdir_p File.join(current_zip.dir, current_zip.basename, File.dirname(entry.to_s))
zip.extract(entry, File.join(current_zip.dir, current_zip.basename, entry.to_s)) { true }
end
end
end
ls
end
# Current page is the first page?
def first_page?
current_page == 0
end
# Do we have more pages?
def last_page?
current_page == total_pages - 1
end
# Number of pages in the current directory.
def total_pages
(items.size - 1) / max_items + 1
end
# Move to the given page number.
#
# ==== Parameters
# * +page+ - Target page number
def switch_page(page)
main.display (@current_page = page)
@displayed_items = items[current_page * max_items, max_items]
header_l.draw_path_and_page_number path: current_dir.path, current: current_page + 1, total: total_pages
end
# Update the header information concerning currently marked files or directories.
def draw_marked_items
items = marked_items
header_r.draw_marked_items count: items.size, size: items.inject(0) {|sum, i| sum += i.size}
end
# Update the header information concerning total files and directories in the current directory.
def draw_total_items
header_r.draw_total_items count: items.size, size: items.inject(0) {|sum, i| sum += i.size}
end
  # Switch marking on / off for the current file or directory.
def toggle_mark
main.toggle_mark current_item
end
# Get a char as a String from user input.
def get_char
c = Curses.getch
c if (0..255) === c.ord
end
def clear_command_line
command_line.writeln 0, ""
command_line.clear
command_line.noutrefresh
end
# Accept user input, and directly execute it as a Ruby method call to the controller.
#
# ==== Parameters
# * +preset_command+ - A command that would be displayed at the command line before user input.
def process_command_line(preset_command: nil)
prompt = preset_command ? ":#{preset_command} " : ':'
command_line.set_prompt prompt
cmd, *args = command_line.get_command(prompt: prompt).split(' ')
if cmd && !cmd.empty? && respond_to?(cmd)
ret = self.public_send cmd, *args
clear_command_line
ret
end
rescue Interrupt
clear_command_line
end
# Accept user input, and directly execute it in an external shell.
def process_shell_command
command_line.set_prompt ':!'
cmd = command_line.get_command(prompt: ':!')[1..-1]
execute_external_command pause: true do
system cmd
end
rescue Interrupt
ensure
command_line.clear
command_line.noutrefresh
end
# Let the user answer y or n.
#
# ==== Parameters
# * +prompt+ - Prompt message
def ask(prompt = '(y/n)')
command_line.set_prompt prompt
command_line.refresh
while (c = Curses.getch)
      next unless [?N, ?Y, ?n, ?y, 3, 27].include? c # N, Y, n, y, ^c, esc
command_line.clear
command_line.noutrefresh
break (c == 'y') || (c == 'Y')
end
end
# Open current file or directory with the editor.
def edit
execute_external_command do
editor = ENV['EDITOR'] || 'vim'
unless in_zip?
system %Q[#{editor} "#{current_item.path}"]
else
begin
tmpdir, tmpfile_name = nil
Zip::File.open(current_zip) do |zip|
tmpdir = Dir.mktmpdir
FileUtils.mkdir_p File.join(tmpdir, File.dirname(current_item.name))
tmpfile_name = File.join(tmpdir, current_item.name)
File.open(tmpfile_name, 'w') {|f| f.puts zip.file.read(current_item.name)}
system %Q[#{editor} "#{tmpfile_name}"]
zip.add(current_item.name, tmpfile_name) { true }
end
ls
ensure
FileUtils.remove_entry_secure tmpdir if tmpdir
end
end
end
end
# Open current file or directory with the viewer.
def view
pager = ENV['PAGER'] || 'less'
execute_external_command do
unless in_zip?
system %Q[#{pager} "#{current_item.path}"]
else
begin
tmpdir, tmpfile_name = nil
Zip::File.open(current_zip) do |zip|
tmpdir = Dir.mktmpdir
FileUtils.mkdir_p File.join(tmpdir, File.dirname(current_item.name))
tmpfile_name = File.join(tmpdir, current_item.name)
File.open(tmpfile_name, 'w') {|f| f.puts zip.file.read(current_item.name)}
end
system %Q[#{pager} "#{tmpfile_name}"]
ensure
FileUtils.remove_entry_secure tmpdir if tmpdir
end
end
end
end
def move_cursor_by_click(y: nil, x: nil)
if (idx = main.pane_index_at(y: y, x: x))
row = current_page * max_items + main.maxy * idx + y - main.begy
move_cursor row if (row >= 0) && (row < items.size)
end
end
private
def execute_external_command(pause: false)
Curses.def_prog_mode
Curses.close_screen
yield
ensure
Curses.reset_prog_mode
Curses.getch if pause
#NOTE needs to draw borders and ls again here since the stdlib Curses.refresh fails to retrieve the previous screen
Rfd::Window.draw_borders
Curses.refresh
ls
end
def expand_path(path)
File.expand_path path.start_with?('/', '~') ? path : current_dir ? current_dir.join(path) : path
end
def load_item(path: nil, dir: nil, name: nil, stat: nil)
Item.new dir: dir || File.dirname(path), name: name || File.basename(path), stat: stat, window_width: main.width
end
def osx?
@_osx ||= RbConfig::CONFIG['host_os'] =~ /darwin/
end
def in_zip?
@current_zip
end
def debug(str)
@debug.debug str
end
end
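
A minimal sketch (not from the source) of the row-to-page/pane arithmetic that #move_cursor relies on, using illustrative dimensions: two panes of 20 rows each, so max_items == 40. Row numbers are zero-based across the whole item list.

max_items = 40 # rows visible per page (maxy * number of panes)
maxy      = 20 # rows per pane

[0, 19, 20, 39, 40, 53].each do |row|
  page = row / max_items # move_cursor calls switch_page when this changes
  pane = row / maxy      # the value handed to main.activate_pane
  puts "row #{row} -> page #{page}, pane expression row/maxy = #{pane}"
end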
|
bdurand/json_record | lib/json_record/field_definition.rb | JsonRecord.FieldDefinition.default | ruby | def default
if @default.nil?
nil
elsif @default.is_a?(Numeric) || @default.is_a?(Symbol) || @default.is_a?(TrueClass) || @default.is_a?(FalseClass)
@default
else
@default.dup rescue @default
end
end | Define a field. Options should include :type with the class of the field. Other options available are
:multivalued and :default.
Get the default value. | train | https://github.com/bdurand/json_record/blob/463f4719d9618f6d2406c0aab6028e0156f7c775/lib/json_record/field_definition.rb#L24-L32 | class FieldDefinition
BOOLEAN_MAPPING = {
true => true, 'true' => true, 'TRUE' => true, 'True' => true, 't' => true, 'T' => true, '1' => true, 1 => true, 1.0 => true,
false => false, 'false' => false, 'FALSE' => false, 'False' => false, 'f' => false, 'F' => false, '0' => false, 0 => false, 0.0 => false, nil => false
}
attr_reader :name, :type
# Define a field. Options should include :type with the class of the field. Other options available are
# :multivalued and :default.
def initialize (name, options = {})
@name = name.to_s
@type = options[:type] || String
@multivalued = !!options[:multivalued]
@default = options[:default]
if [Hash, Array].include?(@type) and @default.nil?
@default = @type.new
end
end
# Get the default value.
# Indicates the field is multivalued.
def multivalued?
@multivalued
end
# Convert a value to the proper class for storing it in the field. If the value can't be converted,
# the original value will be returned. Blank values are always translated to nil. Hashes will be converted
# to EmbeddedDocument objects if the field type extends from EmbeddedDocument.
def convert (val)
return nil if val.blank? and val != false
if @type == String
return val.to_s
elsif @type == Integer
return Kernel.Integer(val) rescue val
elsif @type == Float
return Kernel.Float(val) rescue val
elsif @type == Boolean
v = BOOLEAN_MAPPING[val]
v = val.to_s.downcase == 'true' if v.nil? # Check all mixed case spellings for true
return v
elsif @type == Date
if val.is_a?(Date)
return val
elsif val.is_a?(Time)
return val.to_date
else
return Date.parse(val.to_s) rescue val
end
elsif @type == Time
if val.is_a?(Time)
return Time.at((val.to_i / 60) * 60).utc
else
return Time.parse(val).utc rescue val
end
elsif @type == DateTime
if val.is_a?(DateTime)
return val.utc
else
return DateTime.parse(val).utc rescue val
end
elsif @type == Array
val = [val] unless val.is_a?(Array)
raise ArgumentError.new("#{name} must be an Array") unless val.is_a?(Array)
return val
elsif @type == Hash
raise ArgumentError.new("#{name} must be a Hash") unless val.is_a?(Hash)
return val
elsif @type == BigDecimal
return BigDecimal.new(val.to_s)
else
if val.is_a?(@type)
val
elsif val.is_a?(Hash) and (@type < EmbeddedDocument)
return @type.new(val)
else
raise ArgumentError.new("#{name} must be a #{@type}")
end
end
end
end
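
A hedged sketch of FieldDefinition#convert coercions. It assumes JsonRecord::Boolean is the gem's boolean marker class and that ActiveSupport's #blank? is loaded; the field names and values are illustrative.

require 'json_record'

int_field  = JsonRecord::FieldDefinition.new(:count,  :type => Integer)
bool_field = JsonRecord::FieldDefinition.new(:active, :type => JsonRecord::Boolean)
date_field = JsonRecord::FieldDefinition.new(:due_on, :type => Date)

int_field.convert('42')          # => 42
int_field.convert('forty-two')   # => "forty-two" (unconvertible values pass through)
int_field.convert('')            # => nil (blank values become nil)
bool_field.convert('T')          # => true (via BOOLEAN_MAPPING)
date_field.convert('2011-05-01') # => Date.new(2011, 5, 1)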
|
igrigorik/http-2 | lib/http/2/connection.rb | HTTP2.Connection.connection_settings | ruby | def connection_settings(frame)
connection_error unless frame[:type] == :settings && (frame[:stream]).zero?
# Apply settings.
# side =
# local: previously sent and pended our settings should be effective
# remote: just received peer settings should immediately be effective
settings, side = if frame[:flags].include?(:ack)
# Process pending settings we have sent.
[@pending_settings.shift, :local]
else
error = validate_settings(@remote_role, frame[:payload])
connection_error(e: error) if error
[frame[:payload], :remote]
end
settings.each do |key, v|
case side
when :local
@local_settings[key] = v
when :remote
@remote_settings[key] = v
end
case key
when :settings_max_concurrent_streams
# Do nothing.
# The value is checked on the next attempt to create a stream.
when :settings_initial_window_size
# A change to SETTINGS_INITIAL_WINDOW_SIZE could cause the available
# space in a flow control window to become negative. A sender MUST
# track the negative flow control window, and MUST NOT send new flow
# controlled frames until it receives WINDOW_UPDATE frames that cause
# the flow control window to become positive.
case side
when :local
@local_window = @local_window - @local_window_limit + v
@streams.each do |_id, stream|
stream.emit(:local_window, stream.local_window - @local_window_limit + v)
end
@local_window_limit = v
when :remote
@remote_window = @remote_window - @remote_window_limit + v
@streams.each do |_id, stream|
# Event name is :window, not :remote_window
stream.emit(:window, stream.remote_window - @remote_window_limit + v)
end
@remote_window_limit = v
end
when :settings_header_table_size
# Changing the header table size may cause some headers to be evicted
case side
when :local
@compressor.table_size = v
when :remote
@decompressor.table_size = v
end
when :settings_enable_push
# nothing to do
when :settings_max_frame_size
# nothing to do
# else # ignore unknown settings
end
end
case side
when :local
# Received a settings_ack. Notify application layer.
emit(:settings_ack, frame, @pending_settings.size)
when :remote
unless @state == :closed || @h2c_upgrade == :start
# Send ack to peer
send(type: :settings, stream: 0, payload: [], flags: [:ack])
end
end
end | Update connection settings based on parameters set by the peer.
@param frame [Hash] | train | https://github.com/igrigorik/http-2/blob/d52934f144db97fc7534e4c6025ed6ae86909b6a/lib/http/2/connection.rb#L528-L609 | class Connection
include FlowBuffer
include Emitter
include Error
# Connection state (:new, :closed).
attr_reader :state
# Size of current connection flow control window (by default, set to
# infinity, but is automatically updated on receipt of peer settings).
attr_reader :local_window
attr_reader :remote_window
alias window local_window
# Current settings value for local and peer
attr_reader :local_settings
attr_reader :remote_settings
# Pending settings value
# Sent but not ack'ed settings
attr_reader :pending_settings
# Number of active streams between client and server (reserved streams
# are not counted towards the stream limit).
attr_reader :active_stream_count
    # Initializes a new connection object.
#
def initialize(**settings)
@local_settings = DEFAULT_CONNECTION_SETTINGS.merge(settings)
@remote_settings = SPEC_DEFAULT_CONNECTION_SETTINGS.dup
@compressor = Header::Compressor.new(settings)
@decompressor = Header::Decompressor.new(settings)
@active_stream_count = 0
@streams = {}
@streams_recently_closed = {}
@pending_settings = []
@framer = Framer.new
@local_window_limit = @local_settings[:settings_initial_window_size]
@local_window = @local_window_limit
@remote_window_limit = @remote_settings[:settings_initial_window_size]
@remote_window = @remote_window_limit
@recv_buffer = Buffer.new
@send_buffer = []
@continuation = []
@error = nil
@h2c_upgrade = nil
@closed_since = nil
end
def closed?
@state == :closed
end
    # Allocates a new stream for the current connection.
#
# @param priority [Integer]
# @param window [Integer]
# @param parent [Stream]
def new_stream(**args)
fail ConnectionClosed if @state == :closed
fail StreamLimitExceeded if @active_stream_count >= @remote_settings[:settings_max_concurrent_streams]
stream = activate_stream(id: @stream_id, **args)
@stream_id += 2
stream
end
# Sends PING frame to the peer.
#
# @param payload [String] optional payload must be 8 bytes long
# @param blk [Proc] callback to execute when PONG is received
def ping(payload, &blk)
send(type: :ping, stream: 0, payload: payload)
once(:ack, &blk) if blk
end
# Sends a GOAWAY frame indicating that the peer should stop creating
    # new streams for the current connection.
#
# Endpoints MAY append opaque data to the payload of any GOAWAY frame.
# Additional debug data is intended for diagnostic purposes only and
# carries no semantic value. Debug data MUST NOT be persistently stored,
# since it could contain sensitive information.
#
# @param error [Symbol]
# @param payload [String]
def goaway(error = :no_error, payload = nil)
last_stream = if (max = @streams.max)
max.first
else
0
end
send(type: :goaway, last_stream: last_stream,
error: error, payload: payload)
@state = :closed
@closed_since = Time.now
end
# Sends a WINDOW_UPDATE frame to the peer.
#
# @param increment [Integer]
def window_update(increment)
@local_window += increment
send(type: :window_update, stream: 0, increment: increment)
end
# Sends a connection SETTINGS frame to the peer.
# The values are reflected when the corresponding ACK is received.
#
# @param settings [Array or Hash]
def settings(payload)
payload = payload.to_a
connection_error if validate_settings(@local_role, payload)
@pending_settings << payload
send(type: :settings, stream: 0, payload: payload)
end
# Decodes incoming bytes into HTTP 2.0 frames and routes them to
# appropriate receivers: connection frames are handled directly, and
# stream frames are passed to appropriate stream objects.
#
# @param data [String] Binary encoded string
def receive(data)
@recv_buffer << data
# Upon establishment of a TCP connection and determination that
# HTTP/2.0 will be used by both peers, each endpoint MUST send a
# connection header as a final confirmation and to establish the
# initial settings for the HTTP/2.0 connection.
#
# Client connection header is 24 byte connection header followed by
# SETTINGS frame. Server connection header is SETTINGS frame only.
if @state == :waiting_magic
if @recv_buffer.size < 24
if !CONNECTION_PREFACE_MAGIC.start_with? @recv_buffer
fail HandshakeError
else
return # maybe next time
end
elsif @recv_buffer.read(24) == CONNECTION_PREFACE_MAGIC
# MAGIC is OK. Send our settings
@state = :waiting_connection_preface
payload = @local_settings.reject { |k, v| v == SPEC_DEFAULT_CONNECTION_SETTINGS[k] }
settings(payload)
else
fail HandshakeError
end
end
while (frame = @framer.parse(@recv_buffer))
emit(:frame_received, frame)
# Header blocks MUST be transmitted as a contiguous sequence of frames
# with no interleaved frames of any other type, or from any other stream.
unless @continuation.empty?
unless frame[:type] == :continuation && frame[:stream] == @continuation.first[:stream]
connection_error
end
@continuation << frame
return unless frame[:flags].include? :end_headers
payload = @continuation.map { |f| f[:payload] }.join
frame = @continuation.shift
@continuation.clear
frame.delete(:length)
frame[:payload] = Buffer.new(payload)
frame[:flags] << :end_headers
end
# SETTINGS frames always apply to a connection, never a single stream.
# The stream identifier for a settings frame MUST be zero. If an
# endpoint receives a SETTINGS frame whose stream identifier field is
# anything other than 0x0, the endpoint MUST respond with a connection
# error (Section 5.4.1) of type PROTOCOL_ERROR.
if connection_frame?(frame)
connection_management(frame)
else
case frame[:type]
when :headers
# When server receives even-numbered stream identifier,
# the endpoint MUST respond with a connection error of type PROTOCOL_ERROR.
connection_error if frame[:stream].even? && self.is_a?(Server)
# The last frame in a sequence of HEADERS/CONTINUATION
# frames MUST have the END_HEADERS flag set.
unless frame[:flags].include? :end_headers
@continuation << frame
return
end
# After sending a GOAWAY frame, the sender can discard frames
# for new streams. However, any frames that alter connection
# state cannot be completely ignored. For instance, HEADERS,
# PUSH_PROMISE and CONTINUATION frames MUST be minimally
# processed to ensure a consistent compression state
decode_headers(frame)
return if @state == :closed
stream = @streams[frame[:stream]]
if stream.nil?
stream = activate_stream(
id: frame[:stream],
weight: frame[:weight] || DEFAULT_WEIGHT,
dependency: frame[:dependency] || 0,
exclusive: frame[:exclusive] || false,
)
emit(:stream, stream)
end
stream << frame
when :push_promise
# The last frame in a sequence of PUSH_PROMISE/CONTINUATION
# frames MUST have the END_HEADERS flag set
unless frame[:flags].include? :end_headers
@continuation << frame
return
end
decode_headers(frame)
return if @state == :closed
# PUSH_PROMISE frames MUST be associated with an existing, peer-
# initiated stream... A receiver MUST treat the receipt of a
# PUSH_PROMISE on a stream that is neither "open" nor
# "half-closed (local)" as a connection error (Section 5.4.1) of
# type PROTOCOL_ERROR. Similarly, a receiver MUST treat the
# receipt of a PUSH_PROMISE that promises an illegal stream
# identifier (Section 5.1.1) (that is, an identifier for a stream
# that is not currently in the "idle" state) as a connection error
# (Section 5.4.1) of type PROTOCOL_ERROR, unless the receiver
# recently sent a RST_STREAM frame to cancel the associated stream.
parent = @streams[frame[:stream]]
pid = frame[:promise_stream]
# if PUSH parent is recently closed, RST_STREAM the push
if @streams_recently_closed[frame[:stream]]
send(type: :rst_stream, stream: pid, error: :refused_stream)
return
end
connection_error(msg: 'missing parent ID') if parent.nil?
unless parent.state == :open || parent.state == :half_closed_local
# An endpoint might receive a PUSH_PROMISE frame after it sends
# RST_STREAM. PUSH_PROMISE causes a stream to become "reserved".
# The RST_STREAM does not cancel any promised stream. Therefore, if
# promised streams are not desired, a RST_STREAM can be used to
# close any of those streams.
if parent.closed == :local_rst
# We can either (a) 'resurrect' the parent, or (b) RST_STREAM
# ... sticking with (b), might need to revisit later.
send(type: :rst_stream, stream: pid, error: :refused_stream)
else
connection_error
end
end
stream = activate_stream(id: pid, parent: parent)
emit(:promise, stream)
stream << frame
else
if (stream = @streams[frame[:stream]])
stream << frame
if frame[:type] == :data
update_local_window(frame)
calculate_window_update(@local_window_limit)
end
else
case frame[:type]
# The PRIORITY frame can be sent for a stream in the "idle" or
# "closed" state. This allows for the reprioritization of a
# group of dependent streams by altering the priority of an
# unused or closed parent stream.
when :priority
stream = activate_stream(
id: frame[:stream],
weight: frame[:weight] || DEFAULT_WEIGHT,
dependency: frame[:dependency] || 0,
exclusive: frame[:exclusive] || false,
)
emit(:stream, stream)
stream << frame
# WINDOW_UPDATE can be sent by a peer that has sent a frame
# bearing the END_STREAM flag. This means that a receiver could
# receive a WINDOW_UPDATE frame on a "half-closed (remote)" or
# "closed" stream. A receiver MUST NOT treat this as an error
# (see Section 5.1).
when :window_update
process_window_update(frame)
else
# An endpoint that receives an unexpected stream identifier
# MUST respond with a connection error of type PROTOCOL_ERROR.
connection_error
end
end
end
end
end
rescue StandardError => e
raise if e.is_a?(Error::Error)
connection_error(e: e)
end
def <<(*args)
receive(*args)
end
private
# Send an outgoing frame. DATA frames are subject to connection flow
# control and may be split and / or buffered based on current window size.
# All other frames are sent immediately.
#
# @note all frames are currently delivered in FIFO order.
# @param frame [Hash]
def send(frame)
emit(:frame_sent, frame)
if frame[:type] == :data
send_data(frame, true)
else
# An endpoint can end a connection at any time. In particular, an
# endpoint MAY choose to treat a stream error as a connection error.
if frame[:type] == :rst_stream && frame[:error] == :protocol_error
goaway(frame[:error])
else
# HEADERS and PUSH_PROMISE may generate CONTINUATION. Also send
# RST_STREAM that are not protocol errors
frames = encode(frame)
frames.each { |f| emit(:frame, f) }
end
end
end
# Applies HTTP 2.0 binary encoding to the frame.
#
# @param frame [Hash]
# @return [Array of Buffer] encoded frame
def encode(frame)
frames = if frame[:type] == :headers || frame[:type] == :push_promise
encode_headers(frame) # HEADERS and PUSH_PROMISE may create more than one frame
else
[frame] # otherwise one frame
end
frames.map { |f| @framer.generate(f) }
end
# Check if frame is a connection frame: SETTINGS, PING, GOAWAY, and any
# frame addressed to stream ID = 0.
#
# @param frame [Hash]
# @return [Boolean]
def connection_frame?(frame)
(frame[:stream]).zero? ||
frame[:type] == :settings ||
frame[:type] == :ping ||
frame[:type] == :goaway
end
# Process received connection frame (stream ID = 0).
# - Handle SETTINGS updates
# - Connection flow control (WINDOW_UPDATE)
# - Emit PONG auto-reply to PING frames
# - Mark connection as closed on GOAWAY
#
# @param frame [Hash]
def connection_management(frame)
case @state
when :waiting_connection_preface
# The first frame MUST be a SETTINGS frame at the start of a connection.
@state = :connected
connection_settings(frame)
when :connected
case frame[:type]
when :settings
connection_settings(frame)
when :window_update
@remote_window += frame[:increment]
send_data(nil, true)
when :ping
if frame[:flags].include? :ack
emit(:ack, frame[:payload])
else
send(type: :ping, stream: 0,
flags: [:ack], payload: frame[:payload])
end
when :goaway
# Receivers of a GOAWAY frame MUST NOT open additional streams on
# the connection, although a new connection can be established
# for new streams.
@state = :closed
@closed_since = Time.now
emit(:goaway, frame[:last_stream], frame[:error], frame[:payload])
when :altsvc
# 4. The ALTSVC HTTP/2 Frame
# An ALTSVC frame on stream 0 with empty (length 0) "Origin"
# information is invalid and MUST be ignored.
if frame[:origin] && !frame[:origin].empty?
emit(frame[:type], frame)
end
when :blocked
emit(frame[:type], frame)
else
connection_error
end
when :closed
connection_error if (Time.now - @closed_since) > 15
else
connection_error
end
end
    # Validate settings parameters. See spec Section 6.5.2.
#
# @param role [Symbol] The sender's role: :client or :server
# @return nil if no error. Exception object in case of any error.
def validate_settings(role, settings)
settings.each do |key, v|
case key
when :settings_header_table_size
# Any value is valid
when :settings_enable_push
case role
when :server
# Section 8.2
# Clients MUST reject any attempt to change the
# SETTINGS_ENABLE_PUSH setting to a value other than 0 by treating the
# message as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
return ProtocolError.new("invalid #{key} value") unless v.zero?
when :client
# Any value other than 0 or 1 MUST be treated as a
# connection error (Section 5.4.1) of type PROTOCOL_ERROR.
unless v.zero? || v == 1
return ProtocolError.new("invalid #{key} value")
end
end
when :settings_max_concurrent_streams
# Any value is valid
when :settings_initial_window_size
# Values above the maximum flow control window size of 2^31-1 MUST
# be treated as a connection error (Section 5.4.1) of type
# FLOW_CONTROL_ERROR.
unless v <= 0x7fffffff
return FlowControlError.new("invalid #{key} value")
end
when :settings_max_frame_size
# The initial value is 2^14 (16,384) octets. The value advertised
# by an endpoint MUST be between this initial value and the maximum
# allowed frame size (2^24-1 or 16,777,215 octets), inclusive.
# Values outside this range MUST be treated as a connection error
# (Section 5.4.1) of type PROTOCOL_ERROR.
unless v >= 16_384 && v <= 16_777_215
return ProtocolError.new("invalid #{key} value")
end
when :settings_max_header_list_size
# Any value is valid
# else # ignore unknown settings
end
end
nil
end
# Update connection settings based on parameters set by the peer.
#
# @param frame [Hash]
# Decode headers payload and update connection decompressor state.
#
# The receiver endpoint reassembles the header block by concatenating
# the individual fragments, then decompresses the block to reconstruct
# the header set - aka, header payloads are buffered until END_HEADERS,
# or an END_PROMISE flag is seen.
#
# @param frame [Hash]
def decode_headers(frame)
if frame[:payload].is_a? Buffer
frame[:payload] = @decompressor.decode(frame[:payload])
end
rescue CompressionError => e
connection_error(:compression_error, e: e)
rescue ProtocolError => e
connection_error(:protocol_error, e: e)
rescue StandardError => e
connection_error(:internal_error, e: e)
end
# Encode headers payload and update connection compressor state.
#
# @param frame [Hash]
# @return [Array of Frame]
def encode_headers(frame)
payload = frame[:payload]
payload = @compressor.encode(payload) unless payload.is_a? Buffer
frames = []
while payload.bytesize > 0
cont = frame.dup
cont[:type] = :continuation
cont[:flags] = []
cont[:payload] = payload.slice!(0, @remote_settings[:settings_max_frame_size])
frames << cont
end
if frames.empty?
frames = [frame]
else
frames.first[:type] = frame[:type]
frames.first[:flags] = frame[:flags] - [:end_headers]
frames.last[:flags] << :end_headers
end
frames
rescue StandardError => e
connection_error(:compression_error, e: e)
nil
end
# Activates new incoming or outgoing stream and registers appropriate
    # connection management callbacks.
#
# @param id [Integer]
# @param priority [Integer]
# @param window [Integer]
# @param parent [Stream]
def activate_stream(id: nil, **args)
connection_error(msg: 'Stream ID already exists') if @streams.key?(id)
stream = Stream.new({ connection: self, id: id }.merge(args))
# Streams that are in the "open" state, or either of the "half closed"
# states count toward the maximum number of streams that an endpoint is
# permitted to open.
stream.once(:active) { @active_stream_count += 1 }
stream.once(:close) do
@active_stream_count -= 1
# Store a reference to the closed stream, such that we can respond
# to any in-flight frames while close is registered on both sides.
# References to such streams will be purged whenever another stream
# is closed, with a minimum of 15s RTT time window.
@streams_recently_closed[id] = Time.now
to_delete = @streams_recently_closed.select { |_, v| (Time.now - v) > 15 }
        to_delete.each_key do |stream_id|
@streams.delete stream_id
@streams_recently_closed.delete stream_id
end
end
stream.on(:promise, &method(:promise)) if self.is_a? Server
stream.on(:frame, &method(:send))
@streams[id] = stream
end
# Emit GOAWAY error indicating to peer that the connection is being
# aborted, and once sent, raise a local exception.
#
# @param error [Symbol]
# @option error [Symbol] :no_error
# @option error [Symbol] :internal_error
# @option error [Symbol] :flow_control_error
# @option error [Symbol] :stream_closed
# @option error [Symbol] :frame_too_large
# @option error [Symbol] :compression_error
# @param msg [String]
def connection_error(error = :protocol_error, msg: nil, e: nil)
goaway(error) unless @state == :closed || @state == :new
@state, @error = :closed, error
klass = error.to_s.split('_').map(&:capitalize).join
msg ||= e && e.message
backtrace = (e && e.backtrace) || []
fail Error.const_get(klass), msg, backtrace
end
alias error connection_error
def manage_state(_)
yield
end
end
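
A hedged round-trip sketch of the SETTINGS flow handled by #connection_settings: values sent with #settings sit in @pending_settings until the peer's ACK arrives, at which point they take effect locally and :settings_ack is emitted. The socket wiring below is a placeholder.

require 'http/2'

conn = HTTP2::Client.new

conn.on(:frame) { |bytes| socket.write(bytes) } # `socket` is hypothetical
conn.on(:settings_ack) do |_frame, still_pending|
  puts "peer ACKed our settings (#{still_pending} batches still pending)"
end

conn.settings(settings_initial_window_size: 1 << 20,
              settings_max_concurrent_streams: 100)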
|
SpontaneousCMS/cutaneous | lib/cutaneous/syntax.rb | Cutaneous.Syntax.token_map | ruby | def token_map
@token_map ||= Hash[tags.map { |type, tags| [tags[0], [type, tags[0].count(?{), tags[1].length]] }]
end | map the set of tags into a hash used by the parse routine that converts an opening tag into a
list of: tag type, the number of opening braces in the tag and the length of the closing tag | train | https://github.com/SpontaneousCMS/cutaneous/blob/b0dffbd18b360a8d089d9822821f15c04cdc1b33/lib/cutaneous/syntax.rb#L35-L37 | class Syntax
attr_reader :tags
def initialize(tag_definitions)
@tags = tag_definitions
end
def is_dynamic?(text)
!text.index(tag_start_pattern).nil?
end
def tag_start_pattern
@tag_start_pattern ||= compile_start_pattern
end
def escaped_tag_pattern
@escaped_tag_pattern ||= compile_start_pattern_with_prefix("\\\\")
end
def compile_start_pattern
not_escaped = "(?<!\\\\)"
compile_start_pattern_with_prefix(not_escaped)
end
def compile_start_pattern_with_prefix(prefix)
openings = self.tags.map { |type, tags| Regexp.escape(tags[0]) }
Regexp.new("#{prefix}(#{ openings.join("|") })")
end
# map the set of tags into a hash used by the parse routine that converts an opening tag into a
# list of: tag type, the number of opening braces in the tag and the length of the closing tag
end
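
A small sketch with hypothetical tag definitions in the shape Syntax expects (tag type => [opening, closing]); the delimiters below are illustrative, not necessarily Cutaneous's defaults.

require 'cutaneous'

tags = {
  :comment    => ['!{', '}'],
  :expression => ['${', '}'],
  :statement  => ['%{', '}']
}
syntax = Cutaneous::Syntax.new(tags)

syntax.is_dynamic?('Hello ${ name }')  # => true
syntax.is_dynamic?('Hello \${ name }') # => false (escaped opening tag)
syntax.token_map
# => {"!{"=>[:comment, 1, 1], "${"=>[:expression, 1, 1], "%{"=>[:statement, 1, 1]}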
|
sumoheavy/jira-ruby | lib/jira/client.rb | JIRA.Client.post | ruby | def post(path, body = '', headers = {})
headers = { 'Content-Type' => 'application/json' }.merge(headers)
request(:post, path, body, merge_default_headers(headers))
end | HTTP methods with a body | train | https://github.com/sumoheavy/jira-ruby/blob/25e896f227ab81c528e9678708cb546d2f62f018/lib/jira/client.rb#L228-L231 | class Client
extend Forwardable
# The OAuth::Consumer instance returned by the OauthClient
#
# The authenticated client instance returned by the respective client type
# (Oauth, Basic)
attr_accessor :consumer, :request_client, :http_debug, :cache
# The configuration options for this client instance
attr_reader :options
def_delegators :@request_client, :init_access_token, :set_access_token, :set_request_token, :request_token, :access_token, :authenticated?
DEFAULT_OPTIONS = {
site: 'http://localhost:2990',
context_path: '/jira',
rest_base_path: '/rest/api/2',
ssl_verify_mode: OpenSSL::SSL::VERIFY_PEER,
use_ssl: true,
use_client_cert: false,
auth_type: :oauth,
http_debug: false,
default_headers: {}
}.freeze
def initialize(options = {})
options = DEFAULT_OPTIONS.merge(options)
@options = options
@options[:rest_base_path] = @options[:context_path] + @options[:rest_base_path]
if options[:use_client_cert]
raise ArgumentError, 'Options: :cert_path must be set when :use_client_cert is true' unless @options[:cert_path]
raise ArgumentError, 'Options: :key_path must be set when :use_client_cert is true' unless @options[:key_path]
@options[:cert] = OpenSSL::X509::Certificate.new(File.read(@options[:cert_path]))
@options[:key] = OpenSSL::PKey::RSA.new(File.read(@options[:key_path]))
end
case options[:auth_type]
when :oauth, :oauth_2legged
@request_client = OauthClient.new(@options)
@consumer = @request_client.consumer
when :jwt
@request_client = JwtClient.new(@options)
when :basic
@request_client = HttpClient.new(@options)
when :cookie
raise ArgumentError, 'Options: :use_cookies must be true for :cookie authorization type' if @options.key?(:use_cookies) && !@options[:use_cookies]
@options[:use_cookies] = true
@request_client = HttpClient.new(@options)
@request_client.make_cookie_auth_request
@options.delete(:username)
@options.delete(:password)
else
raise ArgumentError, 'Options: ":auth_type" must be ":oauth",":oauth_2legged", ":cookie" or ":basic"'
end
@http_debug = @options[:http_debug]
@options.freeze
@cache = OpenStruct.new
end
def Project # :nodoc:
JIRA::Resource::ProjectFactory.new(self)
end
def Issue # :nodoc:
JIRA::Resource::IssueFactory.new(self)
end
def Filter # :nodoc:
JIRA::Resource::FilterFactory.new(self)
end
def Component # :nodoc:
JIRA::Resource::ComponentFactory.new(self)
end
def User # :nodoc:
JIRA::Resource::UserFactory.new(self)
end
def Issuetype # :nodoc:
JIRA::Resource::IssuetypeFactory.new(self)
end
def Priority # :nodoc:
JIRA::Resource::PriorityFactory.new(self)
end
def Status # :nodoc:
JIRA::Resource::StatusFactory.new(self)
end
def Resolution # :nodoc:
JIRA::Resource::ResolutionFactory.new(self)
end
def Comment # :nodoc:
JIRA::Resource::CommentFactory.new(self)
end
def Attachment # :nodoc:
JIRA::Resource::AttachmentFactory.new(self)
end
def Worklog # :nodoc:
JIRA::Resource::WorklogFactory.new(self)
end
def Version # :nodoc:
JIRA::Resource::VersionFactory.new(self)
end
def Transition # :nodoc:
JIRA::Resource::TransitionFactory.new(self)
end
def Field # :nodoc:
JIRA::Resource::FieldFactory.new(self)
end
def Board
JIRA::Resource::BoardFactory.new(self)
end
def RapidView
JIRA::Resource::RapidViewFactory.new(self)
end
def Sprint
JIRA::Resource::SprintFactory.new(self)
end
def SprintReport
JIRA::Resource::SprintReportFactory.new(self)
end
def ServerInfo
JIRA::Resource::ServerInfoFactory.new(self)
end
def Createmeta
JIRA::Resource::CreatemetaFactory.new(self)
end
def ApplicationLink
JIRA::Resource::ApplicationLinkFactory.new(self)
end
def Watcher
JIRA::Resource::WatcherFactory.new(self)
end
def Webhook
JIRA::Resource::WebhookFactory.new(self)
end
def Issuelink
JIRA::Resource::IssuelinkFactory.new(self)
end
def Issuelinktype
JIRA::Resource::IssuelinktypeFactory.new(self)
end
def Remotelink
JIRA::Resource::RemotelinkFactory.new(self)
end
def Agile
JIRA::Resource::AgileFactory.new(self)
end
# HTTP methods without a body
def delete(path, headers = {})
request(:delete, path, nil, merge_default_headers(headers))
end
def get(path, headers = {})
request(:get, path, nil, merge_default_headers(headers))
end
def head(path, headers = {})
request(:head, path, nil, merge_default_headers(headers))
end
# HTTP methods with a body
def put(path, body = '', headers = {})
headers = { 'Content-Type' => 'application/json' }.merge(headers)
request(:put, path, body, merge_default_headers(headers))
end
# Sends the specified HTTP request to the REST API through the
# appropriate method (oauth, basic).
def request(http_method, path, body = '', headers = {})
puts "#{http_method}: #{path} - [#{body}]" if @http_debug
@request_client.request(http_method, path, body, headers)
end
protected
def merge_default_headers(headers)
{ 'Accept' => 'application/json' }.merge(@options[:default_headers]).merge(headers)
end
end
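
A hedged basic-auth sketch; the site, credentials and project key are placeholders. The capitalized factory methods above return JIRA::Resource factories.

require 'jira-ruby'

client = JIRA::Client.new(
  :username     => 'alice',
  :password     => 'secret',
  :site         => 'https://jira.example.com/',
  :context_path => '',
  :auth_type    => :basic
)

project = client.Project.find('SAMPLE')
project.issues.each { |issue| puts "#{issue.id} - #{issue.summary}" }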
|
mongodb/mongo-ruby-driver | lib/mongo/collection.rb | Mongo.Collection.create | ruby | def create(opts = {})
operation = { :create => name }.merge(options)
operation.delete(:write)
server = next_primary
if (options[:collation] || options[Operation::COLLATION]) && !server.features.collation_enabled?
raise Error::UnsupportedCollation.new
end
client.send(:with_session, opts) do |session|
Operation::Create.new({
selector: operation,
db_name: database.name,
write_concern: write_concern,
session: session
}).execute(server)
end
end | Force the collection to be created in the database.
@example Force the collection to be created.
collection.create
@param [ Hash ] opts The options for the create operation.
@option opts [ Session ] :session The session to use for the operation.
@return [ Result ] The result of the command.
@since 2.0.0 | train | https://github.com/mongodb/mongo-ruby-driver/blob/dca26d0870cb3386fad9ccc1d17228097c1fe1c8/lib/mongo/collection.rb#L184-L199 | class Collection
extend Forwardable
include Retryable
# The capped option.
#
# @since 2.1.0
CAPPED = 'capped'.freeze
# The ns field constant.
#
# @since 2.1.0
NS = 'ns'.freeze
# @return [ Mongo::Database ] The database the collection resides in.
attr_reader :database
# @return [ String ] The name of the collection.
attr_reader :name
# @return [ Hash ] The collection options.
attr_reader :options
# Get client, cluster, read preference, and write concern from client.
def_delegators :database, :client, :cluster
# Delegate to the cluster for the next primary.
def_delegators :cluster, :next_primary
# Options that can be updated on a new Collection instance via the #with method.
#
# @since 2.1.0
CHANGEABLE_OPTIONS = [ :read, :read_concern, :write ].freeze
# Check if a collection is equal to another object. Will check the name and
# the database for equality.
#
# @example Check collection equality.
# collection == other
#
# @param [ Object ] other The object to check.
#
# @return [ true, false ] If the objects are equal.
#
# @since 2.0.0
def ==(other)
return false unless other.is_a?(Collection)
name == other.name && database == other.database && options == other.options
end
# Instantiate a new collection.
#
# @example Instantiate a new collection.
# Mongo::Collection.new(database, 'test')
#
# @param [ Mongo::Database ] database The collection's database.
# @param [ String, Symbol ] name The collection name.
# @param [ Hash ] options The collection options.
#
# @since 2.0.0
def initialize(database, name, options = {})
raise Error::InvalidCollectionName.new unless name
@database = database
@name = name.to_s.freeze
@options = options.freeze
end
# Get the read concern for this collection instance.
#
# @example Get the read concern.
# collection.read_concern
#
# @return [ Hash ] The read concern.
#
# @since 2.2.0
def read_concern
options[:read_concern] || database.read_concern
end
# Get the server selector on this collection.
#
# @example Get the server selector.
# collection.server_selector
#
# @return [ Mongo::ServerSelector ] The server selector.
#
# @since 2.0.0
def server_selector
@server_selector ||= ServerSelector.get(read_preference || database.server_selector)
end
# Get the read preference on this collection.
#
# @example Get the read preference.
# collection.read_preference
#
# @return [ Hash ] The read preference.
#
# @since 2.0.0
def read_preference
@read_preference ||= options[:read] || database.read_preference
end
# Get the write concern on this collection.
#
# @example Get the write concern.
# collection.write_concern
#
# @return [ Mongo::WriteConcern ] The write concern.
#
# @since 2.0.0
def write_concern
@write_concern ||= WriteConcern.get(options[:write] || database.write_concern)
end
# Provides a new collection with either a new read preference or new write concern
# merged over the existing read preference / write concern.
#
# @example Get a collection with changed read preference.
# collection.with(:read => { :mode => :primary_preferred })
#
# @example Get a collection with changed write concern.
# collection.with(:write => { w: 3 })
# @param [ Hash ] new_options The new options to use.
#
# @return [ Mongo::Collection ] A new collection instance.
#
# @since 2.1.0
def with(new_options)
new_options.keys.each do |k|
raise Error::UnchangeableCollectionOption.new(k) unless CHANGEABLE_OPTIONS.include?(k)
end
Collection.new(database, name, options.merge(new_options))
end
# Is the collection capped?
#
# @example Is the collection capped?
# collection.capped?
#
# @return [ true, false ] If the collection is capped.
#
# @since 2.0.0
def capped?
database.command(:collstats => name).documents[0][CAPPED]
end
# Force the collection to be created in the database.
#
# @example Force the collection to be created.
# collection.create
#
# @param [ Hash ] opts The options for the create operation.
#
# @option options [ Session ] :session The session to use for the operation.
#
# @return [ Result ] The result of the command.
#
# @since 2.0.0
# Drop the collection. Will also drop all indexes associated with the
# collection.
#
# @note An error returned if the collection doesn't exist is suppressed.
#
# @example Drop the collection.
# collection.drop
#
# @param [ Hash ] opts The options for the drop operation.
#
# @option options [ Session ] :session The session to use for the operation.
#
# @return [ Result ] The result of the command.
#
# @since 2.0.0
def drop(opts = {})
client.send(:with_session, opts) do |session|
Operation::Drop.new({
selector: { :drop => name },
db_name: database.name,
write_concern: write_concern,
session: session
}).execute(next_primary)
end
rescue Error::OperationFailure => ex
raise ex unless ex.message =~ /ns not found/
false
end
# Find documents in the collection.
#
# @example Find documents in the collection by a selector.
# collection.find(name: 1)
#
# @example Get all documents in a collection.
# collection.find
#
# @param [ Hash ] filter The filter to use in the find.
# @param [ Hash ] options The options for the find.
#
# @option options [ true, false ] :allow_partial_results Allows the query to get partial
# results if some shards are down.
# @option options [ Integer ] :batch_size The number of documents returned in each batch
# of results from MongoDB.
# @option options [ String ] :comment Associate a comment with the query.
# @option options [ :tailable, :tailable_await ] :cursor_type The type of cursor to use.
# @option options [ Integer ] :limit The max number of docs to return from the query.
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the query
# to run in milliseconds.
# @option options [ Hash ] :modifiers A document containing meta-operators modifying the
# output or behavior of a query.
# @option options [ true, false ] :no_cursor_timeout The server normally times out idle
# cursors after an inactivity period (10 minutes) to prevent excess memory use.
# Set this option to prevent that.
# @option options [ true, false ] :oplog_replay Internal replication use only - driver
# should not set.
# @option options [ Hash ] :projection The fields to include or exclude from each doc
# in the result set.
# @option options [ Integer ] :skip The number of docs to skip before returning results.
# @option options [ Hash ] :sort The key and direction pairs by which the result set
# will be sorted.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ CollectionView ] The collection view.
#
# @since 2.0.0
def find(filter = nil, options = {})
View.new(self, filter || {}, options)
end
# Perform an aggregation on the collection.
#
# @example Perform an aggregation.
# collection.aggregate([ { "$group" => { "_id" => "$city", "tpop" => { "$sum" => "$pop" }}} ])
#
# @param [ Array<Hash> ] pipeline The aggregation pipeline.
# @param [ Hash ] options The aggregation options.
#
# @option options [ true, false ] :allow_disk_use Set to true if disk usage is allowed during
# the aggregation.
# @option options [ Integer ] :batch_size The number of documents to return per batch.
# @option options [ Integer ] :max_time_ms The maximum amount of time in milliseconds to allow the
# aggregation to run.
# @option options [ true, false ] :use_cursor Indicates whether the command will request that the server
# provide results using a cursor. Note that as of server version 3.6, aggregations always provide results
# using a cursor and this option is therefore not valid.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :collation The collation to use.
# @option options [ String ] :comment Associate a comment with the aggregation.
# @option options [ Session ] :session The session to use.
#
# @return [ Aggregation ] The aggregation object.
#
# @since 2.1.0
def aggregate(pipeline, options = {})
View.new(self, {}, options).aggregate(pipeline, options)
end
# As of version 3.6 of the MongoDB server, a ``$changeStream`` pipeline
# stage is supported in the aggregation framework. This stage allows users
# to request that notifications are sent for all changes to a particular
# collection.
#
# @example Get change notifications for a given collection.
# collection.watch([{ '$match' => { operationType: { '$in' => ['insert', 'replace'] } } }])
#
# @param [ Array<Hash> ] pipeline Optional additional filter operators.
# @param [ Hash ] options The change stream options.
#
# @option options [ String ] :full_document Allowed values: ‘default’,
# ‘updateLookup’. Defaults to ‘default’. When set to ‘updateLookup’,
# the change notification for partial updates will include both a delta
# describing the changes to the document, as well as a copy of the entire
# document that was changed from some time after the change occurred.
# @option options [ BSON::Document, Hash ] :resume_after Specifies the
# logical starting point for the new change stream.
# @option options [ Integer ] :max_await_time_ms The maximum amount of time
# for the server to wait on new documents to satisfy a change stream query.
# @option options [ Integer ] :batch_size The number of documents to return
# per batch.
# @option options [ BSON::Document, Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
# @option options [ BSON::Timestamp ] :start_at_operation_time Only return
# changes that occurred at or after the specified timestamp. Any command run
# against the server will return a cluster time that can be used here.
# Only recognized by server versions 4.0+.
#
# @note A change stream only allows 'majority' read concern.
# @note This helper method is preferable to running a raw aggregation with
# a $changeStream stage, for the purpose of supporting resumability.
#
# @return [ ChangeStream ] The change stream object.
#
# @since 2.5.0
def watch(pipeline = [], options = {})
View::ChangeStream.new(View.new(self, {}, options), pipeline, nil, options)
end
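# A brief iteration sketch (field access illustrative); the change stream can
# be enumerated as change documents arrive:
#
#   collection.watch([{ '$match' => { 'operationType' => 'insert' } }]).each do |change|
#     puts change['fullDocument']
#   end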
# Gets the number of matching documents in the collection.
#
# @example Get the count.
# collection.count(name: 1)
#
# @param [ Hash ] filter A filter for matching documents.
# @param [ Hash ] options The count options.
#
# @option options [ Hash ] :hint The index to use.
# @option options [ Integer ] :limit The maximum number of documents to count.
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command to run.
# @option options [ Integer ] :skip The number of documents to skip before counting.
# @option options [ Hash ] :read The read preference options.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Integer ] The document count.
#
# @since 2.1.0
#
# @deprecated Use #count_documents or estimated_document_count instead. However, note that the
# following operators will need to be substituted when switching to #count_documents:
# * $where should be replaced with $expr (only works on 3.6+)
# * $near should be replaced with $geoWithin with $center
# * $nearSphere should be replaced with $geoWithin with $centerSphere
def count(filter = nil, options = {})
View.new(self, filter || {}, options).count(options)
end
# Gets the number of matching documents in the collection. Unlike the deprecated #count
# method, this will return the exact number of documents matching the filter rather than the estimate.
#
# @example Get the number of documents in the collection.
# collection_view.count_documents
#
# @param [ Hash ] filter A filter for matching documents.
# @param [ Hash ] options Options for the operation.
#
# @option opts :skip [ Integer ] The number of documents to skip.
# @option opts :hint [ Hash ] Override default index selection and force
# MongoDB to use a specific index for the query. Requires server version 3.6+.
# @option opts :limit [ Integer ] Max number of docs to count.
# @option opts :max_time_ms [ Integer ] The maximum amount of time to allow the
# command to run.
# @option opts [ Hash ] :read The read preference options.
# @option opts [ Hash ] :collation The collation to use.
#
# @return [ Integer ] The document count.
#
# @since 2.6.0
def count_documents(filter, options = {})
View.new(self, filter, options).count_documents(options)
end
# Gets an estimate of the count of documents in a collection using collection metadata.
#
# @example Get the number of documents in the collection.
# collection_view.estimated_document_count
#
# @param [ Hash ] options Options for the operation.
#
# @option opts :max_time_ms [ Integer ] The maximum amount of time to allow the command to
# run.
# @option opts [ Hash ] :read The read preference options.
#
# @return [ Integer ] The document count.
#
# @since 2.6.0
def estimated_document_count(options = {})
View.new(self, {}, options).estimated_document_count(options)
end
# Get a list of distinct values for a specific field.
#
# @example Get the distinct values.
# collection.distinct('name')
#
# @param [ Symbol, String ] field_name The name of the field.
# @param [ Hash ] filter The documents from which to retrieve the distinct values.
# @param [ Hash ] options The distinct command options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command to run.
# @option options [ Hash ] :read The read preference options.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Array<Object> ] The list of distinct values.
#
# @since 2.1.0
def distinct(field_name, filter = nil, options = {})
View.new(self, filter || {}, options).distinct(field_name, options)
end
# Get a view of all indexes for this collection. Can be iterated or has
# more operations.
#
# @example Get the index view.
# collection.indexes
#
# @param [ Hash ] options Options for getting a list of all indexes.
#
# @option options [ Session ] :session The session to use.
#
# @return [ View::Index ] The index view.
#
# @since 2.0.0
def indexes(options = {})
Index::View.new(self, options)
end
# Get a pretty printed string inspection for the collection.
#
# @example Inspect the collection.
# collection.inspect
#
# @return [ String ] The collection inspection.
#
# @since 2.0.0
def inspect
"#<Mongo::Collection:0x#{object_id} namespace=#{namespace}>"
end
# Insert a single document into the collection.
#
# @example Insert a document into the collection.
# collection.insert_one({ name: 'test' })
#
# @param [ Hash ] document The document to insert.
# @param [ Hash ] opts The insert options.
#
# @option opts [ Session ] :session The session to use for the operation.
#
# @return [ Result ] The database response wrapper.
#
# @since 2.0.0
def insert_one(document, opts = {})
client.send(:with_session, opts) do |session|
write_with_retry(session, write_concern) do |server, txn_num|
Operation::Insert.new(
:documents => [ document ],
:db_name => database.name,
:coll_name => name,
:write_concern => write_concern,
:bypass_document_validation => !!opts[:bypass_document_validation],
:options => opts,
:id_generator => client.options[:id_generator],
:session => session,
:txn_num => txn_num
).execute(server)
end
end
end
# Insert the provided documents into the collection.
#
# @example Insert documents into the collection.
# collection.insert_many([{ name: 'test' }])
#
# @param [ Array<Hash> ] documents The documents to insert.
# @param [ Hash ] options The insert options.
#
# @option options [ Session ] :session The session to use for the operation.
#
# @return [ Result ] The database response wrapper.
#
# @since 2.0.0
def insert_many(documents, options = {})
inserts = documents.map{ |doc| { :insert_one => doc }}
bulk_write(inserts, options)
end
# Execute a batch of bulk write operations.
#
# @example Execute a bulk write.
# collection.bulk_write(operations, options)
#
# @param [ Array<Hash> ] requests The bulk write requests.
# @param [ Hash ] options The options.
#
# @option options [ true, false ] :ordered Whether the operations
# should be executed in order.
# @option options [ Hash ] :write_concern The write concern options.
# Can be :w => Integer, :fsync => Boolean, :j => Boolean.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Session ] :session The session to use for the set of operations.
#
# @return [ BulkWrite::Result ] The result of the operation.
#
# @since 2.0.0
def bulk_write(requests, options = {})
BulkWrite.new(self, requests, options).execute
end
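# A hedged sketch of the request document shapes bulk_write accepts (field
# names and values are illustrative):
#
#   collection.bulk_write([
#     { insert_one: { name: 'test' } },
#     { update_one: { filter: { name: 'test' }, update: { '$set' => { name: 'test1' } } } },
#     { delete_one: { filter: { name: 'test1' } } }
#   ], ordered: true)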
# Remove a document from the collection.
#
# @example Remove a single document from the collection.
# collection.delete_one
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] options The options.
#
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
def delete_one(filter = nil, options = {})
find(filter, options).delete_one(options)
end
# Remove documents from the collection.
#
# @example Remove multiple documents from the collection.
# collection.delete_many
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] options The options.
#
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
def delete_many(filter = nil, options = {})
find(filter, options).delete_many(options)
end
# Execute a parallel scan on the collection view.
#
# Returns a list of up to cursor_count cursors that can be iterated concurrently.
# As long as the collection is not modified during scanning, each document appears once
# in one of the cursors' result sets.
#
# @example Execute a parallel collection scan.
# collection.parallel_scan(2)
#
# @param [ Integer ] cursor_count The max number of cursors to return.
# @param [ Hash ] options The parallel scan command options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command
# to run in milliseconds.
# @option options [ Session ] :session The session to use.
#
# @return [ Array<Cursor> ] An array of cursors.
#
# @since 2.1
def parallel_scan(cursor_count, options = {})
find({}, options).send(:parallel_scan, cursor_count, options)
end
# Replaces a single document in the collection with the new document.
#
# @example Replace a single document.
# collection.replace_one({ name: 'test' }, { name: 'test1' })
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] replacement The replacement document.
# @param [ Hash ] options The options.
#
# @option options [ true, false ] :upsert Whether to upsert if the
# document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
def replace_one(filter, replacement, options = {})
find(filter, options).replace_one(replacement, options)
end
# Update documents in the collection.
#
# @example Update multiple documents in the collection.
# collection.update_many({ name: 'test'}, '$set' => { name: 'test1' })
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] update The update statement.
# @param [ Hash ] options The options.
#
# @option options [ true, false ] :upsert Whether to upsert if the
# document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Array ] :array_filters A set of filters specifying to which array elements
# an update should apply.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
def update_many(filter, update, options = {})
find(filter, options).update_many(update, options)
end
# Update a single document in the collection.
#
# @example Update a single document in the collection.
# collection.update_one({ name: 'test'}, '$set' => { name: 'test1'})
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] update The update statement.
# @param [ Hash ] options The options.
#
# @option options [ true, false ] :upsert Whether to upsert if the
# document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Array ] :array_filters A set of filters specifying to which array elements
# an update should apply.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
def update_one(filter, update, options = {})
find(filter, options).update_one(update, options)
end
# Finds a single document in the database via findAndModify and deletes
# it, returning the original document.
#
# @example Find one document and delete it.
# collection.find_one_and_delete(name: 'test')
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] options The options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command
# to run in milliseconds.
# @option options [ Hash ] :projection The fields to include or exclude in the returned doc.
# @option options [ Hash ] :sort The key and direction pairs by which the result set
# will be sorted.
# @option options [ Hash ] :write_concern The write concern options.
# Defaults to the collection's write concern.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ BSON::Document, nil ] The document, if found.
#
# @since 2.1.0
def find_one_and_delete(filter, options = {})
find(filter, options).find_one_and_delete(options)
end
# Finds a single document via findAndModify and updates it, returning the original doc unless
# otherwise specified.
#
# @example Find a document and update it, returning the original.
# collection.find_one_and_update({ name: 'test' }, { "$set" => { name: 'test1' }})
#
# @example Find a document and update it, returning the updated document.
# collection.find_one_and_update({ name: 'test' }, { "$set" => { name: 'test1' }}, :return_document => :after)
#
# @param [ Hash ] filter The filter to use.
# @param [ BSON::Document ] update The update statement.
# @param [ Hash ] options The options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command
# to run in milliseconds.
# @option options [ Hash ] :projection The fields to include or exclude in the returned doc.
# @option options [ Hash ] :sort The key and direction pairs by which the result set
# will be sorted.
# @option options [ Symbol ] :return_document Either :before or :after.
# @option options [ true, false ] :upsert Whether to upsert if the document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :write_concern The write concern options.
# Defaults to the collection's write concern.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Array ] :array_filters A set of filters specifying to which array elements
# an update should apply.
# @option options [ Session ] :session The session to use.
#
# @return [ BSON::Document ] The document.
#
# @since 2.1.0
def find_one_and_update(filter, update, options = {})
find(filter, options).find_one_and_update(update, options)
end
# Finds a single document and replaces it, returning the original doc unless
# otherwise specified.
#
# @example Find a document and replace it, returning the original.
# collection.find_one_and_replace({ name: 'test' }, { name: 'test1' })
#
# @example Find a document and replace it, returning the new document.
# collection.find_one_and_replace({ name: 'test' }, { name: 'test1' }, :return_document => :after)
#
# @param [ Hash ] filter The filter to use.
# @param [ BSON::Document ] replacement The replacement document.
# @param [ Hash ] options The options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command
# to run in milliseconds.
# @option options [ Hash ] :projection The fields to include or exclude in the returned doc.
# @option options [ Hash ] :sort The key and direction pairs by which the result set
# will be sorted.
# @option options [ Symbol ] :return_document Either :before or :after.
# @option options [ true, false ] :upsert Whether to upsert if the document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :write_concern The write concern options.
# Defaults to the collection's write concern.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ BSON::Document ] The document.
#
# @since 2.1.0
def find_one_and_replace(filter, replacement, options = {})
find(filter, options).find_one_and_update(replacement, options)
end
# Get the fully qualified namespace of the collection.
#
# @example Get the fully qualified namespace.
# collection.namespace
#
# @return [ String ] The collection namespace.
#
# @since 2.0.0
def namespace
"#{database.name}.#{name}"
end
end
|
rmagick/rmagick | lib/rmagick_internal.rb | Magick.Image.color_floodfill | ruby | def color_floodfill(x, y, fill)
target = pixel_color(x, y)
color_flood_fill(target, fill, x, y, Magick::FloodfillMethod)
end | Set all pixels that have the same color as the pixel at x,y and
are neighbors to the fill color | train | https://github.com/rmagick/rmagick/blob/ef6688ed9d76bf123c2ea1a483eff8635051adb7/lib/rmagick_internal.rb#L771-L774 | class Image
include Comparable
alias affinity remap
# Provide an alternate version of Draw#annotate, for folks who
# want to find it in this class.
def annotate(draw, width, height, x, y, text, &block)
check_destroyed
draw.annotate(self, width, height, x, y, text, &block)
self
end
# Set the color at x,y
def color_point(x, y, fill)
f = copy
f.pixel_color(x, y, fill)
f
end
# Set all pixels that have the same color as the pixel at x,y and
# are neighbors to the fill color
# Set all pixels that are neighbors of x,y and are not the border color
# to the fill color
def color_fill_to_border(x, y, fill)
color_flood_fill(border_color, fill, x, y, Magick::FillToBorderMethod)
end
# Set all pixels to the fill color. Very similar to Image#erase!
# Accepts either String or Pixel arguments
def color_reset!(fill)
save = background_color
# Change the background color _outside_ the begin block
# so that if this object is frozen the exeception will be
# raised before we have to handle it explicitly.
self.background_color = fill
begin
erase!
ensure
self.background_color = save
end
self
end
# Used by ImageList methods - see ImageList#cur_image
def cur_image
self
end
# Thanks to Russell Norris!
def each_pixel
get_pixels(0, 0, columns, rows).each_with_index do |p, n|
yield(p, n % columns, n / columns)
end
self
end
# Retrieve EXIF data by entry or all. If one or more entry names specified,
# return the values associated with the entries. If no entries specified,
# return all entries and values. The return value is an array of [name,value]
# arrays.
def get_exif_by_entry(*entry)
ary = []
if entry.length.zero?
exif_data = self['EXIF:*']
exif_data.split("\n").each { |exif| ary.push(exif.split('=')) } if exif_data
else
get_exif_by_entry # ensure properties is populated with exif data
entry.each do |name|
rval = self["EXIF:#{name}"]
ary.push([name, rval])
end
end
ary
end
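# As an illustration (values hypothetical), a camera-tagged image could yield:
#
#   img.get_exif_by_entry('Make', 'Model')
#   # => [["Make", "Canon"], ["Model", "Canon EOS 5D"]]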
# Retrieve EXIF data by tag number or all tag/value pairs. The return value is a hash.
def get_exif_by_number(*tag)
hash = {}
if tag.length.zero?
exif_data = self['EXIF:!']
if exif_data
exif_data.split("\n").each do |exif|
tag, value = exif.split('=')
tag = tag[1, 4].hex
hash[tag] = value
end
end
else
get_exif_by_number # ensure properties is populated with exif data
tag.each do |num|
rval = self[format('#%04X', num.to_i)]
hash[num] = rval == 'unknown' ? nil : rval
end
end
hash
end
# Retrieve IPTC information by record number:dataset tag constant defined in
# Magick::IPTC, above.
def get_iptc_dataset(ds)
self['IPTC:' + ds]
end
# Iterate over IPTC record number:dataset tags, yield for each non-nil dataset
def each_iptc_dataset
Magick::IPTC.constants.each do |record|
rec = Magick::IPTC.const_get(record)
rec.constants.each do |dataset|
data_field = get_iptc_dataset(rec.const_get(dataset))
yield(dataset, data_field) unless data_field.nil?
end
end
nil
end
# Patches problematic change to the order of arguments in 1.11.0.
# Before this release, the order was
# black_point, gamma, white_point
# RMagick 1.11.0 changed this to
# black_point, white_point, gamma
# This fix tries to determine if the arguments are in the old order and
# if so, swaps the gamma and white_point arguments. Then it calls
# level2, which simply accepts the arguments as given.
# Inspect the gamma and white point values and swap them if they
# look like they're in the old order.
# (Thanks to Al Evans for the suggestion.)
def level(black_point = 0.0, white_point = nil, gamma = nil)
black_point = Float(black_point)
white_point ||= Magick::QuantumRange - black_point
white_point = Float(white_point)
gamma_arg = gamma
gamma ||= 1.0
gamma = Float(gamma)
if gamma.abs > 10.0 || white_point.abs <= 10.0 || white_point.abs < gamma.abs
gamma, white_point = white_point, gamma
white_point = Magick::QuantumRange - black_point unless gamma_arg
end
level2(black_point, white_point, gamma)
end
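# For example, a legacy-order call such as img.level(0.0, 1.5, Magick::QuantumRange)
# trips the heuristic above (gamma.abs > 10.0), so the last two arguments are
# swapped and the call behaves like img.level(0.0, Magick::QuantumRange, 1.5).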
# These four methods are equivalent to the Draw#matte method
# with the "Point", "Replace", "Floodfill", "FilltoBorder", and
# "Replace" arguments, respectively.
# Make the pixel at (x,y) transparent.
def matte_point(x, y)
f = copy
f.opacity = OpaqueOpacity unless f.alpha?
pixel = f.pixel_color(x, y)
pixel.opacity = TransparentOpacity
f.pixel_color(x, y, pixel)
f
end
# Make transparent all pixels that are the same color as the
# pixel at (x, y).
def matte_replace(x, y)
f = copy
f.opacity = OpaqueOpacity unless f.alpha?
target = f.pixel_color(x, y)
f.transparent(target)
end
# Make transparent any pixel that matches the color of the pixel
# at (x,y) and is a neighbor.
def matte_floodfill(x, y)
f = copy
f.opacity = OpaqueOpacity unless f.alpha?
target = f.pixel_color(x, y)
f.matte_flood_fill(target, TransparentOpacity,
x, y, FloodfillMethod)
end
# Make transparent any neighbor pixel that is not the border color.
def matte_fill_to_border(x, y)
f = copy
f.opacity = Magick::OpaqueOpacity unless f.alpha?
f.matte_flood_fill(border_color, TransparentOpacity,
x, y, FillToBorderMethod)
end
# Make all pixels transparent.
def matte_reset!
self.opacity = Magick::TransparentOpacity
self
end
# Force an image to exact dimensions without changing the aspect ratio.
# Resize and crop if necessary. (Thanks to Jerett Taylor!)
def resize_to_fill(ncols, nrows = nil, gravity = CenterGravity)
copy.resize_to_fill!(ncols, nrows, gravity)
end
def resize_to_fill!(ncols, nrows = nil, gravity = CenterGravity)
nrows ||= ncols
if ncols != columns || nrows != rows
scale = [ncols / columns.to_f, nrows / rows.to_f].max
resize!(scale * columns + 0.5, scale * rows + 0.5)
end
crop!(gravity, ncols, nrows, true) if ncols != columns || nrows != rows
self
end
# Preserve aliases used < RMagick 2.0.1
alias crop_resized resize_to_fill
alias crop_resized! resize_to_fill!
# Convenience method to resize retaining the aspect ratio.
# (Thanks to Robert Manni!)
def resize_to_fit(cols, rows = nil)
rows ||= cols
change_geometry(Geometry.new(cols, rows)) do |ncols, nrows|
resize(ncols, nrows)
end
end
def resize_to_fit!(cols, rows = nil)
rows ||= cols
change_geometry(Geometry.new(cols, rows)) do |ncols, nrows|
resize!(ncols, nrows)
end
end
# Replace matching neighboring pixels with texture pixels
def texture_floodfill(x, y, texture)
target = pixel_color(x, y)
texture_flood_fill(target, texture, x, y, FloodfillMethod)
end
# Replace neighboring pixels to border color with texture pixels
def texture_fill_to_border(x, y, texture)
texture_flood_fill(border_color, texture, x, y, FillToBorderMethod)
end
# Construct a view. If a block is present, yield and pass the view
# object, otherwise return the view object.
def view(x, y, width, height)
view = View.new(self, x, y, width, height)
return view unless block_given?
begin
yield(view)
ensure
view.sync
end
nil
end
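# A short block-usage sketch (coordinates illustrative); edited pixels are
# synced back to the image when the block returns:
#
#   img.view(0, 0, 10, 10) do |v|
#     v[0][0] = 'red' # paint the view's top-left pixel
#   end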
# Magick::Image::View class
class View
attr_reader :x, :y, :width, :height
attr_accessor :dirty
def initialize(img, x, y, width, height)
img.check_destroyed
Kernel.raise ArgumentError, "invalid geometry (#{width}x#{height}+#{x}+#{y})" if width <= 0 || height <= 0
Kernel.raise RangeError, "geometry (#{width}x#{height}+#{x}+#{y}) exceeds image boundary" if x < 0 || y < 0 || (x + width) > img.columns || (y + height) > img.rows
@view = img.get_pixels(x, y, width, height)
@img = img
@x = x
@y = y
@width = width
@height = height
@dirty = false
end
def [](*args)
rows = Rows.new(@view, @width, @height, args)
rows.add_observer(self)
rows
end
# Store changed pixels back to image
def sync(force = false)
@img.store_pixels(x, y, width, height, @view) if @dirty || force
@dirty || force
end
# Get update from Rows - if @dirty ever becomes
# true, don't change it back to false!
def update(rows)
@dirty = true
rows.delete_observer(self) # No need to tell us again.
nil
end
# Magick::Image::View::Pixels
# Defines channel attribute getters/setters
class Pixels < Array
include Observable
# Define a getter and a setter for each channel.
%i[red green blue opacity].each do |c|
module_eval <<-END_EVAL
def #{c}
return collect { |p| p.#{c} }
end
def #{c}=(v)
each { |p| p.#{c} = v }
changed
notify_observers(self)
nil
end
END_EVAL
end
end # class Magick::Image::View::Pixels
# Magick::Image::View::Rows
class Rows
include Observable
def initialize(view, width, height, rows)
@view = view
@width = width
@height = height
@rows = rows
end
def [](*args)
cols(args)
# Both View::Pixels and Magick::Pixel implement Observable
if @unique
pixels = @view[@rows[0] * @width + @cols[0]]
pixels.add_observer(self)
else
pixels = View::Pixels.new
each do |x|
p = @view[x]
p.add_observer(self)
pixels << p
end
end
pixels
end
def []=(*args)
rv = args.delete_at(-1) # get rvalue
unless rv.is_a?(Pixel) # must be a Pixel or a color name
begin
rv = Pixel.from_color(rv)
rescue TypeError
Kernel.raise TypeError, "cannot convert #{rv.class} into Pixel"
end
end
cols(args)
each { |x| @view[x] = rv.dup }
changed
notify_observers(self)
nil
end
# A pixel has been modified. Tell the view.
def update(pixel)
changed
notify_observers(self)
pixel.delete_observer(self) # Don't need to hear again.
nil
end
private
def cols(*args)
@cols = args[0] # remove the outermost array
@unique = false
# Convert @rows to an Enumerable object
case @rows.length
when 0 # Create a Range for all the rows
@rows = Range.new(0, @height, true)
when 1 # Range, Array, or a single integer
# if the single element is already an Enumerable
# object, get it.
if @rows.first.respond_to? :each
@rows = @rows.first
else
@rows = Integer(@rows.first)
@rows += @height if @rows < 0
Kernel.raise IndexError, "index [#{@rows}] out of range" if @rows < 0 || @rows > @height - 1
# Convert back to an array
@rows = Array.new(1, @rows)
@unique = true
end
when 2
# A pair of integers representing the starting column and the number of columns
start = Integer(@rows[0])
length = Integer(@rows[1])
# Negative start -> start from last row
start += @height if start < 0
if start > @height || start < 0 || length < 0
Kernel.raise IndexError, "index [#{@rows.first}] out of range"
elsif start + length > @height
length = @height - length
length = [length, 0].max
end
# Create a Range for the specified set of rows
@rows = Range.new(start, start + length, true)
end
case @cols.length
when 0 # all rows
@cols = Range.new(0, @width, true) # convert to range
@unique = false
when 1 # Range, Array, or a single integer
# if the single element is already an Enumerable
# object, get it.
if @cols.first.respond_to? :each
@cols = @cols.first
@unique = false
else
@cols = Integer(@cols.first)
@cols += @width if @cols < 0
Kernel.raise IndexError, "index [#{@cols}] out of range" if @cols < 0 || @cols > @width - 1
# Convert back to array
@cols = Array.new(1, @cols)
@unique &&= true
end
when 2
# A pair of integers representing the starting column and the number of columns
start = Integer(@cols[0])
length = Integer(@cols[1])
# Negative start -> start from last row
start += @width if start < 0
if start > @width || start < 0 || length < 0
# nop
elsif start + length > @width
length = @width - length
length = [length, 0].max
end
# Create a Range for the specified set of columns
@cols = Range.new(start, start + length, true)
@unique = false
end
end
# iterator called from subscript methods
def each
maxrows = @height - 1
maxcols = @width - 1
@rows.each do |j|
Kernel.raise IndexError, "index [#{j}] out of range" if j > maxrows
@cols.each do |i|
Kernel.raise IndexError, "index [#{i}] out of range" if i > maxcols
yield j * @width + i
end
end
nil # useless return value
end
end # class Magick::Image::View::Rows
end # class Magick::Image::View
end # class Magick::Image
|
trishume/pro | lib/pro/commands.rb | Pro.Commands.status | ruby | def status()
max_name = @index.map {|repo| repo.name.length}.max + 1
@index.each do |r|
next unless Dir.exists?(r.path)
status = repo_status(r.path)
next if status.empty?
name = format("%-#{max_name}s",r.name).bold
puts "#{name} > #{status}"
end
end | prints a status list showing repos with
unpushed commits or uncommitted changes | train | https://github.com/trishume/pro/blob/646098c7514eb5346dd2942f90b888c0de30b6ba/lib/pro/commands.rb#L88-L97 | class Commands
EMPTY_MESSAGE = 'empty'.green
UNCOMMITTED_MESSAGE = 'uncommitted'.red
UNTRACKED_MESSAGE = 'untracked'.magenta
UNPUSHED_MESSAGE = 'unpushed'.blue
JOIN_STRING = ' + '
def initialize(index)
@index = index
end
# Fuzzy search for a git repository by name
# Returns the full path to the repository.
#
# If name is nil return the pro base.
def find_repo(name)
return @index.base_dirs.first unless name
match = FuzzyMatch.new(@index.to_a, :read => :name).find(name)
match[1] unless match.nil?
end
def run_command(command, confirm = true)
if confirm
print "Do you really want to run '#{command.bold}' on all repos [Yn]? "
ans = STDIN.gets.chomp.downcase
return unless ans == 'y' || ans.empty?
end
@index.each do |r|
next unless Dir.exists?(r.path)
Dir.chdir(r.path)
stdin, result, wait_thr = Open3.popen2e(command)
if wait_thr.value == 0
puts "#{r.name}:".bold.green
else
puts "#{r.name}:".bold.red
end
puts result.read
[stdin, result].map &:close
end
end
# Prints out the paths to all repositories in all bases
def list_repos()
@index.each do |r|
puts r.path
end
end
# prints out all the base directories
def list_bases
@index.base_dirs.each do |b|
puts b
end
end
# prints a status list showing repos with
# unpushed commits or uncommitted changes
# returns a short status message for the repo
def repo_status(path)
messages = []
messages << EMPTY_MESSAGE if repo_empty?(path)
messages << UNCOMMITTED_MESSAGE if commit_pending?(path)
messages << UNTRACKED_MESSAGE if untracked_files?(path)
messages << UNPUSHED_MESSAGE if repo_unpushed?(path)
messages.join(JOIN_STRING)
end
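# As a sketch, a repo with edited files and local-only commits would yield:
#
#   repo_status('/path/to/repo') # => "uncommitted + unpushed" (colorized)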
# Checks if the repo is empty (no initial commit yet)
def repo_empty?(path)
status = ""
Dir.chdir(path) do
status = `git status 2>/dev/null`
end
return status.include?("Initial commit")
end
# Checks if there are pending commits / edited files
def commit_pending?(path)
status = ""
Dir.chdir(path) do
status = `git status 2>/dev/null`
end
return status.include?("Changes to be committed") || status.include?("Changes not staged for commit")
end
# Checks if there are untracked files in the repo
def untracked_files?(path)
status = ""
Dir.chdir(path) do
status = `git status 2>/dev/null`
end
return status.include?("Untracked files")
end
# Finds if there are any commits which have not been pushed to origin
def repo_unpushed?(path)
unpushed = ""
Dir.chdir(path) do
branch_ref = `/usr/bin/git symbolic-ref HEAD 2>/dev/null`
branch = branch_ref.chomp.split('/').last
unpushed = `git cherry -v origin/#{branch} 2>/dev/null`
end
return !(unpushed.empty?)
end
# Adds a shell function to the shell config files that
# allows easy directory changing.
def install_cd
puts CD_INFO
print "Continue with installation (yN)? "
return unless gets.chomp.downcase == "y"
# get name
print "Name of pro cd command (default 'pd'): "
name = gets.strip
name = 'pd' if name.empty?
# sub into function
func = SHELL_FUNCTION.sub("{{name}}",name)
did_any = false
['~/.profile', '~/.bashrc','~/.zshrc','~/.bash_profile'].each do |rel_path|
# check if file exists
path = File.expand_path(rel_path)
next unless File.exists?(path)
# ask the user if they want to add it
print "Install #{name} function to #{rel_path} [yN]: "
next unless gets.chomp.downcase == "y"
# add it on to the end of the file
File.open(path,'a') do |file|
file.puts func
end
did_any = true
end
if did_any
puts "Done! #{name} will be available in new shells."
else
STDERR.puts "WARNING: Did not install in any shell dotfiles.".red
STDERR.puts "Maybe you should create the shell config file you want.".red
end
end
end
|
hashicorp/vagrant | lib/vagrant/box_collection.rb | Vagrant.BoxCollection.undir_name | ruby | def undir_name(name)
name = name.dup
name.gsub!(VAGRANT_COLON, ":")
name.gsub!(VAGRANT_SLASH, "/")
name
end | Returns the directory name for the box cleaned up | train | https://github.com/hashicorp/vagrant/blob/c22a145c59790c098f95d50141d9afb48e1ef55f/lib/vagrant/box_collection.rb#L394-L399 | class BoxCollection
TEMP_PREFIX = "vagrant-box-add-temp-".freeze
VAGRANT_SLASH = "-VAGRANTSLASH-".freeze
VAGRANT_COLON = "-VAGRANTCOLON-".freeze
# The directory where the boxes in this collection are stored.
#
# A box collection matches a very specific folder structure that Vagrant
# expects in order to easily manage and modify boxes. The folder structure
# is the following:
#
# COLLECTION_ROOT/BOX_NAME/PROVIDER/metadata.json
#
# Where:
#
# * COLLECTION_ROOT - This is the root of the box collection, and is
# the directory given to the initializer.
# * BOX_NAME - The name of the box. This is a logical name given by
# the user of Vagrant.
# * PROVIDER - The provider that the box was built for (VirtualBox,
# VMware, etc.).
# * metadata.json - A simple JSON file that at the bare minimum
# contains a "provider" key that matches the provider for the
# box. This metadata JSON, however, can contain anything.
#
# @return [Pathname]
attr_reader :directory
# Initializes the collection.
#
# @param [Pathname] directory The directory that contains the collection
# of boxes.
def initialize(directory, options=nil)
options ||= {}
@directory = directory
@hook = options[:hook]
@lock = Monitor.new
@temp_root = options[:temp_dir_root]
@logger = Log4r::Logger.new("vagrant::box_collection")
end
# This adds a new box to the system.
#
# There are some exceptional cases:
# * BoxAlreadyExists - The box you're attempting to add already exists.
# * BoxProviderDoesntMatch - If the given box provider doesn't match the
# actual box provider in the untarred box.
# * BoxUnpackageFailure - An invalid tar file.
#
# Preconditions:
# * File given in `path` must exist.
#
# @param [Pathname] path Path to the box file on disk.
# @param [String] name Logical name for the box.
# @param [String] version The version of this box.
# @param [Array<String>] providers The providers that this box can
# be a part of. This will be verified with the `metadata.json` and is
# meant as a basic check. If this isn't given, then whatever provider
# the box represents will be added.
# @param [Boolean] force If true, any existing box with the same name
# and provider will be replaced.
def add(path, name, version, **opts)
providers = opts[:providers]
providers = Array(providers) if providers
provider = nil
# A helper to check if a box exists. We store this in a variable
# since we call it multiple times.
check_box_exists = lambda do |box_formats|
box = find(name, box_formats, version)
next if !box
if !opts[:force]
@logger.error(
"Box already exists, can't add: #{name} v#{version} #{box_formats.join(", ")}")
raise Errors::BoxAlreadyExists,
name: name,
provider: box_formats.join(", "),
version: version
end
# We're forcing, so just delete the old box
@logger.info(
"Box already exists, but forcing so removing: " +
"#{name} v#{version} #{box_formats.join(", ")}")
box.destroy!
end
with_collection_lock do
log_provider = providers ? providers.join(", ") : "any provider"
@logger.debug("Adding box: #{name} (#{log_provider}) from #{path}")
# Verify the box doesn't exist early if we're given a provider. This
# can potentially speed things up considerably since we don't need
# to unpack any files.
check_box_exists.call(providers) if providers
# Create a temporary directory since we're not sure at this point if
# the box we're unpackaging already exists (if no provider was given)
with_temp_dir do |temp_dir|
# Extract the box into a temporary directory.
@logger.debug("Unpacking box into temporary directory: #{temp_dir}")
result = Util::Subprocess.execute(
"bsdtar", "--no-same-owner", "--no-same-permissions", "-v", "-x", "-m", "-s", "|\\\\\|/|", "-C", temp_dir.to_s, "-f", path.to_s)
if result.exit_code != 0
raise Errors::BoxUnpackageFailure,
output: result.stderr.to_s
end
# If we get a V1 box, we want to update it in place
if v1_box?(temp_dir)
@logger.debug("Added box is a V1 box. Upgrading in place.")
temp_dir = v1_upgrade(temp_dir)
end
# We re-wrap ourselves in the safety net in case we upgraded.
# If we didn't upgrade, then this is still safe because the
# helper will only delete the directory if it exists
with_temp_dir(temp_dir) do |final_temp_dir|
# Get an instance of the box we just added before it is finalized
# in the system so we can inspect and use its metadata.
box = Box.new(name, nil, version, final_temp_dir)
# Get the provider, since we'll need that to at the least add it
# to the system or check that it matches what is given to us.
box_provider = box.metadata["provider"]
if providers
found = providers.find { |p| p.to_sym == box_provider.to_sym }
if !found
@logger.error("Added box provider doesnt match expected: #{log_provider}")
raise Errors::BoxProviderDoesntMatch,
expected: log_provider, actual: box_provider
end
else
# Verify the box doesn't already exist
check_box_exists.call([box_provider])
end
# We weren't given a provider, so store this one.
provider = box_provider.to_sym
# Create the directory for this box, not including the provider
root_box_dir = @directory.join(dir_name(name))
box_dir = root_box_dir.join(version)
box_dir.mkpath
@logger.debug("Box directory: #{box_dir}")
# This is the final directory we'll move it to
final_dir = box_dir.join(provider.to_s)
if final_dir.exist?
@logger.debug("Removing existing provider directory...")
final_dir.rmtree
end
# Move to final destination
final_dir.mkpath
# Recursively move individual files from the temporary directory
# to the final location. We do this instead of moving the entire
# directory to avoid issues on Windows. [GH-1424]
copy_pairs = [[final_temp_dir, final_dir]]
while !copy_pairs.empty?
from, to = copy_pairs.shift
from.children(true).each do |f|
dest = to.join(f.basename)
# We don't copy entire directories, so create the
# directory and then add to our list to copy.
if f.directory?
dest.mkpath
copy_pairs << [f, dest]
next
end
# Copy the single file
@logger.debug("Moving: #{f} => #{dest}")
FileUtils.mv(f, dest)
end
end
if opts[:metadata_url]
root_box_dir.join("metadata_url").open("w") do |f|
f.write(opts[:metadata_url])
end
end
end
end
end
# Return the box
find(name, provider, version)
end
# This returns an array of all the boxes on the system, given by
# their name and their provider.
#
# @return [Array] Array of `[name, version, provider]` of the boxes
# installed on this system.
def all
results = []
with_collection_lock do
@logger.debug("Finding all boxes in: #{@directory}")
@directory.children(true).each do |child|
# Ignore non-directories, since files are not interesting to
# us in our folder structure.
next if !child.directory?
box_name = undir_name(child.basename.to_s)
# Otherwise, traverse the subdirectories and see what versions
# we have.
child.children(true).each do |versiondir|
next if !versiondir.directory?
next if versiondir.basename.to_s.start_with?(".")
version = versiondir.basename.to_s
versiondir.children(true).each do |provider|
# Ensure version of box is correct before continuing
if !Gem::Version.correct?(version)
ui = Vagrant::UI::Prefixed.new(Vagrant::UI::Colored.new, "vagrant")
ui.warn(I18n.t("vagrant.box_version_malformed",
version: version, box_name: box_name))
@logger.debug("Invalid version #{version} for box #{box_name}")
next
end
# Verify this is a potentially valid box. If it looks
# correct enough then include it.
if provider.directory? && provider.join("metadata.json").file?
provider_name = provider.basename.to_s.to_sym
@logger.debug("Box: #{box_name} (#{provider_name}, #{version})")
results << [box_name, version, provider_name]
else
@logger.debug("Invalid box #{box_name}, ignoring: #{provider}")
end
end
end
end
end
# Sort the list to group like providers and properly ordered versions
results.sort_by! do |box_result|
[box_result[0], box_result[2], Gem::Version.new(box_result[1])]
end
results
end
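# Sketch of the returned shape (entries illustrative):
#
#   [["hashicorp/precise64", "1.0.0", :virtualbox],
#    ["ubuntu/trusty64", "20190514.0.0", :vmware_desktop]]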
# Find a box in the collection with the given name and provider.
#
# @param [String] name Name of the box (logical name).
# @param [Array] providers Providers that the box implements.
# @param [String] version Version constraints to adhere to. Example:
# "~> 1.0" or "= 1.0, ~> 1.1"
# @return [Box] The box found, or `nil` if not found.
def find(name, providers, version)
providers = Array(providers)
# Build up the requirements we have
requirements = version.to_s.split(",").map do |v|
Gem::Requirement.new(v.strip)
end
with_collection_lock do
box_directory = @directory.join(dir_name(name))
if !box_directory.directory?
@logger.info("Box not found: #{name} (#{providers.join(", ")})")
return nil
end
# Keep a mapping of Gem::Version mangled versions => directories.
# ie. 0.1.0.pre.alpha.2 => 0.1.0-alpha.2
# This is so we can sort version numbers properly here, but still
# refer to the real directory names in path checks below and pass an
# unmangled version string to Box.new
version_dir_map = {}
versions = box_directory.children(true).map do |versiondir|
next if !versiondir.directory?
next if versiondir.basename.to_s.start_with?(".")
version = Gem::Version.new(versiondir.basename.to_s)
version_dir_map[version.to_s] = versiondir.basename.to_s
version
end.compact
# Traverse through versions with the latest version first
versions.sort.reverse.each do |v|
if !requirements.all? { |r| r.satisfied_by?(v) }
# Unsatisfied version requirements
next
end
versiondir = box_directory.join(version_dir_map[v.to_s])
providers.each do |provider|
provider_dir = versiondir.join(provider.to_s)
next if !provider_dir.directory?
@logger.info("Box found: #{name} (#{provider})")
metadata_url = nil
metadata_url_file = box_directory.join("metadata_url")
metadata_url = metadata_url_file.read if metadata_url_file.file?
if metadata_url && @hook
hook_env = @hook.call(
:authenticate_box_url, box_urls: [metadata_url])
metadata_url = hook_env[:box_urls].first
end
return Box.new(
name, provider, version_dir_map[v.to_s], provider_dir,
metadata_url: metadata_url,
)
end
end
end
nil
end
# This upgrades a v1.1 - v1.4 box directory structure up to a v1.5
# directory structure. This will raise exceptions if it fails in any
# way.
def upgrade_v1_1_v1_5
with_collection_lock do
temp_dir = Pathname.new(Dir.mktmpdir(TEMP_PREFIX, @temp_root))
@directory.children(true).each do |boxdir|
# Ignore all non-directories because they can't be boxes
next if !boxdir.directory?
box_name = boxdir.basename.to_s
# If it is a v1 box, then we need to upgrade it first
if v1_box?(boxdir)
upgrade_dir = v1_upgrade(boxdir)
FileUtils.mv(upgrade_dir, boxdir.join("virtualbox"))
end
# Create the directory for this box
new_box_dir = temp_dir.join(dir_name(box_name), "0")
new_box_dir.mkpath
# Go through each provider and move it
boxdir.children(true).each do |providerdir|
FileUtils.cp_r(providerdir, new_box_dir.join(providerdir.basename))
end
end
# Move the folder into place
@directory.rmtree
FileUtils.mv(temp_dir.to_s, @directory.to_s)
end
end
# Cleans the directory for a box by removing the folders that are
# empty.
def clean(name)
return false if exists?(name)
path = File.join(directory, dir_name(name))
FileUtils.rm_rf(path)
end
protected
# Returns the directory name for the box of the given name.
#
# @param [String] name
# @return [String]
def dir_name(name)
name = name.dup
name.gsub!(":", VAGRANT_COLON) if Util::Platform.windows?
name.gsub!("/", VAGRANT_SLASH)
name
end
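# Round-trip illustration with undir_name, the method this record documents
# (box name hypothetical):
#
#   dir_name("hashicorp/precise64")                # => "hashicorp-VAGRANTSLASH-precise64"
#   undir_name("hashicorp-VAGRANTSLASH-precise64") # => "hashicorp/precise64"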
# Returns the directory name for the box cleaned up
# This checks if the given directory represents a V1 box on the
# system.
#
# @param [Pathname] dir Directory where the box is unpacked.
# @return [Boolean]
def v1_box?(dir)
# We detect a V1 box given by whether there is a "box.ovf" which
# is a heuristic but is pretty accurate.
dir.join("box.ovf").file?
end
# This upgrades the V1 box contained unpacked in the given directory
# and returns the directory of the upgraded version. This is
# _destructive_ to the contents of the old directory. That is, the
# contents of the old V1 box will be destroyed or moved.
#
# Preconditions:
# * `dir` is a valid V1 box. Verify with {#v1_box?}
#
# @param [Pathname] dir Directory where the V1 box is unpacked.
# @return [Pathname] Path to the unpackaged V2 box.
def v1_upgrade(dir)
@logger.debug("Upgrading box in directory: #{dir}")
temp_dir = Pathname.new(Dir.mktmpdir(TEMP_PREFIX, @temp_root))
@logger.debug("Temporary directory for upgrading: #{temp_dir}")
# Move all the things into the temporary directory
dir.children(true).each do |child|
# Don't move the temp_dir
next if child == temp_dir
# Move every other directory into the temporary directory
@logger.debug("Copying to upgrade directory: #{child}")
FileUtils.mv(child, temp_dir.join(child.basename))
end
# If there is no metadata.json file, make one, since this is how
# we determine if the box is a V2 box.
metadata_file = temp_dir.join("metadata.json")
if !metadata_file.file?
metadata_file.open("w") do |f|
f.write(JSON.generate({
provider: "virtualbox"
}))
end
end
# Return the temporary directory
temp_dir
end
# This locks the region given by the block with a lock on this
# collection.
def with_collection_lock
@lock.synchronize do
return yield
end
end
# This is a helper that makes sure that our temporary directories
# are cleaned up no matter what.
#
# @param [String] dir Path to a temporary directory
# @return [Object] The result of whatever the yield is
def with_temp_dir(dir=nil)
dir ||= Dir.mktmpdir(TEMP_PREFIX, @temp_root)
dir = Pathname.new(dir)
yield dir
ensure
FileUtils.rm_rf(dir.to_s)
end
# Checks if a box with a given name exists.
def exists?(box_name)
all.any? { |box| box.first.eql?(box_name) }
end
end
|
PierreRambaud/gemirro | lib/gemirro/source.rb | Gemirro.Source.fetch_versions | ruby | def fetch_versions
Utils.logger.info(
"Fetching #{Configuration.versions_file} on #{@name} (#{@host})"
)
Http.get(host + '/' + Configuration.versions_file).body
end | Fetches a list of all the available Gems and their versions.
@return [String] | train | https://github.com/PierreRambaud/gemirro/blob/5c6b5abb5334ed3beb256f6764bc336e2cf2dc21/lib/gemirro/source.rb#L32-L38 | class Source
attr_reader :name, :host, :gems
##
# @param [String] name
# @param [String] host
# @param [Array] gems
#
def initialize(name, host, gems = [])
@name = name.downcase.gsub(/\s+/, '_')
@host = host.chomp('/')
@gems = gems
end
##
# Fetches a list of all the available Gems and their versions.
#
# @return [String]
#
##
# Fetches a list of all the available Gems and their versions.
#
# @return [String]
#
def fetch_prerelease_versions
Utils.logger.info(
"Fetching #{Configuration.prerelease_versions_file}" \
" on #{@name} (#{@host})"
)
Http.get(host + '/' + Configuration.prerelease_versions_file).body
end
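# A hedged usage sketch (host and gem name illustrative):
#
#   source = Gemirro::Source.new('RubyGems', 'https://rubygems.org')
#   source.gem('rack', '>= 1.0')
#   body = source.fetch_versions # raw body of the remote versions file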
##
# Fetches the `.gem` file of a given Gem and version.
#
# @param [String] name
# @param [String] version
# @return [String]
#
def fetch_gem(filename)
Utils.logger.info(
"Fetching gem #{filename} on #{@host}"
)
Http.get(host + "/gems/#{filename}").body
end
##
# Fetches the `.gemspec.rz` file of a given Gem and version.
#
# @param [String] filename
# @return [String]
#
def fetch_gemspec(filename)
Utils.logger.info(
"Fetching gemspec #{filename} on #{@host}"
)
marshal = Gemirro::Configuration.marshal_identifier
Http.get(host + "/quick/#{marshal}/#{filename}").body
end
##
# Adds a new Gem to the source.
#
# @param [String] name
# @param [String] requirement
#
def gem(name, requirement = nil)
gems << Gem.new(name, requirement)
end
end
|
shadowbq/snort-thresholds | lib/threshold/thresholds.rb | Threshold.Thresholds.reject | ruby | def reject(&blk)
if block_given?
Thresholds.new(@thresholds.reject(&blk))
else
Thresholds.new(@thresholds.reject)
end
end | Returns a new Threshold Object | train | https://github.com/shadowbq/snort-thresholds/blob/e3e9d1b10c2460846e1779fda67e8bec0422f53e/lib/threshold/thresholds.rb#L119-L125 | class Thresholds
extend Forwardable
attr_accessor :file, :readonly
def_delegators :@thresholds, :<<, :length, :push, :pop, :first, :last, :<=>, :==, :clear, :[], :[]=, :shift, :unshift, :each, :sort!, :shuffle!, :collect!, :map!, :reject!, :delete_if, :select!, :keep_if, :index, :include?
def initialize(thresholds = [])
@thresholds = thresholds
end
# Write changes to the file
def flush
begin
valid_existing_file?(@file)
raise ReadOnlyThresholdsFile if @readonly
hash = current_hash
file = File.open(@file, 'w+')
raise ThresholdAtomicLockFailure, 'The @file state/hash changed before we could flush the file' unless stored_hash == hash
file.write self.sort.to_s
file.close
rescue NonExistantThresholdFile
raise ReadOnlyThresholdsFile if @readonly
file = File.open(@file, 'w')
file.write self.sort.to_s
file.close
end
stored_hash=current_hash
return true
end
# Clears current collection and Read in the thresholds.conf file
def loadfile!
@thresholds.clear
loadfile
end
# Append in the thresholds.conf file to current collection
def loadfile
valid_existing_file?(@file)
results = Threshold::Parser.new(@file)
@stored_hash= results.filehash
#puts stored_hash
results.caps.each do |result|
builder = Threshold::Builder.new(result)
self << builder.build
end
end
# Check if all objects in the Threshold Instance report .valid?
def valid?
begin
self.each do |threshold|
if threshold.respond_to?(:valid?)
return false unless threshold.valid?
else
raise InvalidThresholdsObject, "Container object has unknown objects"
end
end
return true
rescue InvalidThresholdsObject
return false
end
end
# Printer.
# Pass true to to_s to skip printing each entry's internal comment.
def to_s(skip = false)
output = ""
raise InvalidThresholdsObject, "Container object has unknown objects" unless valid?
self.each do |threshold|
output << threshold.to_s(skip) + "\n"
end
return output
end
# The calculated hash of the threshold.conf file at load time.
def stored_hash
@stored_hash
end
def to_a
@thresholds
end
## Forwardable corrections:
## Core Array methods such as #sort return plain new Arrays, so direct
## Forwardable delegation would leak Arrays; wrap the results in Thresholds instead.
# Returns a new Threshold Object
def sort
Thresholds.new(@thresholds.sort)
end
# Returns a new Threshold Object
def reverse
Thresholds.new(@thresholds.reverse)
end
# Returns a new Threshold Object
def shuffle
Thresholds.new(@thresholds.shuffle)
end
# Returns a new Threshold Object
# Returns a new Threshold Object
def select(&blk)
if block_given?
Thresholds.new(@thresholds.select(&blk))
else
Thresholds.new(@thresholds.select)
end
end
# Uniques by printable output (to_s(true)) by default
# Returns a new Threshold Object
def uniq(&blk)
if block_given?
Thresholds.new(@thresholds.uniq(&blk))
else
Thresholds.new(@thresholds.uniq{ |lineitem| lineitem.to_s(true) })
end
end
## Complex SET Methods
## &(union), | (intersect), + (concat), - (Difference)
# + (concat)
# Returns a new Threshold Object
def +(an0ther)
Thresholds.new(@thresholds + an0ther.to_a)
end
# | (intersect)
# Returns a new Threshold Object
def |(an0ther)
Thresholds.new(@thresholds | an0ther.to_a)
end
# & (union)
# Returns a new Threshold Object
def &(an0ther)
Thresholds.new(@thresholds & an0ther.to_a)
end
# - (Difference)
# Returns a new Threshold Object
def -(an0ther)
Thresholds.new(@thresholds - an0ther.to_a)
end
# Returns a new Threshold Object with just suppressions
def suppressions(&blk)
if block_given?
self.suppressions.select(&blk)
else
Thresholds.new(@thresholds.select{|t| t.class.to_s == "Threshold::Suppression"})
end
end
# Returns a new Threshold Object with just event_filters
def event_filters(&blk)
if block_given?
self.event_filters.select(&blk)
else
Thresholds.new(@thresholds.select{|t| t.class.to_s == "Threshold::EventFilter"})
end
end
# Returns a new Threshold Object with just rate_filters
def rate_filters(&blk)
if block_given?
self.rate_filters.select(&blk)
else
Thresholds.new(@thresholds.select{|t| t.class.to_s == "Threshold::RateFilter"})
end
end
private
def stored_hash=(foo)
@stored_hash=foo
end
def current_hash
file = File.open(@file, 'rb+')
file.flock(File::LOCK_EX)
hash = Digest::MD5.file @file
file.close
return hash
end
def valid_existing_file?(file)
if file !=nil
raise NonExistantThresholdFile, "Missing threshold.conf" unless (File.file?(file) and File.exists?(file))
else
raise MissingThresholdFileConfiguration, "Missing threshold.conf path. See README for Usage."
end
return true
end
end
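Usage sketch (the file path is a placeholder; entries must respond to valid? and to_s as the collection expects):

thresholds = Threshold::Thresholds.new
thresholds.file = '/etc/snort/threshold.conf'
thresholds.loadfile                     # parse and append entries from disk
puts thresholds.suppressions.sort.to_s  # print only the suppression entries
thresholds.flush if thresholds.valid?   # write back, guarded by the stored hash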
|
mailgun/mailgun-ruby | lib/railgun/mailer.rb | Railgun.Mailer.deliver! | ruby | def deliver!(mail)
mg_message = Railgun.transform_for_mailgun(mail)
response = @mg_client.send_message(@domain, mg_message)
if response.code == 200 then
mg_id = response.to_h['id']
mail.message_id = mg_id
end
response
end | Delivers the mail through the Mailgun messages API and, on a 200 response, stores the returned Mailgun message id on the Mail::Message.
@param [Mail::Message] mail the message to deliver | train | https://github.com/mailgun/mailgun-ruby/blob/265efffd51209b0170a3225bbe945b649643465a/lib/railgun/mailer.rb#L42-L51 | class Mailer
# List of the headers that will be ignored when copying headers from `mail.header_fields`
IGNORED_HEADERS = %w[ to from subject ]
# [Hash] config ->
# Requires *at least* `api_key` and `domain` keys.
attr_accessor :config, :domain, :settings
# Initialize the Railgun mailer.
#
# @param [Hash] config Hash of config values, typically from `app_config.action_mailer.mailgun_config`
def initialize(config)
@config = config
[:api_key, :domain].each do |k|
raise Railgun::ConfigurationError.new("Config requires `#{k}` key", @config) unless @config.has_key?(k)
end
@mg_client = Mailgun::Client.new(config[:api_key], config[:api_host] || 'api.mailgun.net', config[:api_version] || 'v3', config[:api_ssl].nil? ? true : config[:api_ssl])
@domain = @config[:domain]
# To avoid exception in mail gem v2.6
@settings = { return_response: true }
if (@config[:fake_message_send] || false)
Rails.logger.info "NOTE: fake message sending has been enabled for mailgun-ruby!"
@mg_client.enable_test_mode!
end
end
def mailgun_client
@mg_client
end
end
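Direct-use sketch (Rails normally constructs the mailer from config.action_mailer; the key, domain, and addresses are placeholders, and deliver! performs a real API call):

require 'mail'

mailer = Railgun::Mailer.new(api_key: 'key-xxxxxxxx', domain: 'example.mailgun.org')
message = Mail.new do
  from    'sender@example.com'
  to      'rcpt@example.com'
  subject 'Hello'
  body    'Sent through Mailgun'
end
mailer.deliver!(message)
message.message_id # populated from the Mailgun response on a 200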
|
senchalabs/jsduck | lib/jsduck/tag/overrides.rb | JsDuck::Tag.Overrides.format | ruby | def format(m, formatter)
m[:overrides].each do |o|
label = o[:owner] + "." + o[:name]
o[:link] = formatter.link(o[:owner], o[:name], label, m[:tagname], m[:static])
end
end | Generate HTML links from :overrides data. | train | https://github.com/senchalabs/jsduck/blob/febef5558ecd05da25f5c260365acc3afd0cafd8/lib/jsduck/tag/overrides.rb#L17-L22 | class Overrides < Tag
def initialize
@tagname = :overrides
@html_position = POS_OVERRIDES
end
# Generate HTML links from :overrides data.
def to_html(m)
"<p>Overrides: " + m[:overrides].map {|o| o[:link] }.join(", ") + "</p>"
end
end
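Illustrative call, with the member hash already carrying the :link values that #format produces (the link target is hypothetical):

tag = JsDuck::Tag::Overrides.new
member = {
  overrides: [
    {link: '<a href="#!/api/Ext.Base-method-destroy">Ext.Base.destroy</a>'}
  ]
}
tag.to_html(member)
# => '<p>Overrides: <a href="#!/api/Ext.Base-method-destroy">Ext.Base.destroy</a></p>'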
|
rmagick/rmagick | lib/rmagick_internal.rb | Magick.Image.matte_replace | ruby | def matte_replace(x, y)
f = copy
f.opacity = OpaqueOpacity unless f.alpha?
target = f.pixel_color(x, y)
f.transparent(target)
end | Make transparent all pixels that are the same color as the
pixel at (x, y). | train | https://github.com/rmagick/rmagick/blob/ef6688ed9d76bf123c2ea1a483eff8635051adb7/lib/rmagick_internal.rb#L917-L922 | class Image
include Comparable
alias affinity remap
# Provide an alternate version of Draw#annotate, for folks who
# want to find it in this class.
def annotate(draw, width, height, x, y, text, &block)
check_destroyed
draw.annotate(self, width, height, x, y, text, &block)
self
end
# Set the color at x,y
def color_point(x, y, fill)
f = copy
f.pixel_color(x, y, fill)
f
end
# Set all pixels that have the same color as the pixel at x,y and
# are neighbors to the fill color
def color_floodfill(x, y, fill)
target = pixel_color(x, y)
color_flood_fill(target, fill, x, y, Magick::FloodfillMethod)
end
# Set all pixels that are neighbors of x,y and are not the border color
# to the fill color
def color_fill_to_border(x, y, fill)
color_flood_fill(border_color, fill, x, y, Magick::FillToBorderMethod)
end
# Set all pixels to the fill color. Very similar to Image#erase!
# Accepts either String or Pixel arguments
def color_reset!(fill)
save = background_color
# Change the background color _outside_ the begin block
# so that if this object is frozen the exception will be
# raised before we have to handle it explicitly.
self.background_color = fill
begin
erase!
ensure
self.background_color = save
end
self
end
# Used by ImageList methods - see ImageList#cur_image
def cur_image
self
end
# Thanks to Russell Norris!
def each_pixel
get_pixels(0, 0, columns, rows).each_with_index do |p, n|
yield(p, n % columns, n / columns)
end
self
end
# Retrieve EXIF data by entry or all. If one or more entry names specified,
# return the values associated with the entries. If no entries specified,
# return all entries and values. The return value is an array of [name,value]
# arrays.
def get_exif_by_entry(*entry)
ary = []
if entry.length.zero?
exif_data = self['EXIF:*']
exif_data.split("\n").each { |exif| ary.push(exif.split('=')) } if exif_data
else
get_exif_by_entry # ensure properties is populated with exif data
entry.each do |name|
rval = self["EXIF:#{name}"]
ary.push([name, rval])
end
end
ary
end
# Retrieve EXIF data by tag number or all tag/value pairs. The return value is a hash.
def get_exif_by_number(*tag)
hash = {}
if tag.length.zero?
exif_data = self['EXIF:!']
if exif_data
exif_data.split("\n").each do |exif|
tag, value = exif.split('=')
tag = tag[1, 4].hex
hash[tag] = value
end
end
else
get_exif_by_number # ensure properties is populated with exif data
tag.each do |num|
rval = self[format('#%04X', num.to_i)]
hash[num] = rval == 'unknown' ? nil : rval
end
end
hash
end
# Retrieve IPTC information by record number:dataset tag constant defined in
# Magick::IPTC, above.
def get_iptc_dataset(ds)
self['IPTC:' + ds]
end
# Iterate over IPTC record number:dataset tags, yield for each non-nil dataset
def each_iptc_dataset
Magick::IPTC.constants.each do |record|
rec = Magick::IPTC.const_get(record)
rec.constants.each do |dataset|
data_field = get_iptc_dataset(rec.const_get(dataset))
yield(dataset, data_field) unless data_field.nil?
end
end
nil
end
# Patches problematic change to the order of arguments in 1.11.0.
# Before this release, the order was
# black_point, gamma, white_point
# RMagick 1.11.0 changed this to
# black_point, white_point, gamma
# This fix tries to determine if the arguments are in the old order and
# if so, swaps the gamma and white_point arguments. Then it calls
# level2, which simply accepts the arguments as given.
# Inspect the gamma and white point values and swap them if they
# look like they're in the old order.
# (Thanks to Al Evans for the suggestion.)
def level(black_point = 0.0, white_point = nil, gamma = nil)
black_point = Float(black_point)
white_point ||= Magick::QuantumRange - black_point
white_point = Float(white_point)
gamma_arg = gamma
gamma ||= 1.0
gamma = Float(gamma)
if gamma.abs > 10.0 || white_point.abs <= 10.0 || white_point.abs < gamma.abs
gamma, white_point = white_point, gamma
white_point = Magick::QuantumRange - black_point unless gamma_arg
end
level2(black_point, white_point, gamma)
end
# These four methods are equivalent to the Draw#matte method
# with the "Point", "Replace", "Floodfill", "FilltoBorder", and
# "Replace" arguments, respectively.
# Make the pixel at (x,y) transparent.
def matte_point(x, y)
f = copy
f.opacity = OpaqueOpacity unless f.alpha?
pixel = f.pixel_color(x, y)
pixel.opacity = TransparentOpacity
f.pixel_color(x, y, pixel)
f
end
# Make transparent all pixels that are the same color as the
# pixel at (x, y).
# Make transparent any pixel that matches the color of the pixel
# at (x,y) and is a neighbor.
def matte_floodfill(x, y)
f = copy
f.opacity = OpaqueOpacity unless f.alpha?
target = f.pixel_color(x, y)
f.matte_flood_fill(target, TransparentOpacity,
x, y, FloodfillMethod)
end
# Make transparent any neighbor pixel that is not the border color.
def matte_fill_to_border(x, y)
f = copy
f.opacity = Magick::OpaqueOpacity unless f.alpha?
f.matte_flood_fill(border_color, TransparentOpacity,
x, y, FillToBorderMethod)
end
# Make all pixels transparent.
def matte_reset!
self.opacity = Magick::TransparentOpacity
self
end
# Force an image to exact dimensions without changing the aspect ratio.
# Resize and crop if necessary. (Thanks to Jerett Taylor!)
def resize_to_fill(ncols, nrows = nil, gravity = CenterGravity)
copy.resize_to_fill!(ncols, nrows, gravity)
end
def resize_to_fill!(ncols, nrows = nil, gravity = CenterGravity)
nrows ||= ncols
if ncols != columns || nrows != rows
scale = [ncols / columns.to_f, nrows / rows.to_f].max
resize!(scale * columns + 0.5, scale * rows + 0.5)
end
crop!(gravity, ncols, nrows, true) if ncols != columns || nrows != rows
self
end
# Preserve aliases used < RMagick 2.0.1
alias crop_resized resize_to_fill
alias crop_resized! resize_to_fill!
# Convenience method to resize retaining the aspect ratio.
# (Thanks to Robert Manni!)
def resize_to_fit(cols, rows = nil)
rows ||= cols
change_geometry(Geometry.new(cols, rows)) do |ncols, nrows|
resize(ncols, nrows)
end
end
def resize_to_fit!(cols, rows = nil)
rows ||= cols
change_geometry(Geometry.new(cols, rows)) do |ncols, nrows|
resize!(ncols, nrows)
end
end
# Replace matching neighboring pixels with texture pixels
def texture_floodfill(x, y, texture)
target = pixel_color(x, y)
texture_flood_fill(target, texture, x, y, FloodfillMethod)
end
# Replace neighboring pixels to border color with texture pixels
def texture_fill_to_border(x, y, texture)
texture_flood_fill(border_color, texture, x, y, FillToBorderMethod)
end
# Construct a view. If a block is present, yield and pass the view
# object, otherwise return the view object.
def view(x, y, width, height)
view = View.new(self, x, y, width, height)
return view unless block_given?
begin
yield(view)
ensure
view.sync
end
nil
end
# Magick::Image::View class
class View
attr_reader :x, :y, :width, :height
attr_accessor :dirty
def initialize(img, x, y, width, height)
img.check_destroyed
Kernel.raise ArgumentError, "invalid geometry (#{width}x#{height}+#{x}+#{y})" if width <= 0 || height <= 0
Kernel.raise RangeError, "geometry (#{width}x#{height}+#{x}+#{y}) exceeds image boundary" if x < 0 || y < 0 || (x + width) > img.columns || (y + height) > img.rows
@view = img.get_pixels(x, y, width, height)
@img = img
@x = x
@y = y
@width = width
@height = height
@dirty = false
end
def [](*args)
rows = Rows.new(@view, @width, @height, args)
rows.add_observer(self)
rows
end
# Store changed pixels back to image
def sync(force = false)
@img.store_pixels(x, y, width, height, @view) if @dirty || force
@dirty || force
end
# Get update from Rows - if @dirty ever becomes
# true, don't change it back to false!
def update(rows)
@dirty = true
rows.delete_observer(self) # No need to tell us again.
nil
end
# Magick::Image::View::Pixels
# Defines channel attribute getters/setters
class Pixels < Array
include Observable
# Define a getter and a setter for each channel.
%i[red green blue opacity].each do |c|
module_eval <<-END_EVAL
def #{c}
return collect { |p| p.#{c} }
end
def #{c}=(v)
each { |p| p.#{c} = v }
changed
notify_observers(self)
nil
end
END_EVAL
end
end # class Magick::Image::View::Pixels
# Magick::Image::View::Rows
class Rows
include Observable
def initialize(view, width, height, rows)
@view = view
@width = width
@height = height
@rows = rows
end
def [](*args)
cols(args)
# Both View::Pixels and Magick::Pixel implement Observable
if @unique
pixels = @view[@rows[0] * @width + @cols[0]]
pixels.add_observer(self)
else
pixels = View::Pixels.new
each do |x|
p = @view[x]
p.add_observer(self)
pixels << p
end
end
pixels
end
def []=(*args)
rv = args.delete_at(-1) # get rvalue
unless rv.is_a?(Pixel) # must be a Pixel or a color name
begin
rv = Pixel.from_color(rv)
rescue TypeError
Kernel.raise TypeError, "cannot convert #{rv.class} into Pixel"
end
end
cols(args)
each { |x| @view[x] = rv.dup }
changed
notify_observers(self)
nil
end
# A pixel has been modified. Tell the view.
def update(pixel)
changed
notify_observers(self)
pixel.delete_observer(self) # Don't need to hear again.
nil
end
private
def cols(*args)
@cols = args[0] # remove the outermost array
@unique = false
# Convert @rows to an Enumerable object
case @rows.length
when 0 # Create a Range for all the rows
@rows = Range.new(0, @height, true)
when 1 # Range, Array, or a single integer
# if the single element is already an Enumerable
# object, get it.
if @rows.first.respond_to? :each
@rows = @rows.first
else
@rows = Integer(@rows.first)
@rows += @height if @rows < 0
Kernel.raise IndexError, "index [#{@rows}] out of range" if @rows < 0 || @rows > @height - 1
# Convert back to an array
@rows = Array.new(1, @rows)
@unique = true
end
when 2
# A pair of integers representing the starting column and the number of columns
start = Integer(@rows[0])
length = Integer(@rows[1])
# Negative start -> start from last row
start += @height if start < 0
if start > @height || start < 0 || length < 0
Kernel.raise IndexError, "index [#{@rows.first}] out of range"
elsif start + length > @height
length = @height - length
length = [length, 0].max
end
# Create a Range for the specified set of rows
@rows = Range.new(start, start + length, true)
end
case @cols.length
when 0 # all rows
@cols = Range.new(0, @width, true) # convert to range
@unique = false
when 1 # Range, Array, or a single integer
# if the single element is already an Enumerable
# object, get it.
if @cols.first.respond_to? :each
@cols = @cols.first
@unique = false
else
@cols = Integer(@cols.first)
@cols += @width if @cols < 0
Kernel.raise IndexError, "index [#{@cols}] out of range" if @cols < 0 || @cols > @width - 1
# Convert back to array
@cols = Array.new(1, @cols)
@unique &&= true
end
when 2
# A pair of integers representing the starting column and the number of columns
start = Integer(@cols[0])
length = Integer(@cols[1])
# Negative start -> start from last row
start += @width if start < 0
if start > @width || start < 0 || length < 0
# nop
elsif start + length > @width
length = @width - length
length = [length, 0].max
end
# Create a Range for the specified set of columns
@cols = Range.new(start, start + length, true)
@unique = false
end
end
# iterator called from subscript methods
def each
maxrows = @height - 1
maxcols = @width - 1
@rows.each do |j|
Kernel.raise IndexError, "index [#{j}] out of range" if j > maxrows
@cols.each do |i|
Kernel.raise IndexError, "index [#{i}] out of range" if i > maxcols
yield j * @width + i
end
end
nil # useless return value
end
end # class Magick::Image::View::Rows
end # class Magick::Image::View
end # class Magick::Image
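Typical use of the helpers above (the file names are placeholders):

require 'rmagick'

img   = Magick::Image.read('logo.png').first
keyed = img.matte_replace(0, 0)         # make the color at (0,0) transparent everywhere
thumb = keyed.resize_to_fill(100, 100)  # exactly 100x100, cropped to preserve aspect
thumb.write('thumb.png')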
|
mojombo/chronic | lib/chronic/repeaters/repeater_day.rb | Chronic.RepeaterDay.next | ruby | def next(pointer)
super
unless @current_day_start
@current_day_start = Chronic.time_class.local(@now.year, @now.month, @now.day)
end
direction = pointer == :future ? 1 : -1
@current_day_start += direction * DAY_SECONDS
Span.new(@current_day_start, @current_day_start + DAY_SECONDS)
end | Returns a Span covering the day after (for a :future pointer) or before (for :past) the current day. | train | https://github.com/mojombo/chronic/blob/2b1eae7ec440d767c09e0b1a7f0e9bcf30ce1d6c/lib/chronic/repeaters/repeater_day.rb#L10-L21 | class RepeaterDay < Repeater #:nodoc:
DAY_SECONDS = 86_400 # (24 * 60 * 60)
def initialize(type, width = nil, options = {})
super
@current_day_start = nil
end
def this(pointer = :future)
super
case pointer
when :future
day_begin = Chronic.construct(@now.year, @now.month, @now.day, @now.hour)
day_end = Chronic.construct(@now.year, @now.month, @now.day) + DAY_SECONDS
when :past
day_begin = Chronic.construct(@now.year, @now.month, @now.day)
day_end = Chronic.construct(@now.year, @now.month, @now.day, @now.hour)
when :none
day_begin = Chronic.construct(@now.year, @now.month, @now.day)
day_end = Chronic.construct(@now.year, @now.month, @now.day) + DAY_SECONDS
end
Span.new(day_begin, day_end)
end
def offset(span, amount, pointer)
direction = pointer == :future ? 1 : -1
span + direction * amount * DAY_SECONDS
end
def width
DAY_SECONDS
end
def to_s
super << '-day'
end
end
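A small arithmetic sketch of the offset/width contract (assumes Chronic::Span, chronic's Range subclass that shifts by seconds; the dates are arbitrary):

require 'chronic'

day = Chronic::RepeaterDay.new(:day)
day.width # => 86400
span = Chronic::Span.new(Time.local(2013, 8, 27), Time.local(2013, 8, 28))
day.offset(span, 2, :future) # the same 24h window, two days later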
|
projectcypress/health-data-standards | lib/hqmf-parser/2.0/population_criteria.rb | HQMF2.PopulationCriteria.handle_type | ruby | def handle_type(id_generator)
if @type != 'AGGREGATE'
# Generate the precondition for this population
if @preconditions.length > 1 ||
(@preconditions.length == 1 && @preconditions[0].conjunction != conjunction_code)
@preconditions = [Precondition.new(id_generator.next_id, conjunction_code, @preconditions)]
end
else
# Extract the data criteria this population references
dc = handle_observation_criteria
@preconditions = [Precondition.new(id_generator.next_id, nil, nil, false, HQMF2::Reference.new(dc.id))]
end
end | Handles how the code should deal with the type definition (aggregate vs non-aggregate):
non-aggregate populations get a single root precondition, while aggregate (OBSERV)
populations reference extracted measure observation criteria. | train | https://github.com/projectcypress/health-data-standards/blob/252d4f0927c513eacde6b9ea41b76faa1423c34b/lib/hqmf-parser/2.0/population_criteria.rb#L27-L39 | class PopulationCriteria
include HQMF2::Utilities
attr_reader :preconditions, :id, :hqmf_id, :title, :aggregator, :comments
# need to do this to allow for setting the type to OBSERV for
attr_accessor :type
# Create a new population criteria from the supplied HQMF entry
# @param [Nokogiri::XML::Element] the HQMF entry
def initialize(entry, doc, id_generator)
@id_generator = id_generator
@doc = doc
@entry = entry
setup_derived_entry_elements(id_generator)
# modify type to meet current expected population names
@type = 'IPP' if @type == 'IPOP' || @type == 'IPPOP'
@comments = nil if comments.empty?
# MEAN is handled in current code. Changed since it should have the same effect
@aggregator = 'MEAN' if @aggregator == 'AVERAGE'
@hqmf_id = @type unless @hqmf_id # The id extension is not required, if it's not provided use the code
handle_type(id_generator)
end
# Handles how the code should deal with the type definition (aggregate vs non-aggregate)
# Handles extracting elements from the entry
def setup_derived_entry_elements(id_generator)
@hqmf_id = attr_val('./*/cda:id/@root') || attr_val('./*/cda:typeId/@extension')
@title = attr_val('./*/cda:code/cda:displayName/@value').try(:titleize)
@type = attr_val('./*/cda:code/@code')
@comments = @entry.xpath('./*/cda:text/cda:xml/cda:qdmUserComments/cda:item/text()', HQMF2::Document::NAMESPACES)
.map(&:content)
handle_preconditions(id_generator)
obs_test = attr_val('./cda:measureObservationDefinition/@classCode')
# If there are no measure observations, or there is a title, then there are no aggregations to extract
return unless !@title && obs_test.to_s == 'OBS'
@title = attr_val('../cda:code/cda:displayName/@value')
@aggregator = attr_val('./cda:measureObservationDefinition/cda:methodCode/cda:item/@code')
end
# specifically handles extracting the preconditions for the population criteria
def handle_preconditions(id_generator)
# Nest multiple preconditions under a single root precondition
@preconditions = @entry.xpath('./*/cda:precondition[not(@nullFlavor)]', HQMF2::Document::NAMESPACES)
.collect do |pre|
precondition = Precondition.parse(pre, @doc, id_generator)
precondition.reference.nil? && precondition.preconditions.empty? ? nil : precondition
end
# Remove uneeded nils from the array
@preconditions.compact!
end
# extracts out any measure observation definitons, creating from them the proper criteria to generate a precondition
def handle_observation_criteria
exp = @entry.at_xpath('./cda:measureObservationDefinition/cda:value/cda:expression/@value',
HQMF2::Document::NAMESPACES)
# Measure Observations criteria rely on computed expressions. If it doesn't have one,
# then it is likely formatted improperly.
fail 'Measure Observations criteria is missing computed expression(s) ' if exp.nil?
parts = exp.to_s.split('-')
dc = parse_parts_to_dc(parts)
@doc.add_data_criteria(dc)
# Update reference_ids with any newly referenced data criteria
dc.children_criteria.each { |cc| @doc.add_reference_id(cc) } unless dc.children_criteria.nil?
dc
end
# generates the value given in an expression based on the number of criteria it references.
def parse_parts_to_dc(parts)
case parts.length
when 1
# If there is only one part, it is a reference to an existing data criteria's value
@doc.find_criteria_by_lvn(parts.first.strip.split('.')[0])
when 2
# If there are two parts, there is a computation performed, specifically time difference, on the two criteria
children = parts.collect { |p| @doc.find_criteria_by_lvn(p.strip.split('.')[0]).id }
id = "GROUP_TIMEDIFF_#{@id_generator.next_id}"
HQMF2::DataCriteriaWrapper.new(id: id,
title: id,
subset_operators: [HQMF::SubsetOperator.new('DATETIMEDIFF', nil)],
children_criteria: children,
derivation_operator: HQMF::DataCriteria::XPRODUCT,
type: 'derived',
definition: 'derived',
negation: false,
source_data_criteria: id
)
else
# If there are neither one or 2 parts, the code should fail
fail "No defined extraction method to handle #{parts.length} parts"
end
end
def create_human_readable_id(id)
@id = id
end
# Get the conjunction code, ALL_TRUE or AT_LEAST_ONE_TRUE
# @return [String] conjunction code
def conjunction_code
case @type
when HQMF::PopulationCriteria::IPP, HQMF::PopulationCriteria::DENOM, HQMF::PopulationCriteria::NUMER,
HQMF::PopulationCriteria::MSRPOPL, HQMF::PopulationCriteria::STRAT
HQMF::Precondition::ALL_TRUE
when HQMF::PopulationCriteria::DENEXCEP, HQMF::PopulationCriteria::DENEX, HQMF::PopulationCriteria::MSRPOPLEX,
HQMF::PopulationCriteria::NUMEX
HQMF::Precondition::AT_LEAST_ONE_TRUE
else
fail "Unknown population type [#{@type}]"
end
end
# Generates this classes hqmf-model equivalent
def to_model
mps = preconditions.collect(&:to_model)
HQMF::PopulationCriteria.new(id, hqmf_id, type, mps, title, aggregator, comments)
end
end
|
kontena/k8s-client | lib/k8s/client.rb | K8s.Client.get_resources | ruby | def get_resources(resources)
# prefetch api resources, skip missing APIs
resource_apis = apis(resources.map(&:apiVersion), prefetch_resources: true, skip_missing: true)
# map each resource to excon request options, or nil if resource is not (yet) defined
requests = resources.zip(resource_apis).map{ |resource, api_client|
next nil unless api_client.api_resources?
resource_client = api_client.client_for_resource(resource)
{
method: 'GET',
path: resource_client.path(resource.metadata.name, namespace: resource.metadata.namespace),
response_class: resource_client.resource_class
}
}
# map non-nil requests to response objects, or nil for nil request options
Util.compact_map(requests) { |reqs|
@transport.requests(*reqs, skip_missing: true)
}
end | Returns nils for any resources that do not exist.
This includes custom resources that were not yet defined.
@param resources [Array<K8s::Resource>]
@return [Array<K8s::Resource, nil>] matching resources array 1:1 | train | https://github.com/kontena/k8s-client/blob/efa19f43202a5d8840084a804afb936a57dc5bdd/lib/k8s/client.rb#L232-L253 | class Client
# @param config [K8s::Config]
# @param namespace [String] @see #initialize
# @param options [Hash] @see Transport.config
# @return [K8s::Client]
def self.config(config, namespace: nil, **options)
new(
Transport.config(config, **options),
namespace: namespace
)
end
# An K8s::Client instance from in-cluster config within a kube pod, using the kubernetes service envs and serviceaccount secrets
# @see K8s::Transport#in_cluster_config
#
# @param namespace [String] default namespace for all operations
# @param options [Hash] options passed to transport, @see Transport#in_cluster_config
# @return [K8s::Client]
# @raise [K8s::Error::Config,Errno::ENOENT,Errno::EACCES]
def self.in_cluster_config(namespace: nil, **options)
new(Transport.in_cluster_config(**options), namespace: namespace)
end
# Attempts to create a K8s::Client instance automatically using environment variables, existing configuration
# files or in cluster configuration.
#
# Look-up order:
# - KUBE_TOKEN, KUBE_CA, KUBE_SERVER environment variables
# - KUBECONFIG environment variable
# - $HOME/.kube/config file
# - In cluster configuration
#
# Will raise when no means of configuration is available
#
# @param options [Hash] default namespace for all operations
# @raise [K8s::Error::Config,Errno::ENOENT,Errno::EACCES]
# @return [K8s::Client]
def self.autoconfig(namespace: nil, **options)
if ENV.values_at('KUBE_TOKEN', 'KUBE_CA', 'KUBE_SERVER').none? { |v| v.nil? || v.empty? }
unless Base64.decode64(ENV['KUBE_CA']).match?(/CERTIFICATE/)
raise ArgumentError, 'KUBE_CA does not seem to be base64 encoded'
end
begin
token = options[:auth_token] || Base64.strict_decode64(ENV['KUBE_TOKEN'])
rescue ArgumentError
raise ArgumentError, 'KUBE_TOKEN does not seem to be base64 encoded'
end
configuration = K8s::Config.build(server: ENV['KUBE_SERVER'], ca: ENV['KUBE_CA'], auth_token: token)
elsif !ENV['KUBECONFIG'].to_s.empty?
configuration = K8s::Config.from_kubeconfig_env(ENV['KUBECONFIG'])
elsif File.exist?(File.join(Dir.home, '.kube', 'config'))
configuration = K8s::Config.load_file(File.join(Dir.home, '.kube', 'config'))
end
if configuration
config(configuration, namespace: namespace, **options)
else
in_cluster_config(namespace: namespace, **options)
end
end
include MonitorMixin
attr_reader :transport
# @param transport [K8s::Transport]
# @param namespace [String, nil] default namespace for all operations
def initialize(transport, namespace: nil)
@transport = transport
@namespace = namespace
@api_clients = {}
super()
end
# @raise [K8s::Error]
# @return [K8s::API::Version]
def version
@version ||= @transport.version
end
# @param api_version [String] "group/version" or "version" (core)
# @return [APIClient]
def api(api_version = 'v1')
@api_clients[api_version] ||= APIClient.new(@transport, api_version)
end
# Force-update /apis cache.
# Required if creating new CRDs/apiservices.
#
# @return [Array<String>]
def api_groups!
synchronize do
@api_groups = @transport.get(
'/apis',
response_class: K8s::API::MetaV1::APIGroupList
).groups.flat_map{ |api_group| api_group.versions.map(&:groupVersion) }
@api_clients.clear
end
@api_groups
end
# Cached /apis preferred group apiVersions
# @return [Array<String>]
def api_groups
@api_groups || api_groups!
end
# @param api_versions [Array<String>] defaults to all APIs
# @param prefetch_resources [Boolean] prefetch any missing api_resources for each api_version
# @param skip_missing [Boolean] return APIClient without api_resources? if 404
# @return [Array<APIClient>]
def apis(api_versions = nil, prefetch_resources: false, skip_missing: false)
api_versions ||= ['v1'] + api_groups
if prefetch_resources
# api groups that are missing their api_resources
api_paths = api_versions
.uniq
.reject{ |api_version| api(api_version).api_resources? }
.map{ |api_version| APIClient.path(api_version) }
# load into APIClient.api_resources=
begin
@transport.gets(*api_paths, response_class: K8s::API::MetaV1::APIResourceList, skip_missing: skip_missing).each do |api_resource_list|
api(api_resource_list.groupVersion).api_resources = api_resource_list.resources if api_resource_list
end
rescue K8s::Error::NotFound, K8s::Error::ServiceUnavailable # rubocop:disable Lint/HandleExceptions
# kubernetes api is in unstable state
# because this is only performance optimization, better to skip prefetch and move on
end
end
api_versions.map{ |api_version| api(api_version) }
end
# @param namespace [String, nil]
# @return [Array<K8s::ResourceClient>]
def resources(namespace: nil)
apis(prefetch_resources: true).map { |api|
begin
api.resources(namespace: namespace)
rescue K8s::Error::ServiceUnavailable, K8s::Error::NotFound
[]
end
}.flatten
end
# Pipeline list requests for multiple resource types.
#
# Returns flattened array with mixed resource kinds.
#
# @param resources [Array<K8s::ResourceClient>] default is all listable resources for api
# @param options @see K8s::ResourceClient#list
# @return [Array<K8s::Resource>]
def list_resources(resources = nil, **options)
cached_clients = @api_clients.size.positive?
resources ||= self.resources.select(&:list?)
begin
ResourceClient.list(resources, @transport, **options)
rescue K8s::Error::NotFound
raise unless cached_clients
cached_clients = false
api_groups!
retry
end
end
# @param resource [K8s::Resource]
# @param namespace [String, nil] default if resource is missing namespace
# @raise [K8s::Error::NotFound] API Group does not exist
# @raise [K8s::Error::UndefinedResource]
# @return [K8s::ResourceClient]
def client_for_resource(resource, namespace: nil)
api(resource.apiVersion).client_for_resource(resource, namespace: namespace)
end
# @param resource [K8s::Resource]
# @return [K8s::Resource]
def create_resource(resource)
client_for_resource(resource).create_resource(resource)
end
# @param resource [K8s::Resource]
# @return [K8s::Resource]
def get_resource(resource)
client_for_resource(resource).get_resource(resource)
end
# Returns nils for any resources that do not exist.
# This includes custom resources that were not yet defined.
#
# @param resources [Array<K8s::Resource>]
# @return [Array<K8s::Resource, nil>] matching resources array 1:1
# @param resource [K8s::Resource]
# @return [K8s::Resource]
def update_resource(resource)
client_for_resource(resource).update_resource(resource)
end
# @param resource [K8s::Resource]
# @param options [Hash]
# @see ResourceClient#delete for options
# @return [K8s::Resource]
def delete_resource(resource, **options)
client_for_resource(resource).delete_resource(resource, **options)
end
# @param resource [K8s::Resource]
# @param attrs [Hash]
# @return [K8s::Client]
def patch_resource(resource, attrs)
client_for_resource(resource).json_patch(resource.metadata.name, attrs)
end
end
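Session sketch (connection details come from the environment via autoconfig; the Service below is hypothetical):

client = K8s::Client.autoconfig(namespace: 'default')
service = K8s::Resource.new(
  apiVersion: 'v1',
  kind: 'Service',
  metadata: { namespace: 'default', name: 'whoami' }
)
client.get_resources([service]) # => [K8s::Resource] or [nil] if it does not exist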
|
rlisowski/open_flash_chart_2_plugin | lib/ofc2.rb | OFC2.OWJSON.method_missing | ruby | def method_missing(method_id, *arguments)
a = arguments[0] if arguments and arguments.size > 0
method = method_id.to_s
if method =~ /^(.*)(=)$/
self.instance_variable_set("@#{$1.gsub('_','__')}", a)
elsif method =~ /^(set_)(.*)$/
self.instance_variable_set("@#{$2.gsub('_','__')}", a)
elsif self.instance_variable_defined?("@#{method_id.to_s.gsub('_','__')}")
self.instance_variable_get("@#{method_id.to_s.gsub('_','__')}") # that will be return instance variable value or nil, handy
else
nil
end
end | If you use Rails older than 2.3 you probably have to uncomment that method and add "config.gem 'json'" to the config/environment.rb file,
otherwise the to_json method will not work properly.
# You can pass options to the to_json method, but remember that they have no effect!
# The 'options' argument exists only for Rails compatibility.
# def to_json(options = {})
#   to_hash.to_json
# end
method_missing handles setting and getting instance variables.
You can set a variable in two ways:
1. variable_name = value
2. set_variable_name(value)
You can only get variables that have already been set; otherwise nil is returned. | train | https://github.com/rlisowski/open_flash_chart_2_plugin/blob/2ee7f7d3b81ee9c6773705d3b919df8688972361/lib/ofc2.rb#L27-L39 | module OWJSON
# return a hash of instance values
def to_hash
self.instance_values
end
alias :to_h :to_hash
# If you use Rails older than 2.3 you probably have to uncomment that method and add "config.gem 'json'" to the config/environment.rb file,
# otherwise the to_json method will not work properly.
# # You can pass options to the to_json method, but remember that they have no effect!
# # The 'options' argument exists only for Rails compatibility.
# # def to_json(options = {})
# #   to_hash.to_json
# # end
# method_missing handles setting and getting instance variables.
# You can set a variable in two ways:
# 1. variable_name = value
# 2. set_variable_name(value)
# You can only get variables that have already been set; otherwise nil is returned.
end
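Behavior sketch of the dynamic accessors (the Title class is hypothetical, and the module is assumed includable as shown; the real namespace may be OFC2::OWJSON):

class Title
  include OWJSON
end

t = Title.new
t.text = 'Sales 2009'    # variable_name = value
t.set_colour('#FF0000')  # set_variable_name(value)
t.text      # => "Sales 2009"
t.missing   # => nil, never set
t.to_h      # => {"text"=>"Sales 2009", "colour"=>"#FF0000"}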
|
hashicorp/vault-ruby | lib/vault/persistent.rb | Vault.PersistentHTTP.start | ruby | def start http
http.set_debug_output @debug_output if @debug_output
http.open_timeout = @open_timeout if @open_timeout
http.start
socket = http.instance_variable_get :@socket
if socket then # for fakeweb
@socket_options.each do |option|
socket.io.setsockopt(*option)
end
end
end | Starts the Net::HTTP +connection+ | train | https://github.com/hashicorp/vault-ruby/blob/02f0532a802ba1a2a0d8703a4585dab76eb9d864/lib/vault/persistent.rb#L694-L707 | class PersistentHTTP
##
# The beginning of Time
EPOCH = Time.at 0 # :nodoc:
##
# Is OpenSSL available? This test works with autoload
HAVE_OPENSSL = defined? OpenSSL::SSL # :nodoc:
##
# The default connection pool size is 1/4 the allowed open files.
DEFAULT_POOL_SIZE = 16
##
# The version of PersistentHTTP you are using
VERSION = '3.0.0'
##
# Exceptions rescued for automatic retry on ruby 2.0.0. This overlaps with
# the exception list for ruby 1.x.
RETRIED_EXCEPTIONS = [ # :nodoc:
(Net::ReadTimeout if Net.const_defined? :ReadTimeout),
IOError,
EOFError,
Errno::ECONNRESET,
Errno::ECONNABORTED,
Errno::EPIPE,
(OpenSSL::SSL::SSLError if HAVE_OPENSSL),
Timeout::Error,
].compact
##
# Error class for errors raised by PersistentHTTP. Various
# SystemCallErrors are re-raised with a human-readable message under this
# class.
class Error < StandardError; end
##
# Use this method to detect the idle timeout of the host at +uri+. The
# value returned can be used to configure #idle_timeout. +max+ controls the
# maximum idle timeout to detect.
#
#
# performing a HEAD request in a loop until the connection terminates
# waiting one additional second per loop.
#
# NOTE: This may not work on ruby > 1.9.
def self.detect_idle_timeout uri, max = 10
uri = URI uri unless URI::Generic === uri
uri += '/'
req = Net::HTTP::Head.new uri.request_uri
http = new 'net-http-persistent detect_idle_timeout'
http.connection_for uri do |connection|
sleep_time = 0
http = connection.http
loop do
response = http.request req
$stderr.puts "HEAD #{uri} => #{response.code}" if $DEBUG
unless Net::HTTPOK === response then
raise Error, "bad response code #{response.code} detecting idle timeout"
end
break if sleep_time >= max
sleep_time += 1
$stderr.puts "sleeping #{sleep_time}" if $DEBUG
sleep sleep_time
end
end
rescue
# ignore StandardErrors, we've probably found the idle timeout.
ensure
return sleep_time unless $!
end
##
# This client's OpenSSL::X509::Certificate
attr_reader :certificate
##
# For Net::HTTP parity
alias cert certificate
##
# An SSL certificate authority. Setting this will set verify_mode to
# VERIFY_PEER.
attr_reader :ca_file
##
# A directory of SSL certificates to be used as certificate authorities.
# Setting this will set verify_mode to VERIFY_PEER.
attr_reader :ca_path
##
# An SSL certificate store. Setting this will override the default
# certificate store. See verify_mode for more information.
attr_reader :cert_store
##
# The ciphers allowed for SSL connections
attr_reader :ciphers
##
# Sends debug_output to this IO via Net::HTTP#set_debug_output.
#
# Never use this method in production code, it causes a serious security
# hole.
attr_accessor :debug_output
##
# Current connection generation
attr_reader :generation # :nodoc:
##
# Headers that are added to every request using Net::HTTP#add_field
attr_reader :headers
##
# Maps host:port to an HTTP version. This allows us to enable version
# specific features.
attr_reader :http_versions
##
# Maximum time an unused connection can remain idle before being
# automatically closed.
attr_accessor :idle_timeout
##
# Maximum number of requests on a connection before it is considered expired
# and automatically closed.
attr_accessor :max_requests
##
# The value sent in the Keep-Alive header. Defaults to 30. Not needed for
# HTTP/1.1 servers.
#
# This may not work correctly for HTTP/1.0 servers
#
# This method may be removed in a future version as RFC 2616 does not
# require this header.
attr_accessor :keep_alive
##
# A name for this connection. Allows you to keep your connections apart
# from everybody else's.
attr_reader :name
##
# Seconds to wait until a connection is opened. See Net::HTTP#open_timeout
attr_accessor :open_timeout
##
# Headers that are added to every request using Net::HTTP#[]=
attr_reader :override_headers
##
# This client's SSL private key
attr_reader :private_key
##
# For Net::HTTP parity
alias key private_key
##
# The URL through which requests will be proxied
attr_reader :proxy_uri
##
# List of host suffixes which will not be proxied
attr_reader :no_proxy
##
# Test-only accessor for the connection pool
attr_reader :pool # :nodoc:
##
# Seconds to wait until reading one block. See Net::HTTP#read_timeout
attr_accessor :read_timeout
##
# By default SSL sessions are reused to avoid extra SSL handshakes. Set
# this to false if you have problems communicating with an HTTPS server
# like:
#
# SSL_connect [...] read finished A: unexpected message (OpenSSL::SSL::SSLError)
attr_accessor :reuse_ssl_sessions
##
# An array of options for Socket#setsockopt.
#
# By default the TCP_NODELAY option is set on sockets.
#
# To set additional options append them to this array:
#
# http.socket_options << [Socket::SOL_SOCKET, Socket::SO_KEEPALIVE, 1]
attr_reader :socket_options
##
# Current SSL connection generation
attr_reader :ssl_generation # :nodoc:
##
# SSL session lifetime
attr_reader :ssl_timeout
##
# SSL version to use.
#
# By default, the version will be negotiated automatically between client
# and server. Ruby 1.9 and newer only.
attr_reader :ssl_version
##
# Where this instance's last-use times live in the thread local variables
attr_reader :timeout_key # :nodoc:
##
# SSL verification callback. Used when ca_file or ca_path is set.
attr_reader :verify_callback
##
# Sets the depth of SSL certificate verification
attr_reader :verify_depth
##
# HTTPS verify mode. Defaults to OpenSSL::SSL::VERIFY_PEER which verifies
# the server certificate.
#
# If no ca_file, ca_path or cert_store is set the default system certificate
# store is used.
#
# You can use +verify_mode+ to override any default values.
attr_reader :verify_mode
##
# Enable retries of non-idempotent requests that change data (e.g. POST
# requests) when the server has disconnected.
#
# This will in the worst case lead to multiple requests with the same data,
# but it may be useful for some applications. Take care when enabling
# this option to ensure it is safe to POST or perform other non-idempotent
# requests to the server.
attr_accessor :retry_change_requests
##
# Creates a new PersistentHTTP.
#
# Set +name+ to keep your connections apart from everybody else's. Not
# required currently, but highly recommended. Your library name should be
# good enough. This parameter will be required in a future version.
#
# +proxy+ may be set to a URI::HTTP or :ENV to pick up proxy options from
# the environment. See proxy_from_env for details.
#
# In order to use a URI for the proxy you may need to do some extra work
# beyond URI parsing if the proxy requires a password:
#
# proxy = URI 'http://proxy.example'
# proxy.user = 'AzureDiamond'
# proxy.password = 'hunter2'
#
# Set +pool_size+ to limit the maximum number of connections allowed.
# Defaults to 1/4 the number of allowed file handles. You can have no more
# than this many threads with active HTTP transactions.
def initialize name=nil, proxy=nil, pool_size=DEFAULT_POOL_SIZE
@name = name
@debug_output = nil
@proxy_uri = nil
@no_proxy = []
@headers = {}
@override_headers = {}
@http_versions = {}
@keep_alive = 30
@open_timeout = nil
@read_timeout = nil
@idle_timeout = 5
@max_requests = nil
@socket_options = []
@ssl_generation = 0 # incremented when SSL session variables change
@socket_options << [Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1] if
Socket.const_defined? :TCP_NODELAY
@pool = PersistentHTTP::Pool.new size: pool_size do |http_args|
PersistentHTTP::Connection.new Net::HTTP, http_args, @ssl_generation
end
@certificate = nil
@ca_file = nil
@ca_path = nil
@ciphers = nil
@private_key = nil
@ssl_timeout = nil
@ssl_version = nil
@verify_callback = nil
@verify_depth = nil
@verify_mode = nil
@cert_store = nil
@generation = 0 # incremented when proxy URI changes
if HAVE_OPENSSL then
@verify_mode = OpenSSL::SSL::VERIFY_PEER
@reuse_ssl_sessions = OpenSSL::SSL.const_defined? :Session
end
@retry_change_requests = false
self.proxy = proxy if proxy
end
##
# Sets this client's OpenSSL::X509::Certificate
def certificate= certificate
@certificate = certificate
reconnect_ssl
end
# For Net::HTTP parity
alias cert= certificate=
##
# Sets the SSL certificate authority file.
def ca_file= file
@ca_file = file
reconnect_ssl
end
##
# Sets the SSL certificate authority path.
def ca_path= path
@ca_path = path
reconnect_ssl
end
##
# Overrides the default SSL certificate store used for verifying
# connections.
def cert_store= store
@cert_store = store
reconnect_ssl
end
##
# The ciphers allowed for SSL connections
def ciphers= ciphers
@ciphers = ciphers
reconnect_ssl
end
##
# Creates a new connection for +uri+
def connection_for uri
use_ssl = uri.scheme.downcase == 'https'
net_http_args = [uri.host, uri.port]
net_http_args.concat @proxy_args if
@proxy_uri and not proxy_bypass? uri.host, uri.port
connection = @pool.checkout net_http_args
http = connection.http
connection.ressl @ssl_generation if
connection.ssl_generation != @ssl_generation
if not http.started? then
ssl http if use_ssl
start http
elsif expired? connection then
reset connection
end
http.read_timeout = @read_timeout if @read_timeout
http.keep_alive_timeout = @idle_timeout if @idle_timeout
return yield connection
rescue Errno::ECONNREFUSED
address = http.proxy_address || http.address
port = http.proxy_port || http.port
raise Error, "connection refused: #{address}:#{port}"
rescue Errno::EHOSTDOWN
address = http.proxy_address || http.address
port = http.proxy_port || http.port
raise Error, "host down: #{address}:#{port}"
ensure
# Only perform checkin if we successfully checked a connection out
if connection
@pool.checkin net_http_args
end
end
##
# Returns an error message containing the number of requests performed on
# this connection
def error_message connection
connection.requests -= 1 # fixup
age = Time.now - connection.last_use
"after #{connection.requests} requests on #{connection.http.object_id}, " \
"last used #{age} seconds ago"
end
##
# URI::escape wrapper
def escape str
CGI.escape str if str
end
##
# URI::unescape wrapper
def unescape str
CGI.unescape str if str
end
##
# Returns true if the connection should be reset due to an idle timeout, or
# maximum request count, false otherwise.
def expired? connection
return true if @max_requests && connection.requests >= @max_requests
return false unless @idle_timeout
return true if @idle_timeout.zero?
Time.now - connection.last_use > @idle_timeout
end
##
# Starts the Net::HTTP +connection+
##
# Finishes the Net::HTTP +connection+
def finish connection
connection.finish
connection.http.instance_variable_set :@ssl_session, nil unless
@reuse_ssl_sessions
end
##
# Returns the HTTP protocol version for +uri+
def http_version uri
@http_versions["#{uri.host}:#{uri.port}"]
end
##
# Is +req+ idempotent according to RFC 2616?
def idempotent? req
case req
when Net::HTTP::Delete, Net::HTTP::Get, Net::HTTP::Head,
Net::HTTP::Options, Net::HTTP::Put, Net::HTTP::Trace then
true
end
end
##
# Is the request +req+ idempotent or is retry_change_requests allowed.
def can_retry? req
@retry_change_requests && !idempotent?(req)
end
##
# Adds "http://" to the String +uri+ if it is missing.
def normalize_uri uri
(uri =~ /^https?:/) ? uri : "http://#{uri}"
end
##
# Pipelines +requests+ to the HTTP server at +uri+ yielding responses if a
# block is given. Returns all responses recieved.
#
# See
# Net::HTTP::Pipeline[http://docs.seattlerb.org/net-http-pipeline/Net/HTTP/Pipeline.html]
# for further details.
#
# Only if <tt>net-http-pipeline</tt> was required before
# <tt>net-http-persistent</tt> #pipeline will be present.
def pipeline uri, requests, &block # :yields: responses
connection_for uri do |connection|
connection.http.pipeline requests, &block
end
end
##
# Sets this client's SSL private key
def private_key= key
@private_key = key
reconnect_ssl
end
# For Net::HTTP parity
alias key= private_key=
##
# Sets the proxy server. The +proxy+ may be the URI of the proxy server,
# the symbol +:ENV+ which will read the proxy from the environment or nil to
# disable use of a proxy. See #proxy_from_env for details on setting the
# proxy from the environment.
#
# If the proxy URI is set after requests have been made, the next request
# will shut-down and re-open all connections.
#
# The +no_proxy+ query parameter can be used to specify hosts which shouldn't
# be reached via proxy; if set it should be a comma separated list of
# hostname suffixes, optionally with +:port+ appended, for example
# <tt>example.com,some.host:8080</tt>.
def proxy= proxy
@proxy_uri = case proxy
when :ENV then proxy_from_env
when URI::HTTP then proxy
when nil then # ignore
else raise ArgumentError, 'proxy must be :ENV or a URI::HTTP'
end
@no_proxy.clear
if @proxy_uri then
@proxy_args = [
@proxy_uri.host,
@proxy_uri.port,
unescape(@proxy_uri.user),
unescape(@proxy_uri.password),
]
@proxy_connection_id = [nil, *@proxy_args].join ':'
if @proxy_uri.query then
@no_proxy = CGI.parse(@proxy_uri.query)['no_proxy'].join(',').downcase.split(',').map { |x| x.strip }.reject { |x| x.empty? }
end
end
reconnect
reconnect_ssl
end
##
# Creates a URI for an HTTP proxy server from ENV variables.
#
# If +HTTP_PROXY+ is set a proxy will be returned.
#
# If +HTTP_PROXY_USER+ or +HTTP_PROXY_PASS+ are set the URI is given the
# indicated user and password unless HTTP_PROXY contains either of these in
# the URI.
#
# The +NO_PROXY+ ENV variable can be used to specify hosts which shouldn't
# be reached via proxy; if set it should be a comma separated list of
# hostname suffixes, optionally with +:port+ appended, for example
# <tt>example.com,some.host:8080</tt>. When set to <tt>*</tt> no proxy will
# be returned.
#
# For Windows users, lowercase ENV variables are preferred over uppercase ENV
# variables.
def proxy_from_env
env_proxy = ENV['http_proxy'] || ENV['HTTP_PROXY']
return nil if env_proxy.nil? or env_proxy.empty?
uri = URI normalize_uri env_proxy
env_no_proxy = ENV['no_proxy'] || ENV['NO_PROXY']
# '*' is special case for always bypass
return nil if env_no_proxy == '*'
if env_no_proxy then
uri.query = "no_proxy=#{escape(env_no_proxy)}"
end
unless uri.user or uri.password then
uri.user = escape ENV['http_proxy_user'] || ENV['HTTP_PROXY_USER']
uri.password = escape ENV['http_proxy_pass'] || ENV['HTTP_PROXY_PASS']
end
uri
end
##
# Returns true when proxy should by bypassed for host.
def proxy_bypass? host, port
host = host.downcase
host_port = [host, port].join ':'
@no_proxy.each do |name|
return true if host[-name.length, name.length] == name or
host_port[-name.length, name.length] == name
end
false
end
##
# Forces reconnection of HTTP connections.
def reconnect
@generation += 1
end
##
# Forces reconnection of SSL connections.
def reconnect_ssl
@ssl_generation += 1
end
##
# Finishes then restarts the Net::HTTP +connection+
def reset connection
http = connection.http
finish connection
start http
rescue Errno::ECONNREFUSED
e = Error.new "connection refused: #{http.address}:#{http.port}"
e.set_backtrace $@
raise e
rescue Errno::EHOSTDOWN
e = Error.new "host down: #{http.address}:#{http.port}"
e.set_backtrace $@
raise e
end
##
# Makes a request on +uri+. If +req+ is nil a Net::HTTP::Get is performed
# against +uri+.
#
# If a block is passed #request behaves like Net::HTTP#request (the body of
# the response will not have been read).
#
# +req+ must be a Net::HTTPRequest subclass (see Net::HTTP for a list).
#
# If there is an error and the request is idempotent according to RFC 2616
# it will be retried automatically.
def request uri, req = nil, &block
retried = false
bad_response = false
uri = URI uri
req = request_setup req || uri
response = nil
connection_for uri do |connection|
http = connection.http
begin
connection.requests += 1
response = http.request req, &block
if req.connection_close? or
(response.http_version <= '1.0' and
not response.connection_keep_alive?) or
response.connection_close? then
finish connection
end
rescue Net::HTTPBadResponse => e
message = error_message connection
finish connection
raise Error, "too many bad responses #{message}" if
bad_response or not can_retry? req
bad_response = true
retry
rescue *RETRIED_EXCEPTIONS => e
request_failed e, req, connection if
retried or not can_retry? req
reset connection
retried = true
retry
rescue Errno::EINVAL, Errno::ETIMEDOUT => e # not retried on ruby 2
request_failed e, req, connection if retried or not can_retry? req
reset connection
retried = true
retry
rescue Exception => e
finish connection
raise
ensure
connection.last_use = Time.now
end
end
@http_versions["#{uri.host}:#{uri.port}"] ||= response.http_version
response
end
##
# Raises an Error for +exception+ which resulted from attempting the request
# +req+ on the +connection+.
#
# Finishes the +connection+.
def request_failed exception, req, connection # :nodoc:
due_to = "(due to #{exception.message} - #{exception.class})"
message = "too many connection resets #{due_to} #{error_message connection}"
finish connection
raise Error, message, exception.backtrace
end
##
# Creates a GET request if +req_or_uri+ is a URI and adds headers to the
# request.
#
# Returns the request.
def request_setup req_or_uri # :nodoc:
req = if URI === req_or_uri then
Net::HTTP::Get.new req_or_uri.request_uri
else
req_or_uri
end
@headers.each do |pair|
req.add_field(*pair)
end
@override_headers.each do |name, value|
req[name] = value
end
unless req['Connection'] then
req.add_field 'Connection', 'keep-alive'
req.add_field 'Keep-Alive', @keep_alive
end
req
end
##
# Shuts down all connections
#
# *NOTE*: Calling shutdown for can be dangerous!
#
# If any thread is still using a connection it may cause an error! Call
# #shutdown when you are completely done making requests!
def shutdown
@pool.available.shutdown do |http|
http.finish
end
end
##
# Enables SSL on +connection+
def ssl connection
connection.use_ssl = true
connection.ciphers = @ciphers if @ciphers
connection.ssl_timeout = @ssl_timeout if @ssl_timeout
connection.ssl_version = @ssl_version if @ssl_version
connection.verify_depth = @verify_depth
connection.verify_mode = @verify_mode
if OpenSSL::SSL::VERIFY_PEER == OpenSSL::SSL::VERIFY_NONE and
not Object.const_defined?(:I_KNOW_THAT_OPENSSL_VERIFY_PEER_EQUALS_VERIFY_NONE_IS_WRONG) then
warn <<-WARNING
!!!SECURITY WARNING!!!
The SSL HTTP connection to:
#{connection.address}:#{connection.port}
!!!MAY NOT BE VERIFIED!!!
On your platform your OpenSSL implementation is broken.
There is no difference between the values of VERIFY_NONE and VERIFY_PEER.
This means that attempting to verify the security of SSL connections may not
work. This exposes you to man-in-the-middle exploits, snooping on the
contents of your connection and other dangers to the security of your data.
To disable this warning define the following constant at top-level in your
application:
I_KNOW_THAT_OPENSSL_VERIFY_PEER_EQUALS_VERIFY_NONE_IS_WRONG = nil
WARNING
end
connection.ca_file = @ca_file if @ca_file
connection.ca_path = @ca_path if @ca_path
if @ca_file or @ca_path then
connection.verify_callback = @verify_callback if @verify_callback
end
if @certificate and @private_key then
connection.cert = @certificate
connection.key = @private_key
end
connection.cert_store = if @cert_store then
@cert_store
else
store = OpenSSL::X509::Store.new
store.set_default_paths
store
end
end
##
# SSL session lifetime
def ssl_timeout= ssl_timeout
@ssl_timeout = ssl_timeout
reconnect_ssl
end
##
# SSL version to use
def ssl_version= ssl_version
@ssl_version = ssl_version
reconnect_ssl
end
##
# Sets the depth of SSL certificate verification
def verify_depth= verify_depth
@verify_depth = verify_depth
reconnect_ssl
end
##
# Sets the HTTPS verify mode. Defaults to OpenSSL::SSL::VERIFY_PEER.
#
# Setting this to VERIFY_NONE is a VERY BAD IDEA and should NEVER be used.
# Securely transfer the correct certificate and update the default
# certificate store or set the ca file instead.
def verify_mode= verify_mode
@verify_mode = verify_mode
reconnect_ssl
end
##
# SSL verification callback.
def verify_callback= callback
@verify_callback = callback
reconnect_ssl
end
end
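Lifecycle sketch (the URI is a placeholder):

uri  = URI('https://vault.example.com/v1/sys/health')
http = Vault::PersistentHTTP.new('my-app')
http.read_timeout = 30
http.request(uri)  # GET over a pooled keep-alive connection
http.shutdown      # close all pooled connections when completely done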
|
alexreisner/geocoder | lib/geocoder/calculations.rb | Geocoder.Calculations.coordinates_present? | ruby | def coordinates_present?(*args)
args.each do |a|
# note that Float::NAN != Float::NAN
# still, this could probably be improved:
return false if (!a.is_a?(Numeric) or a.to_s == "NaN")
end
true
end | Returns true if all given arguments are valid latitude/longitude values. | train | https://github.com/alexreisner/geocoder/blob/e087dc2759264ee6f307b926bb2de4ec2406859e/lib/geocoder/calculations.rb#L44-L51 | module Calculations
extend self
##
# Compass point names, listed clockwise starting at North.
#
# If you want bearings named using more, fewer, or different points
# override Geocoder::Calculations.COMPASS_POINTS with your own array.
#
COMPASS_POINTS = %w[N NE E SE S SW W NW]
##
# Conversion factor: multiply by kilometers to get miles.
#
KM_IN_MI = 0.621371192
##
# Conversion factor: multiply by nautical miles to get miles.
#
KM_IN_NM = 0.539957
##
# Conversion factor: multiply by radians to get degrees.
#
DEGREES_PER_RADIAN = 57.2957795
##
# Radius of the Earth, in kilometers.
# Value taken from: http://en.wikipedia.org/wiki/Earth_radius
#
EARTH_RADII = {km: 6371.0}
EARTH_RADII[:mi] = EARTH_RADII[:km] * KM_IN_MI
EARTH_RADII[:nm] = EARTH_RADII[:km] * KM_IN_NM
EARTH_RADIUS = EARTH_RADII[:km] # TODO: deprecate this constant (use `EARTH_RADII[:km]`)
# Not a number constant
NAN = defined?(::Float::NAN) ? ::Float::NAN : 0 / 0.0
##
# Returns true if all given arguments are valid latitude/longitude values.
#
##
# Distance spanned by one degree of latitude in the given units.
#
def latitude_degree_distance(units = nil)
2 * Math::PI * earth_radius(units) / 360
end
##
# Distance spanned by one degree of longitude at the given latitude.
# This ranges from around 69 miles at the equator to zero at the poles.
#
def longitude_degree_distance(latitude, units = nil)
latitude_degree_distance(units) * Math.cos(to_radians(latitude))
end
##
# Distance between two points on Earth (Haversine formula).
# Takes two points and an options hash.
# The points are given in the same way that points are given to all
# Geocoder methods that accept points as arguments. They can be:
#
# * an array of coordinates ([lat,lon])
# * a geocodable address (string)
# * a geocoded object (one which implements a +to_coordinates+ method
# which returns a [lat,lon] array
#
# The options hash supports:
#
# * <tt>:units</tt> - <tt>:mi</tt> or <tt>:km</tt>
# Use Geocoder.configure(:units => ...) to configure default units.
#
def distance_between(point1, point2, options = {})
# convert to coordinate arrays
point1 = extract_coordinates(point1)
point2 = extract_coordinates(point2)
# convert degrees to radians
point1 = to_radians(point1)
point2 = to_radians(point2)
# compute deltas
dlat = point2[0] - point1[0]
dlon = point2[1] - point1[1]
a = (Math.sin(dlat / 2))**2 + Math.cos(point1[0]) *
(Math.sin(dlon / 2))**2 * Math.cos(point2[0])
c = 2 * Math.atan2( Math.sqrt(a), Math.sqrt(1-a))
c * earth_radius(options[:units])
end
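##
# Usage sketch (editor's addition, not in the original source), assuming
# the default units of :mi:
#
# Geocoder::Calculations.distance_between([40.71, -74.00], [34.05, -118.24])
# # => approximately 2446 (the New York - Los Angeles great-circle distance, in miles)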
##
# Bearing between two points on Earth.
# Returns a number of degrees from due north (clockwise).
#
# See Geocoder::Calculations.distance_between for
# ways of specifying the points. Also accepts an options hash:
#
# * <tt>:method</tt> - <tt>:linear</tt> or <tt>:spherical</tt>;
# the spherical method is "correct" in that it returns the shortest path
# (one along a great circle) but the linear method is less confusing
# (returns due east or west when given two points with the same latitude).
# Use Geocoder.configure(:distances => ...) to configure calculation method.
#
# Based on: http://www.movable-type.co.uk/scripts/latlong.html
#
def bearing_between(point1, point2, options = {})
# set default options
options[:method] ||= Geocoder.config.distances
options[:method] = :linear unless options[:method] == :spherical
# convert to coordinate arrays
point1 = extract_coordinates(point1)
point2 = extract_coordinates(point2)
# convert degrees to radians
point1 = to_radians(point1)
point2 = to_radians(point2)
# compute deltas
dlat = point2[0] - point1[0]
dlon = point2[1] - point1[1]
case options[:method]
when :linear
y = dlon
x = dlat
when :spherical
y = Math.sin(dlon) * Math.cos(point2[0])
x = Math.cos(point1[0]) * Math.sin(point2[0]) -
Math.sin(point1[0]) * Math.cos(point2[0]) * Math.cos(dlon)
end
bearing = Math.atan2(x,y)
# Answer is in radians counterclockwise from due east.
# Convert to degrees clockwise from due north:
(90 - to_degrees(bearing) + 360) % 360
end
##
# Translate a bearing (float) into a compass direction (string, eg "North").
#
def compass_point(bearing, points = COMPASS_POINTS)
seg_size = 360.0 / points.size
points[((bearing + (seg_size / 2)) % 360) / seg_size]
end
##
# Compute the geographic center (aka geographic midpoint, center of
# gravity) for an array of geocoded objects and/or [lat,lon] arrays
# (can be mixed). Any objects missing coordinates are ignored. Follows
# the procedure documented at http://www.geomidpoint.com/calculation.html.
#
def geographic_center(points)
# convert objects to [lat,lon] arrays and convert degrees to radians
coords = points.map{ |p| to_radians(extract_coordinates(p)) }
# convert to Cartesian coordinates
x = []; y = []; z = []
coords.each do |p|
x << Math.cos(p[0]) * Math.cos(p[1])
y << Math.cos(p[0]) * Math.sin(p[1])
z << Math.sin(p[0])
end
# compute average coordinate values
xa, ya, za = [x,y,z].map do |c|
c.inject(0){ |tot,i| tot += i } / c.size.to_f
end
# convert back to latitude/longitude
lon = Math.atan2(ya, xa)
hyp = Math.sqrt(xa**2 + ya**2)
lat = Math.atan2(za, hyp)
# return answer in degrees
to_degrees [lat, lon]
end
##
# Returns coordinates of the southwest and northeast corners of a box
# with the given point at its center. The radius is the shortest distance
# from the center point to any side of the box (the length of each side
# is twice the radius).
#
# This is useful for finding corner points of a map viewport, or for
# roughly limiting the possible solutions in a geo-spatial search
# (ActiveRecord queries use it thusly).
#
# See Geocoder::Calculations.distance_between for
# ways of specifying the point. Also accepts an options hash:
#
# * <tt>:units</tt> - <tt>:mi</tt> or <tt>:km</tt>.
# Use Geocoder.configure(:units => ...) to configure default units.
#
def bounding_box(point, radius, options = {})
lat,lon = extract_coordinates(point)
radius = radius.to_f
[
lat - (radius / latitude_degree_distance(options[:units])),
lon - (radius / longitude_degree_distance(lat, options[:units])),
lat + (radius / latitude_degree_distance(options[:units])),
lon + (radius / longitude_degree_distance(lat, options[:units]))
]
end
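##
# Usage sketch (editor's addition): a 20 km box around central London,
# returned as [sw_lat, sw_lon, ne_lat, ne_lon]:
#
# Geocoder::Calculations.bounding_box([51.5074, -0.1278], 20, units: :km)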
##
# Random point within a circle of provided radius centered
# around the provided point
# Takes one point, one radius, and an options hash.
# The points are given in the same way that points are given to all
# Geocoder methods that accept points as arguments. They can be:
#
# * an array of coordinates ([lat,lon])
# * a geocodable address (string)
# * a geocoded object (one which implements a +to_coordinates+ method
# which returns a [lat,lon] array
#
# The options hash supports:
#
# * <tt>:units</tt> - <tt>:mi</tt> or <tt>:km</tt>
# Use Geocoder.configure(:units => ...) to configure default units.
# * <tt>:seed</tt> - The seed for the random number generator
def random_point_near(center, radius, options = {})
random = Random.new(options[:seed] || Random.new_seed)
# convert to coordinate arrays
center = extract_coordinates(center)
earth_circumference = 2 * Math::PI * earth_radius(options[:units])
max_degree_delta = 360.0 * (radius / earth_circumference)
# random bearing in radians
theta = 2 * Math::PI * random.rand
# random radius, use the square root to ensure a uniform
# distribution of points over the circle
r = Math.sqrt(random.rand) * max_degree_delta
delta_lat, delta_long = [r * Math.cos(theta), r * Math.sin(theta)]
[center[0] + delta_lat, center[1] + delta_long]
end
##
# Given a start point, heading (in degrees), and distance, provides
# an endpoint.
# The starting point is given in the same way that points are given to all
# Geocoder methods that accept points as arguments. It can be:
#
# * an array of coordinates ([lat,lon])
# * a geocodable address (string)
# * a geocoded object (one which implements a +to_coordinates+ method
# which returns a [lat,lon] array
#
def endpoint(start, heading, distance, options = {})
radius = earth_radius(options[:units])
start = extract_coordinates(start)
# convert degrees to radians
start = to_radians(start)
lat = start[0]
lon = start[1]
heading = to_radians(heading)
distance = distance.to_f
end_lat = Math.asin(Math.sin(lat)*Math.cos(distance/radius) +
Math.cos(lat)*Math.sin(distance/radius)*Math.cos(heading))
end_lon = lon+Math.atan2(Math.sin(heading)*Math.sin(distance/radius)*Math.cos(lat),
Math.cos(distance/radius)-Math.sin(lat)*Math.sin(end_lat))
to_degrees [end_lat, end_lon]
end
##
# Convert degrees to radians.
# If an array (or multiple arguments) is passed,
# converts each value and returns array.
#
def to_radians(*args)
args = args.first if args.first.is_a?(Array)
if args.size == 1
args.first * (Math::PI / 180)
else
args.map{ |i| to_radians(i) }
end
end
##
# Convert radians to degrees.
# If an array (or multiple arguments) is passed,
# converts each value and returns array.
#
def to_degrees(*args)
args = args.first if args.first.is_a?(Array)
if args.size == 1
(args.first * 180.0) / Math::PI
else
args.map{ |i| to_degrees(i) }
end
end
def distance_to_radians(distance, units = nil)
distance.to_f / earth_radius(units)
end
def radians_to_distance(radians, units = nil)
radians * earth_radius(units)
end
##
# Convert miles to kilometers.
#
def to_kilometers(mi)
Geocoder.log(:warn, "DEPRECATION WARNING: Geocoder::Calculations.to_kilometers is deprecated and will be removed in Geocoder 1.5.0. Please multiply by MI_IN_KM instead.")
mi * mi_in_km
end
##
# Convert kilometers to miles.
#
def to_miles(km)
Geocoder.log(:warn, "DEPRECATION WARNING: Geocoder::Calculations.to_miles is deprecated and will be removed in Geocoder 1.5.0. Please multiply by KM_IN_MI instead.")
km * KM_IN_MI
end
##
# Convert kilometers to nautical miles.
#
def to_nautical_miles(km)
Geocoder.log(:warn, "DEPRECATION WARNING: Geocoder::Calculations.to_nautical_miles is deprecated and will be removed in Geocoder 1.5.0. Please multiply by KM_IN_NM instead.")
km * KM_IN_NM
end
##
# Radius of the Earth in the given units (:mi or :km).
# Use Geocoder.configure(:units => ...) to configure default units.
#
def earth_radius(units = nil)
EARTH_RADII[units || Geocoder.config.units]
end
##
# Conversion factor: km to mi.
#
def km_in_mi
Geocoder.log(:warn, "DEPRECATION WARNING: Geocoder::Calculations.km_in_mi is deprecated and will be removed in Geocoder 1.5.0. Please use the constant KM_IN_MI instead.")
KM_IN_MI
end
##
# Conversion factor: km to nm.
#
def km_in_nm
Geocoder.log(:warn, "DEPRECATION WARNING: Geocoder::Calculations.km_in_nm is deprecated and will be removed in Geocoder 1.5.0. Please use the constant KM_IN_NM instead.")
KM_IN_NM
end
##
# Conversion factor: mi to km.
#
def mi_in_km
Geocoder.log(:warn, "DEPRECATION WARNING: Geocoder::Calculations.mi_in_km is deprecated and will be removed in Geocoder 1.5.0. Please use 1.0 / KM_IN_MI instead.")
1.0 / KM_IN_MI
end
##
# Conversion factor: nm to km.
#
def nm_in_km
Geocoder.log(:warn, "DEPRECATION WARNING: Geocoder::Calculations.nm_in_km is deprecated and will be removed in Geocoder 1.5.0. Please use 1.0 / KM_IN_NM instead.")
1.0 / KM_IN_NM
end
##
# Takes an object which is a [lat,lon] array, a geocodable string,
# or an object that implements +to_coordinates+ and returns a
# [lat,lon] array. Note that if a string is passed this may be a slow-
# running method and may return nil.
#
def extract_coordinates(point)
case point
when Array
if point.size == 2
lat, lon = point
if !lat.nil? && lat.respond_to?(:to_f) and
!lon.nil? && lon.respond_to?(:to_f)
then
return [ lat.to_f, lon.to_f ]
end
end
when String
point = Geocoder.coordinates(point) and return point
else
if point.respond_to?(:to_coordinates)
if Array === array = point.to_coordinates
return extract_coordinates(array)
end
end
end
[ NAN, NAN ]
end
end
|
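A hedged usage sketch for the record above (assumes the geocoder gem is loaded):

require 'geocoder'

Geocoder::Calculations.coordinates_present?(40.71, -74.00)      # => true
Geocoder::Calculations.coordinates_present?(40.71, nil)         # => false (nil is not Numeric)
Geocoder::Calculations.coordinates_present?(Float::NAN, -74.0)  # => false (NaN is caught via its string form)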
hashicorp/vagrant | lib/vagrant/machine.rb | Vagrant.Machine.reload | ruby | def reload
old_id = @id
@id = nil
if @data_dir
# Read the id file from the data directory if it exists as the
# ID for the pre-existing physical representation of this machine.
id_file = @data_dir.join("id")
id_content = id_file.read.strip if id_file.file?
if !id_content.to_s.empty?
@id = id_content
end
end
if @id != old_id && @provider
# It changed, notify the provider
@provider.machine_id_changed
end
@id
end | This reloads the ID of the underlying machine. | train | https://github.com/hashicorp/vagrant/blob/c22a145c59790c098f95d50141d9afb48e1ef55f/lib/vagrant/machine.rb#L394-L414 | class Machine
# The box that is backing this machine.
#
# @return [Box]
attr_accessor :box
# Configuration for the machine.
#
# @return [Object]
attr_accessor :config
# Directory where machine-specific data can be stored.
#
# @return [Pathname]
attr_reader :data_dir
# The environment that this machine is a part of.
#
# @return [Environment]
attr_reader :env
# ID of the machine. This ID comes from the provider and is not
# guaranteed to be of any particular format except that it is
# a string.
#
# @return [String]
attr_reader :id
# Name of the machine. This is assigned by the Vagrantfile.
#
# @return [Symbol]
attr_reader :name
# The provider backing this machine.
#
# @return [Object]
attr_reader :provider
# The provider-specific configuration for this machine.
#
# @return [Object]
attr_accessor :provider_config
# The name of the provider.
#
# @return [Symbol]
attr_reader :provider_name
# The options given to the provider when registering the plugin.
#
# @return [Hash]
attr_reader :provider_options
# The UI for outputting in the scope of this machine.
#
# @return [UI]
attr_reader :ui
# The Vagrantfile that this machine is attached to.
#
# @return [Vagrantfile]
attr_reader :vagrantfile
# Initialize a new machine.
#
# @param [String] name Name of the virtual machine.
# @param [Class] provider The provider backing this machine. This is
# currently expected to be a V1 `provider` plugin.
# @param [Object] provider_config The provider-specific configuration for
# this machine.
# @param [Hash] provider_options The provider-specific options from the
# plugin definition.
# @param [Object] config The configuration for this machine.
# @param [Pathname] data_dir The directory where machine-specific data
# can be stored. This directory is ensured to exist.
# @param [Box] box The box that is backing this virtual machine.
# @param [Environment] env The environment that this machine is a
# part of.
def initialize(name, provider_name, provider_cls, provider_config, provider_options, config, data_dir, box, env, vagrantfile, base=false)
@logger = Log4r::Logger.new("vagrant::machine")
@logger.info("Initializing machine: #{name}")
@logger.info(" - Provider: #{provider_cls}")
@logger.info(" - Box: #{box}")
@logger.info(" - Data dir: #{data_dir}")
@box = box
@config = config
@data_dir = data_dir
@env = env
@vagrantfile = vagrantfile
@guest = Guest.new(
self,
Vagrant.plugin("2").manager.guests,
Vagrant.plugin("2").manager.guest_capabilities)
@name = name
@provider_config = provider_config
@provider_name = provider_name
@provider_options = provider_options
@ui = Vagrant::UI::Prefixed.new(@env.ui, @name)
@ui_mutex = Mutex.new
@state_mutex = Mutex.new
@triggers = Vagrant::Plugin::V2::Trigger.new(@env, @config.trigger, self, @ui)
# Read the ID, which is usually in local storage
@id = nil
# XXX: This is temporary. This will be removed very soon.
if base
@id = name
# For base setups, we don't want to insert the key
@config.ssh.insert_key = false
else
reload
end
# Keep track of where our UUID should be placed
@index_uuid_file = nil
@index_uuid_file = @data_dir.join("index_uuid") if @data_dir
# Initializes the provider last so that it has access to all the
# state we setup on this machine.
@provider = provider_cls.new(self)
@provider._initialize(@provider_name, self)
# If we're using WinRM, we eager load the plugin because of
# GH-3390
if @config.vm.communicator == :winrm
@logger.debug("Eager loading WinRM communicator to avoid GH-3390")
communicate
end
# If the ID is the special not created ID, then set our ID to
# nil so that we destroy all our data.
if state.id == MachineState::NOT_CREATED_ID
self.id = nil
end
# Output a bunch of information about this machine in
# machine-readable format in case someone is listening.
@ui.machine("metadata", "provider", provider_name)
end
# This calls an action on the provider. The provider may or may not
# actually implement the action.
#
# @param [Symbol] name Name of the action to run.
# @param [Hash] extra_env This data will be passed into the action runner
# as extra data set on the environment hash for the middleware
# runner.
def action(name, opts=nil)
@triggers.fire_triggers(name, :before, @name.to_s, :action)
@logger.info("Calling action: #{name} on provider #{@provider}")
opts ||= {}
# Determine whether we lock or not
lock = true
lock = opts.delete(:lock) if opts.key?(:lock)
# Extra env keys are the remaining opts
extra_env = opts.dup
# An environment is required for triggers to function properly. This is
# passed in specifically for the `#Action::Warden` class triggers. We call it
# `:trigger_env` instead of `env` in case it collides with an existing environment
extra_env[:trigger_env] = @env
check_cwd # Warns the UI if the machine was last used on a different dir
# Create a deterministic ID for this machine
vf = nil
vf = @env.vagrantfile_name[0] if @env.vagrantfile_name
id = Digest::MD5.hexdigest(
"#{@env.root_path}#{vf}#{@env.local_data_path}#{@name}")
# We only lock if we're not executing an SSH action. In the future
# we will want to do more fine-grained unlocking in actions themselves
# but for a 1.6.2 release this will work.
locker = Proc.new { |*args, &block| block.call }
locker = @env.method(:lock) if lock && !name.to_s.start_with?("ssh")
# Lock this machine for the duration of this action
return_env = locker.call("machine-action-#{id}") do
# Get the callable from the provider.
callable = @provider.action(name)
# If this action doesn't exist on the provider, then an exception
# must be raised.
if callable.nil?
raise Errors::UnimplementedProviderAction,
action: name,
provider: @provider.to_s
end
# Call the action
ui.machine("action", name.to_s, "start")
action_result = action_raw(name, callable, extra_env)
ui.machine("action", name.to_s, "end")
action_result
end
@triggers.fire_triggers(name, :after, @name.to_s, :action)
# preserve returning environment after machine action runs
return return_env
rescue Errors::EnvironmentLockedError
raise Errors::MachineActionLockedError,
action: name,
name: @name
end
# This calls a raw callable in the proper context of the machine using
# the middleware stack.
#
# @param [Symbol] name Name of the action
# @param [Proc] callable
# @param [Hash] extra_env Extra env for the action env.
# @return [Hash] The resulting env
def action_raw(name, callable, extra_env=nil)
# Run the action with the action runner on the environment
env = {
action_name: "machine_action_#{name}".to_sym,
machine: self,
machine_action: name,
ui: @ui,
}.merge(extra_env || {})
@env.action_runner.run(callable, env)
end
# Returns a communication object for executing commands on the remote
# machine. Note that the _exact_ semantics of this are up to the
# communication provider itself. Despite this, the semantics are expected
# to be consistent across operating systems. For example, all linux-based
# systems should have similar communication (usually a shell). All
# Windows systems should have similar communication as well. Therefore,
# prior to communicating with the machine, users of this method are
# expected to check the guest OS to determine their behavior.
#
# This method will _always_ return some valid communication object.
# The `ready?` API can be used on the object to check if communication
# is actually ready.
#
# @return [Object]
def communicate
if !@communicator
requested = @config.vm.communicator
requested ||= :ssh
klass = Vagrant.plugin("2").manager.communicators[requested]
raise Errors::CommunicatorNotFound, comm: requested.to_s if !klass
@communicator = klass.new(self)
end
@communicator
end
# Returns a guest implementation for this machine. The guest implementation
# knows how to do guest-OS specific tasks, such as configuring networks,
# mounting folders, etc.
#
# @return [Guest]
def guest
raise Errors::MachineGuestNotReady if !communicate.ready?
@guest.detect! if [email protected]?
@guest
end
# This sets the unique ID associated with this machine. This will
# persist this ID so that in the future Vagrant will be able to find
# this machine again. The unique ID must be absolutely unique to the
# virtual machine, and can be used by providers for finding the
# actual machine associated with this instance.
#
# **WARNING:** Only providers should ever use this method.
#
# @param [String] value The ID.
def id=(value)
@logger.info("New machine ID: #{value.inspect}")
id_file = nil
if @data_dir
# The file that will store the id if we have one. This allows the
# ID to persist across Vagrant runs. Also, store the UUID for the
# machine index.
id_file = @data_dir.join("id")
end
if value
if id_file
# Write the "id" file with the id given.
id_file.open("w+") do |f|
f.write(value)
end
end
if uid_file
# Write the user id that created this machine
uid_file.open("w+") do |f|
f.write(Process.uid.to_s)
end
end
# If we don't have a UUID, then create one
if index_uuid.nil?
# Create the index entry and save it
entry = MachineIndex::Entry.new
entry.local_data_path = @env.local_data_path
entry.name = @name.to_s
entry.provider = @provider_name.to_s
entry.state = "preparing"
entry.vagrantfile_path = @env.root_path
entry.vagrantfile_name = @env.vagrantfile_name
if @box
entry.extra_data["box"] = {
"name" => @box.name,
"provider" => @box.provider.to_s,
"version" => @box.version.to_s,
}
end
entry = @env.machine_index.set(entry)
@env.machine_index.release(entry)
# Store our UUID so we can access it later
if @index_uuid_file
@index_uuid_file.open("w+") do |f|
f.write(entry.id)
end
end
end
else
# Delete the file, since the machine is now destroyed
id_file.delete if id_file && id_file.file?
uid_file.delete if uid_file && uid_file.file?
# If we have a UUID associated with the index, remove it
uuid = index_uuid
if uuid
entry = @env.machine_index.get(uuid)
@env.machine_index.delete(entry) if entry
end
if @data_dir
# Delete the entire data directory contents since all state
# associated with the VM is now gone.
@data_dir.children.each do |child|
begin
child.rmtree
rescue Errno::EACCES
@logger.info("EACCESS deleting file: #{child}")
end
end
end
end
# Store the ID locally
@id = value.nil? ? nil : value.to_s
# Notify the provider that the ID changed in case it needs to do
# any accounting from it.
@provider.machine_id_changed
end
# Returns the UUID associated with this machine in the machine
# index. We only have a UUID if an ID has been set.
#
# @return [String] UUID or nil if we don't have one yet.
def index_uuid
return nil if !@index_uuid_file
return @index_uuid_file.read.chomp if @index_uuid_file.file?
return nil
end
# This returns a clean inspect value so that printing the value via
# a pretty print (`p`) results in a readable value.
#
# @return [String]
def inspect
"#<#{self.class}: #{@name} (#{@provider.class})>"
end
# This reloads the ID of the underlying machine.
# This returns the SSH info for accessing this machine. This SSH info
# is queried from the underlying provider. This method returns `nil` if
# the machine is not ready for SSH communication.
#
# The structure of the resulting hash is guaranteed to contain the
# following structure, although it may return other keys as well
# not documented here:
#
# {
# host: "1.2.3.4",
# port: "22",
# username: "mitchellh",
# private_key_path: "/path/to/my/key"
# }
#
# Note that Vagrant makes no guarantee that this info works or is
# correct. This is simply the data that the provider gives us or that
# is configured via a Vagrantfile. It is still possible after this
# point when attempting to connect via SSH to get authentication
# errors.
#
# @return [Hash] SSH information.
def ssh_info
# First, ask the provider for their information. If the provider
# returns nil, then the machine is simply not ready for SSH, and
# we return nil as well.
info = @provider.ssh_info
return nil if info.nil?
# Delete out the nil entries.
info.dup.each do |key, value|
info.delete(key) if value.nil?
end
# We set the defaults
info[:host] ||= @config.ssh.default.host
info[:port] ||= @config.ssh.default.port
info[:private_key_path] ||= @config.ssh.default.private_key_path
info[:keys_only] ||= @config.ssh.default.keys_only
info[:verify_host_key] ||= @config.ssh.default.verify_host_key
info[:username] ||= @config.ssh.default.username
info[:remote_user] ||= @config.ssh.default.remote_user
info[:compression] ||= @config.ssh.default.compression
info[:dsa_authentication] ||= @config.ssh.default.dsa_authentication
info[:extra_args] ||= @config.ssh.default.extra_args
info[:config] ||= @config.ssh.default.config
# We set overrides if they are set. These take precedence over
# provider-returned data.
info[:host] = @config.ssh.host if @config.ssh.host
info[:port] = @config.ssh.port if @config.ssh.port
info[:keys_only] = @config.ssh.keys_only
info[:verify_host_key] = @config.ssh.verify_host_key
info[:compression] = @config.ssh.compression
info[:dsa_authentication] = @config.ssh.dsa_authentication
info[:username] = @config.ssh.username if @config.ssh.username
info[:password] = @config.ssh.password if @config.ssh.password
info[:remote_user] = @config.ssh.remote_user if @config.ssh.remote_user
info[:extra_args] = @config.ssh.extra_args if @config.ssh.extra_args
info[:config] = @config.ssh.config if @config.ssh.config
# We also set some fields that are purely controlled by Vagrant
info[:forward_agent] = @config.ssh.forward_agent
info[:forward_x11] = @config.ssh.forward_x11
info[:forward_env] = @config.ssh.forward_env
info[:ssh_command] = @config.ssh.ssh_command if @config.ssh.ssh_command
# Add in provided proxy command config
info[:proxy_command] = @config.ssh.proxy_command if @config.ssh.proxy_command
# Set the private key path. If a specific private key is given in
# the Vagrantfile we set that. Otherwise, we use the default (insecure)
# private key, but only if the provider didn't give us one.
if !info[:private_key_path] && !info[:password]
if @config.ssh.private_key_path
info[:private_key_path] = @config.ssh.private_key_path
elsif info[:keys_only]
info[:private_key_path] = @env.default_private_key_path
end
end
# If we have a private key in our data dir, then use that
if @data_dir && [email protected]_key_path
data_private_key = @data_dir.join("private_key")
if data_private_key.file?
info[:private_key_path] = [data_private_key.to_s]
end
end
# Setup the keys
info[:private_key_path] ||= []
info[:private_key_path] = Array(info[:private_key_path])
# Expand the private key path relative to the root path
info[:private_key_path].map! do |path|
File.expand_path(path, @env.root_path)
end
# Check that the private key permissions are valid
info[:private_key_path].each do |path|
key_path = Pathname.new(path)
if key_path.exist?
Vagrant::Util::SSH.check_key_permissions(key_path)
end
end
# Return the final compiled SSH info data
info
end
# Returns the state of this machine. The state is queried from the
# backing provider, so it can be any arbitrary symbol.
#
# @return [MachineState]
def state
result = @provider.state
raise Errors::MachineStateInvalid if !result.is_a?(MachineState)
# Update our state cache if we have a UUID and an entry in the
# master index.
uuid = index_uuid
if uuid
# active_machines provides access to query this info on each machine
# from a different thread, ensure multiple machines do not access
# the locked entry simultaneously as this triggers a locked machine
# exception.
@state_mutex.synchronize do
entry = @env.machine_index.get(uuid)
if entry
entry.state = result.short_description
@env.machine_index.set(entry)
@env.machine_index.release(entry)
end
end
end
result
end
# Returns the user ID that created this machine. This is specific to
# the host machine that this was created on.
#
# @return [String]
def uid
path = uid_file
return nil if !path
return nil if !path.file?
return uid_file.read.chomp
end
# Temporarily changes the machine UI. This is useful if you want
# to execute an {#action} with a different UI.
def with_ui(ui)
@ui_mutex.synchronize do
begin
old_ui = @ui
@ui = ui
yield
ensure
@ui = old_ui
end
end
end
protected
# Returns the path to the file that stores the UID.
def uid_file
return nil if !@data_dir
@data_dir.join("creator_uid")
end
# Checks the current directory for a given machine
# and displays a warning if that machine has moved
# from its previous location on disk. If the machine
# has moved, it prints a warning to the user.
def check_cwd
desired_encoding = @env.root_path.to_s.encoding
vagrant_cwd_filepath = @data_dir.join('vagrant_cwd')
vagrant_cwd = if File.exist?(vagrant_cwd_filepath)
File.read(vagrant_cwd_filepath,
external_encoding: desired_encoding
).chomp
end
if !File.identical?(vagrant_cwd.to_s, @env.root_path.to_s)
if vagrant_cwd
ui.warn(I18n.t(
'vagrant.moved_cwd',
old_wd: "#{vagrant_cwd}",
current_wd: "#{@env.root_path.to_s}"))
end
File.write(vagrant_cwd_filepath, @env.root_path.to_s,
external_encoding: desired_encoding
)
end
end
end
|
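A minimal sketch of the id-file convention that Machine#reload relies on. The path is hypothetical and this snippet is not part of Vagrant itself:

require 'pathname'

data_dir = Pathname.new('/tmp/demo-machine')  # hypothetical data directory
data_dir.mkpath
data_dir.join('id').write("1a2b3c4d\n")

id_file = data_dir.join('id')
id_content = id_file.read.strip if id_file.file?
id_content  # => "1a2b3c4d" -- the value reload would adopt as the machine ID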
tetradice/neuroncheck | lib/neuroncheck/declaration.rb | NeuronCheckSystem.DeclarationMethods.__neuroncheck_ndecl_main_with_block | ruby | def __neuroncheck_ndecl_main_with_block(block, declared_caller_locations)
# Create a context in which the declaration block will run
context = NeuronCheckSystem::DeclarationContext.new
# Evaluate the contents of the declaration block
context.instance_eval(&block)
# Remember where the declaration was made
context.declaration.declared_caller_locations = declared_caller_locations
# Keep the declaration contents as the "last declaration"
@__neuron_check_last_declaration = context.declaration
end | Standard block notation for ndecl | train | https://github.com/tetradice/neuroncheck/blob/0505dedd8f7a8018a3891f7519f7861e1c787014/lib/neuroncheck/declaration.rb#L49-L61 | module DeclarationMethods
# Run a declaration
def ndecl(*expecteds, &block)
# If not yet initialized, automatically run the NeuronCheck initialization
unless @__neuron_check_initialized then
NeuronCheckSystem.initialize_module_for_neuron_check(self)
end
# Run the main processing
__neuroncheck_ndecl_main(expecteds, block, caller(1, 1))
end
# Aliases for ndecl
alias ncheck ndecl
alias ndeclare ndecl
alias nsig ndecl
alias ntypesig ndecl
# Main processing for ndecl
def __neuroncheck_ndecl_main(expecteds, block, declared_caller_locations)
# Raise an error if declarations are made twice in a row
if @__neuron_check_last_declaration then
raise DeclarationError, "repeated declarations - Declaration block and method definition must correspond one-to-one"
end
# Branch on whether a block was given
if block then
# A block was given
__neuroncheck_ndecl_main_with_block(block, declared_caller_locations)
else
# The shorthand syntax is only available when NeuronCheckSyntax is usable
unless defined?(NeuronCheckSyntax) then
raise DeclarationError, "NeuronCheck shorthand syntax (without block) can be used only in Ruby 2.1 or later"
end
# No block was given (shorthand notation)
__neuroncheck_ndecl_main_without_block(expecteds, declared_caller_locations)
end
end
# Standard block notation for ndecl
def __neuroncheck_ndecl_main_with_block(block, declared_caller_locations)
# Create a context in which the declaration block will run
context = NeuronCheckSystem::DeclarationContext.new
# Evaluate the contents of the declaration block
context.instance_eval(&block)
# Remember where the declaration was made
context.declaration.declared_caller_locations = declared_caller_locations
# Keep the declaration contents as the "last declaration"
@__neuron_check_last_declaration = context.declaration
end
# Shorthand notation for ndecl
def __neuroncheck_ndecl_main_without_block(expecteds, declared_caller_locations)
# Create a context in which the declaration block will run
context = NeuronCheckSystem::DeclarationContext.new
# Interpret the arguments
expected_args = nil
expected_return = nil
if expecteds.last.kind_of?(Hash) and expecteds.last.size == 1 then
# If the last element of expecteds is a Hash holding exactly one entry, interpret its key as the last argument and its value as the return value
# Example: String, String => Numeric
last_hash = expecteds.pop
expected_args = expecteds.concat([last_hash.keys.first])
expected_return = last_hash.values.first
else
# Otherwise, treat everything as arguments
expected_args = expecteds
end
# If the only argument is an empty array, treat it as declaring "no arguments"
if expected_args[0].kind_of?(Array) and expected_args.size == 1 then
expected_args = []
end
# Run the simplified declaration
context.instance_eval do
unless expected_args.empty? then
args *expected_args
end
if expected_return then
returns expected_return
end
end
# Turn on the shorthand flag
context.declaration.shorthand = true
# Remember where the declaration was made
context.declaration.declared_caller_locations = declared_caller_locations
context.declaration.arg_matchers.each do |matcher|
matcher.declared_caller_locations = context.declaration.declared_caller_locations
end
if context.declaration.return_matcher then
context.declaration.return_matcher.declared_caller_locations = context.declaration.declared_caller_locations
end
# Keep the declaration contents as the "last declaration" (same as regular ndecl)
@__neuron_check_last_declaration = context.declaration
end
end
|
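A hedged usage sketch for the record above. The args/returns DSL comes straight from the shorthand path shown; how ndecl becomes available on user classes is an assumption, since the fragment only shows the DeclarationMethods module:

require 'neuroncheck'

class Converter
  ndecl {
    args String
    returns Numeric
  }
  def parse(text)
    text.to_i
  end
end

Converter.new.parse("42")  # => 42; a non-String argument would raise a check error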
thumblemonks/riot | lib/riot/assertion_macros/kind_of.rb | Riot.KindOfMacro.evaluate | ruby | def evaluate(actual, expected)
if actual.kind_of?(expected)
pass new_message.is_a_kind_of(expected)
else
fail expected_message.kind_of(expected).not(actual.class)
end
end | (see Riot::AssertionMacro#evaluate)
@param [Class] expected the expected class of actual | train | https://github.com/thumblemonks/riot/blob/e99a8965f2d28730fc863c647ca40b3bffb9e562/lib/riot/assertion_macros/kind_of.rb#L15-L21 | class KindOfMacro < AssertionMacro
register :kind_of
# (see Riot::AssertionMacro#evaluate)
# @param [Class] expected the expected class of actual
# (see Riot::AssertionMacro#devaluate)
# @param [Class] expected the unexpected class of actual
def devaluate(actual, expected)
if actual.kind_of?(expected)
fail expected_message.not_kind_of(expected).not(actual.class)
else
pass new_message.is_a_kind_of(expected)
end
end
end
|
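A hedged sketch of the kind_of macro in an ordinary Riot context (asserts_topic and denies_topic are standard Riot helpers; the pass annotations follow evaluate/devaluate above):

require 'riot'

context "a greeting" do
  setup { "hello" }
  asserts_topic.kind_of(String)   # evaluate: passes
  denies_topic.kind_of(Numeric)   # devaluate: passes
end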
NCSU-Libraries/quick_search | app/controllers/quick_search/appstats_controller.rb | QuickSearch.AppstatsController.data_sessions_overview | ruby | def data_sessions_overview
onCampus = params[:onCampus] ? params[:onCampus].to_i : 0
offCampus = params[:offCampus] ? params[:offCampus].to_i : 0
isMobile = params[:isMobile] ? params[:isMobile].to_i : 0
notMobile = params[:notMobile] ? params[:notMobile].to_i : 0
filterCase = (2**3)*onCampus + (2**2)*offCampus + (2**1)*isMobile + notMobile
case filterCase
when 1 #mobile=f
sessions = Session.where(@range).where(:is_mobile => false).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
when 2 #mobile=t
sessions = Session.where(@range).where(:is_mobile => true).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
when 4 #campus=f
sessions = Session.where(@range).where(:on_campus => false).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
when 5 #campus=f, mobile=f
sessions = Session.where(@range).where(:on_campus => false, :is_mobile => false).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
when 6 #campus=f, mobile=t
sessions = Session.where(@range).where(:on_campus => false, :is_mobile => true).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
when 8 #campus=t
sessions = Session.where(@range).where(:on_campus => true).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
when 9 #campus=t, mobile=f
sessions = Session.where(@range).where(:on_campus => true, :is_mobile => false).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
when 10 #campus=t, mobile=t
sessions = Session.where(@range).where(:on_campus => true, :is_mobile => true).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
else
sessions = Session.where(@range).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
end
@result = process_time_query(sessions)
render_data
end | In order to obtain all filter cases, an integer corresponding to the following truth table is formed:
rowNumber  onCampus  offCampus  isMobile  notMobile  | filters
0          0         0          0         0          | Neither filter applied (default)
1          0         0          0         1          | where(is_mobile=>false)
2          0         0          1         0          | where(is_mobile=>true)
3          0         0          1         1          | INVALID (isMobile & notMobile asserted)
4          0         1          0         0          | where(on_campus=>false)
5          0         1          0         1          | where(on_campus=>false, is_mobile=>false)
6          0         1          1         0          | where(on_campus=>false, is_mobile=>true)
7          0         1          1         1          | INVALID (isMobile & notMobile asserted)
8          1         0          0         0          | where(on_campus=>true)
9          1         0          0         1          | where(on_campus=>true, is_mobile=>false)
10         1         0          1         0          | where(on_campus=>true, is_mobile=>true)
11         1         0          1         1          | INVALID (isMobile & notMobile asserted)
12         1         1          0         0          | INVALID (onCampus & offCampus asserted)
13         1         1          0         1          | INVALID (onCampus & offCampus asserted)
14         1         1          1         0          | INVALID (onCampus & offCampus asserted)
15         1         1          1         1          | INVALID (onCampus & offCampus asserted)
Thus, the integer filterCase, which corresponds to the rowNumber, can be formed by converting the 4-bit
binary number formed by concatenating {onCampus, offCampus, isMobile, notMobile} into an integer.
Note: This filtering cannot be obtained by passing two boolean values (one for on_campus and one for is_mobile),
as this would fail to account for cases where no filter is applied to one variable (i.e. where we don't care about
either location or device) | train | https://github.com/NCSU-Libraries/quick_search/blob/2e2c3f8682eed63a2bf2c008fa77f04ff9dd6a03/app/controllers/quick_search/appstats_controller.rb#L133-L164 | class AppstatsController < ApplicationController
include Auth
before_action :auth, :get_dates, :days_in_sample
def data_general_statistics
@result = []
clicks = Event.where(@range).where(:action => 'click').group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
@result << process_time_query(clicks)
sessions = Session.where(@range).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
@result << process_time_query(sessions)
searches = Search.where(@range).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
@result << process_time_query(searches)
render_data
end
def data_general_table
@result = []
@result << { "clicks" => Event.where(@range).where(:action => 'click').count }
@result << { "searches" => Search.where(@range).count }
@result << { "sessions" => Session.where(@range).count }
render_data
end
def data_module_clicks
clicks = Event.where(@range).where(excluded_categories).where(:action => 'click').group(:category).order("count_category DESC").count(:category)
total_clicks = clicks.values.sum
@result = process_module_result_query(clicks, "module", 0, 100, total_clicks)
render_data
end
def data_result_clicks
clicks = Event.where(@range).where(:category => "result-types").where(:action => 'click').group(:item).order("count_item DESC").count(:item)
total_clicks = clicks.values.sum
@result = process_module_result_query(clicks, "result", 0, 100, total_clicks)
render_data
end
def data_module_details
category = params[:category]
clicks = Event.where(:category => category).where(:action => 'click').where(@range).group(:item).order('count_category DESC').count(:category)
total_clicks = clicks.values.sum
@result = process_module_result_query(clicks, "module_details", category, 15, total_clicks)
render_data
end
def data_top_searches
num_results = params[:num_results] ? params[:num_results].to_i : 20
searches = Search.where(:page => '/').where(@range).group(:query).order('count_query DESC').count(:query)
total_searches = searches.sum {|k,v| v}
@result = process_searches_query(searches, num_results, total_searches)
render_data
end
def data_spelling_suggestions
num_results = params[:num_results] ? params[:num_results].to_i : 20
serves = Event.where(@range).where(:category => "spelling-suggestion", :action => 'serve').group(:item).order("count_category DESC").count(:category)
clicks = Event.where(@range).where(:category => "spelling-suggestion", :action => 'click').group(:item).count(:category)
@result = process_spelling_best_bets_query(serves, clicks, "spelling_suggestion", 0, num_results)
render_data
end
def data_spelling_details
item = params[:item]
serves = Event.where(@range).where(:category => "spelling-suggestion", :action => 'serve', :item => item).group(:query).order("count_query DESC").count(:query)
clicks = Event.where(@range).where(:category => "spelling-suggestion", :action => 'click', :item => item).group(:query).count(:query)
@result = process_spelling_best_bets_query(serves, clicks, "spelling_details", item, 15)
render_data
end
def data_best_bets
num_results = params[:num_results] ? params[:num_results].to_i : 20
serves = Event.where(@range).where(:category => "best-bets-regular", :action => 'serve').group(:item).order("count_category DESC").count(:category)
clicks = Event.where(@range).where(:category => "best-bets-regular", :action => 'click').group(:item).count(:category)
@result = process_spelling_best_bets_query(serves, clicks, "best_bet", 0, num_results)
render_data
end
def data_best_bets_details
item = params[:item]
serves = Event.where(@range).where(:category => "best-bets-regular", :action => 'serve', :item => item).group(:query).order("count_query DESC").count(:query)
clicks = Event.where(@range).where(:category => "best-bets-regular", :action => 'click', :item => item).group(:query).count(:query)
@result = process_spelling_best_bets_query(serves, clicks, "best_bet_details", item, 15)
render_data
end
# In order to obtain all filter cases, an integer corresponding to the following truth table is formed:
# rowNumber  onCampus  offCampus  isMobile  notMobile  | filters
# 0          0         0          0         0          | Neither filter applied (default)
# 1          0         0          0         1          | where(is_mobile=>false)
# 2          0         0          1         0          | where(is_mobile=>true)
# 3          0         0          1         1          | INVALID (isMobile & notMobile asserted)
# 4          0         1          0         0          | where(on_campus=>false)
# 5          0         1          0         1          | where(on_campus=>false, is_mobile=>false)
# 6          0         1          1         0          | where(on_campus=>false, is_mobile=>true)
# 7          0         1          1         1          | INVALID (isMobile & notMobile asserted)
# 8          1         0          0         0          | where(on_campus=>true)
# 9          1         0          0         1          | where(on_campus=>true, is_mobile=>false)
# 10         1         0          1         0          | where(on_campus=>true, is_mobile=>true)
# 11         1         0          1         1          | INVALID (isMobile & notMobile asserted)
# 12         1         1          0         0          | INVALID (onCampus & offCampus asserted)
# 13         1         1          0         1          | INVALID (onCampus & offCampus asserted)
# 14         1         1          1         0          | INVALID (onCampus & offCampus asserted)
# 15         1         1          1         1          | INVALID (onCampus & offCampus asserted)
# Thus, the integer filterCase, which corresponds to the rowNumber, can be formed by converting the 4-bit
# binary number formed by concatenating {onCampus, offCampus, isMobile, notMobile} into an integer.
# Note: This filtering cannot be obtained by passing two boolean values (one for on_campus and one for is_mobile),
# as this would fail to account for cases where no filter is applied to one variable (i.e. where we don't care about
# either location or device)
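# Worked example (editor's addition): onCampus=1, offCampus=0, isMobile=0,
# notMobile=1 gives filterCase = 8*1 + 4*0 + 2*0 + 1*1 = 9, which selects
# where(on_campus: true, is_mobile: false).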
def data_sessions_location
use_perc = params[:use_perc]=="true" ? true : false
sessions_on = Session.where(@range).where(:on_campus => true).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
sessions_off = Session.where(@range).where(:on_campus => false).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
@result = process_stacked_time_query(sessions_on, sessions_off, use_perc)
render_data
end
def data_sessions_device
use_perc = params[:use_perc]=="true" ? true : false
sessions_on = Session.where(@range).where(:is_mobile => true).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
sessions_off = Session.where(@range).where(:is_mobile => false).group(:created_at_string).order("created_at_string ASC").count(:created_at_string)
@result = process_stacked_time_query(sessions_on, sessions_off, use_perc)
render_data
end
def process_time_query(query)
sub = []
query.each do |date , count|
row = { "date" => date ,
"count" => count}
sub << row
end
return sub
end
def process_stacked_time_query(query1, query2, use_perc)
sub = []
query1.each do |date , count1|
count2 = query2[date] ? query2[date] : 0
row = { "date" => date ,
"on" => use_perc ? count1.to_f/(count1+count2) : count1,
"off" => use_perc ? count2.to_f/(count1+count2) : count2}
sub << row
end
return sub
end
def process_module_result_query(query, keyHeading, parent, num_results, total_clicks)
sub = []
query.to_a[0..num_results-1].each_with_index do |d, i|
label = d[0]
count = d[1]
row = {"rank" => i+1,
"label" => (label.blank? ? "(blank)" : label),
"clickcount" => count,
"percentage" => ((100.0*count)/total_clicks).round(2),
"parent" => parent,
"expanded" => 0,
"key" => keyHeading + (label.blank? ? "(blank)" : label) + parent.to_s}
sub << row
end
return sub
end
def process_spelling_best_bets_query(serves, clicks, keyHeading, parent, num_results)
sub = []
serves.to_a[0..num_results-1].each_with_index do |d , i|
label = d[0]
serve_count = d[1]
click_count = clicks[label] ? clicks[label] : 0
row = {"rank" => i+1,
"label" => label,
"serves" => serve_count,
"clicks" => click_count,
"ratio" => (100.0*click_count/serve_count).round(2),
"parent" => parent,
"expanded" => 0,
"key" => keyHeading + label + parent.to_s}
sub << row
end
return sub
end
def process_searches_query(searches, num_results, total_searches)
sub = []
last_row = {}
searches.to_a[0..num_results-1].each_with_index do |d, i|
query = d[0]
count = d[1]
if (last_row=={})
last_cum_percentage = 0
else
last_cum_percentage = last_row["cum_perc"]
end
row = {"rank" => i+1,
"label" => query,
"count" => count,
"percentage" => ((100.0*count)/total_searches).round(2),
"cum_perc" => (last_cum_percentage + ((100.0*count)/total_searches)),
"cum_percentage" => (last_cum_percentage + ((100.0*count)/total_searches)).round(2),
"key" => "top_search" + query}
sub << row
last_row = row
end
return sub
end
def render_data
respond_to do |format|
format.json {
render :json => @result
}
end
end
def index
@page_title = 'Search Statistics'
end
def clicks_overview
@page_title = 'Clicks Overview'
end
def top_searches
@page_title = 'Top Searches'
end
def top_spot
@page_title = params[:ga_top_spot_module]
end
def sessions_overview
@page_title = 'Sessions Overview'
end
def sessions_details
@page_title = 'Sessions Details'
end
def convert_to_time(date_input)
Time.parse(date_input)
end
def days_in_sample
@days_in_sample = ((@end_date - @start_date) / (24*60*60)).round
if @days_in_sample < 1
@days_in_sample = 1
end
end
def get_dates
start = params[:start_date]
stop = params[:end_date]
if (!start.blank?)
@start_date = convert_to_time(start)
else
@start_date = Time.current - 180.days
end
if (!stop.blank?)
@end_date = convert_to_time(stop)
else
@end_date = Time.current
end
@range = { :created_at => @start_date..@end_date }
end
def excluded_categories
"category <> \"common-searches\" AND category <> \"result-types\"AND category <> \"typeahead\""
end
end
|
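A hedged standalone sketch of the filterCase encoding described in the docstring above:

def filter_case(on_campus, off_campus, is_mobile, not_mobile)
  (2**3)*on_campus + (2**2)*off_campus + (2**1)*is_mobile + not_mobile
end

filter_case(1, 0, 1, 0)  # => 10, i.e. where(on_campus: true, is_mobile: true)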
mongodb/mongoid | lib/mongoid/factory.rb | Mongoid.Factory.from_db | ruby | def from_db(klass, attributes = nil, criteria = nil, selected_fields = nil)
if criteria
selected_fields ||= criteria.options[:fields]
end
type = (attributes || {})[TYPE]
if type.blank?
obj = klass.instantiate(attributes, selected_fields)
if criteria && criteria.association && criteria.parent_document
obj.set_relation(criteria.association.inverse, criteria.parent_document)
end
obj
else
camelized = type.camelize
# Check if the class exists
begin
constantized = camelized.constantize
rescue NameError
raise Errors::UnknownModel.new(camelized, type)
end
# Check if the class is a Document class
if !constantized.respond_to?(:instantiate)
raise Errors::UnknownModel.new(camelized, type)
end
constantized.instantiate(attributes, selected_fields)
end
end | Builds a new +Document+ from the supplied attributes loaded from the
database.
If a criteria object is given, it is used in two ways:
1. If the criteria has a list of fields specified via #only,
only those fields are populated in the returned document.
2. If the criteria has a referencing association (i.e., this document
is being instantiated as an association of another document),
the other document is also populated in the returned document's
reverse association, if one exists.
@example Build the document.
Mongoid::Factory.from_db(Person, { "name" => "Durran" })
@param [ Class ] klass The class to instantiate from if _type is not present.
@param [ Hash ] attributes The document attributes.
@param [ Criteria ] criteria Optional criteria object.
@param [ Hash ] selected_fields Fields which were retrieved via
#only. If selected_fields are specified, fields not listed in it
will not be accessible in the returned document.
@return [ Document ] The instantiated document. | train | https://github.com/mongodb/mongoid/blob/56976e32610f4c2450882b0bfe14da099f0703f4/lib/mongoid/factory.rb#L52-L80 | module Factory
extend self
TYPE = "_type".freeze
# Builds a new +Document+ from the supplied attributes.
#
# @example Build the document.
# Mongoid::Factory.build(Person, { "name" => "Durran" })
#
# @param [ Class ] klass The class to instantiate from if _type is not present.
# @param [ Hash ] attributes The document attributes.
#
# @return [ Document ] The instantiated document.
def build(klass, attributes = nil)
attributes ||= {}
type = attributes[TYPE] || attributes[TYPE.to_sym]
if type && klass._types.include?(type)
type.constantize.new(attributes)
else
klass.new(attributes)
end
end
# Builds a new +Document+ from the supplied attributes loaded from the
# database.
#
# If a criteria object is given, it is used in two ways:
# 1. If the criteria has a list of fields specified via #only,
# only those fields are populated in the returned document.
# 2. If the criteria has a referencing association (i.e., this document
# is being instantiated as an association of another document),
# the other document is also populated in the returned document's
# reverse association, if one exists.
#
# @example Build the document.
# Mongoid::Factory.from_db(Person, { "name" => "Durran" })
#
# @param [ Class ] klass The class to instantiate from if _type is not present.
# @param [ Hash ] attributes The document attributes.
# @param [ Criteria ] criteria Optional criteria object.
# @param [ Hash ] selected_fields Fields which were retrieved via
# #only. If selected_fields are specified, fields not listed in it
# will not be accessible in the returned document.
#
# @return [ Document ] The instantiated document.
end
|
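A hedged usage sketch for the record above; Person is a hypothetical Mongoid model and both calls mirror the examples in the docstrings:

Mongoid::Factory.build(Person, { "name" => "Durran" })
# => a new Person (honors a "_type" attribute when instantiating subclasses)

Mongoid::Factory.from_db(Person, { "name" => "Durran" })
# => a Person built via Person.instantiate, as for documents loaded from the database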
Falkor/falkorlib | lib/falkorlib/git/flow.rb | FalkorLib.GitFlow.start | ruby | def start(type, name, path = Dir.pwd, optional_args = '')
command(name, type, 'start', path, optional_args)
end | git flow {feature, hotfix, release, support} start <name> | train | https://github.com/Falkor/falkorlib/blob/1a6d732e8fd5550efb7c98a87ee97fcd2e051858/lib/falkorlib/git/flow.rb#L169-L171 | module GitFlow
module_function
## OLD version
## Check if git-flow is initialized
# def init?(path = Dir.pwd)
# res = FalkorLib::Git.init?(path)
# Dir.chdir(path) do
# gf_check = `git config --get-regexp 'gitflow*'`
# res &= ! gf_check.empty?
# end
# res
# end # init?(path = Dir.pwd)
###### init? ######
# Check if gitflow has been initialized
##
def init?(dir = Dir.pwd)
res = FalkorLib::Git.init?(dir)
res &= !FalkorLib::Git.config('gitflow*', dir).empty? if res
res
end # init?
## Initialize a git-flow repository
# Supported options:
# :interactive [boolean] confirm Gitflow branch names
# :master [string] Branch name for production releases
# :develop [string] Branch name for development commits
def init(path = Dir.pwd, options = {})
exit_status = FalkorLib::Git.init(path, options)
unless command?('git-flow')
# Check (mainly for Linux) if the command is not available under `/usr/lib/git-core`
git_lib = '/usr/lib/git-core/'
error "you shall install git-flow: see https://github.com/nvie/gitflow/wiki/Installation" unless File.exist?(File.join(git_lib, 'git-flow'))
end
remotes = FalkorLib::Git.remotes(path)
git_root_dir = FalkorLib::Git.rootdir( path )
Dir.chdir( git_root_dir ) do
unless FalkorLib::Git.commits?( git_root_dir)
warn "Not yet any commit detected in this repository."
readme = 'README.md'
unless File.exist?( readme )
answer = ask(cyan("=> initialize a commit with an [empty] #{readme} file (Y|n)?"), 'Yes')
exit 0 if answer =~ /n.*/i
FileUtils.touch(readme)
end
FalkorLib::Git.add(readme, "Initiate the repository with a '#{readme}' file")
end
branches = FalkorLib::Git.list_branch(path)
gitflow_branches = FalkorLib.config.gitflow[:branches].clone
# correct eventually the considered branch from the options
gitflow_branches.each do |t, _b|
gitflow_branches[t] = options[t.to_sym] if options[t.to_sym]
confs = FalkorLib::Git.config('gitflow*', path, :hash => true)
gitflow_branches[t] = confs["gitflow.branch.#{t}"] unless confs.empty?
end
if options[:interactive]
gitflow_branches[:master] = ask("=> branch name for production releases", gitflow_branches[:master])
gitflow_branches[:develop] = ask("=> branch name for development commits", gitflow_branches[:develop])
end
ap gitflow_branches if options[:debug]
if remotes.include?( 'origin' )
info "=> configure remote (tracked) branches"
exit_status = FalkorLib::Git.fetch(path)
gitflow_branches.each do |_type, branch|
if branches.include? "remotes/origin/#{branch}"
exit_status = FalkorLib::Git.grab(branch, path)
else
unless branches.include? branch
info "=> creating the branch '#{branch}'"
FalkorLib::Git.create_branch( branch, path )
end
exit_status = FalkorLib::Git.publish(branch, path )
end
end
else
gitflow_branches.each do |_type, branch|
unless branches.include? branch
info " => creating the branch '#{branch}'"
exit_status = FalkorLib::Git.create_branch( branch, path )
end
end
end
#info "initialize git flow configs"
gitflow_branches.each do |t, branch|
exit_status = execute "git config gitflow.branch.#{t} #{branch}"
end
FalkorLib.config.gitflow[:prefix].each do |t, prefix|
exit_status = execute "git config gitflow.prefix.#{t} #{prefix}"
end
devel_branch = gitflow_branches[:develop]
#info "checkout to the main development branch '#{devel_branch}'"
exit_status = run %(
git checkout #{devel_branch}
)
# git config branch.$(git rev-parse --abbrev-ref HEAD).mergeoptions --no-edit for the develop branch
exit_status = execute "git config branch.#{devel_branch}.mergeoptions --no-edit"
if branches.include?('master') && !gitflow_branches.values.include?( 'master' )
warn "Your git-flow confuguration does not hold the 'master' branch any more"
warn "You probably want to get rid of it asap by running 'git branch -d master'"
end
if devel_branch != 'master' &&
remotes.include?( 'origin' ) &&
branches.include?( 'remotes/origin/master')
warn "You might want to change the remote default branch to point to '#{devel_branch}"
puts "=> On github: Settings > Default Branch > #{devel_branch}"
puts "=> On the remote bare Git repository: 'git symbolic-ref HEAD refs/head/#{devel_branch}'"
end
end
exit_status
end
## generic function to run any of the gitflow commands
def command(name, type = 'feature', action = 'start', path = Dir.pwd, optional_args = '')
error "Invalid git-flow type '#{type}'" unless %w(feature release hotfix support).include?(type)
error "Invalid action '#{action}'" unless %w(start finish).include?(action)
error "You must provide a name" if name == ''
error "The name '#{name}' cannot contain spaces" if name =~ /\s+/
exit_status = 1
Dir.chdir( FalkorLib::Git.rootdir(path) ) do
exit_status = run %(
git flow #{type} #{action} #{optional_args} #{name}
)
end
exit_status
end
## git flow {feature, hotfix, release, support} start <name>
## git flow {feature, hotfix, release, support} finish <name>
def finish(type, name, path = Dir.pwd, optional_args = '')
command(name, type, 'finish', path, optional_args)
end
###
# Return the Gitflow branch
# :master: Master Branch name for production releases
# :develop:
##
def branches(type = :master, dir = Dir.pwd, _options = {})
FalkorLib::Git.config("gitflow.branch.#{type}", dir)
#confs[type.to_sym]
end # master_branch
###### guess_gitflow_config ######
# Guess the gitflow configuration
##
def guess_gitflow_config(dir = Dir.pwd, options = {})
path = normalized_path(dir)
use_git = FalkorLib::Git.init?(path)
return {} if (!use_git or !FalkorLib::GitFlow.init?(path))
rootdir = FalkorLib::Git.rootdir(path)
local_config = FalkorLib::Config.get(rootdir, :local)
return local_config[:gitflow] if local_config[:gitflow]
config = FalkorLib::Config::GitFlow::DEFAULTS.clone
[ :master, :develop ].each do |br|
config[:branches][br.to_sym] = FalkorLib::Git.config("gitflow.branch.#{br}", rootdir)
end
[ :feature, :release, :hotfix, :support, :versiontag ].each do |p|
config[:prefix][p.to_sym] = FalkorLib::Git.config("gitflow.prefix.#{p}", rootdir)
end
config
end # guess_gitflow_config
end # module FalkorLib::GitFlow
|
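A hedged usage sketch for the GitFlow helpers above. The repository path is hypothetical and must already be git-flow initialized:

require 'falkorlib'

path = '/path/to/repo'  # hypothetical git-flow initialized repository
FalkorLib::GitFlow.start('feature', 'my-feature', path)
# ... commit work on the feature branch ...
FalkorLib::GitFlow.finish('feature', 'my-feature', path)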
robertwahler/repo_manager | lib/repo_manager/actions/base_action.rb | RepoManager.BaseAction.parse_options | ruby | def parse_options(parser_configuration = {})
raise_on_invalid_option = parser_configuration.has_key?(:raise_on_invalid_option) ? parser_configuration[:raise_on_invalid_option] : true
parse_base_options = parser_configuration.has_key?(:parse_base_options) ? parser_configuration[:parse_base_options] : true
logger.debug "parsing args: #{@args.inspect}, raise_on_invalid_option: #{raise_on_invalid_option}, parse_base_options: #{parse_base_options}"
@option_parser ||= OptionParser.new
option_parser.banner = help + "\n\nOptions:"
if parse_base_options
option_parser.on("--template [NAME]", "Use a template to render output. (default=default.slim)") do |t|
options[:template] = t.nil? ? "default.slim" : t
@template = options[:template]
end
option_parser.on("--output FILENAME", "Render output directly to a file") do |f|
options[:output] = f
@output = options[:output]
end
option_parser.on("--force", "Overwrite file output without prompting") do |f|
options[:force] = f
end
option_parser.on("-r", "--repos a1,a2,a3", "--asset a1,a2,a3", "--filter a1,a2,a3", Array, "List of regex asset name filters") do |list|
options[:filter] = list
end
# NOTE: OptionParser will add short options, there is no way to stop '-m' from being the same as '--match'
option_parser.on("--match [MODE]", "Asset filter match mode. MODE=ALL (default), FIRST, EXACT, or ONE (fails if more than 1 match)") do |m|
options[:match] = m || "ALL"
options[:match].upcase!
unless ["ALL", "FIRST", "EXACT", "ONE"].include?(options[:match])
puts "invalid match mode option: #{options[:match]}"
exit 1
end
end
end
  # allow descendants to add options
yield option_parser if block_given?
# reprocess args for known options, see binary wrapper for first pass
# (first pass doesn't know about action specific options), find all
# action options that may come after the action/subcommand (options
# before subcommand have already been processed) and its args
logger.debug "args before reprocessing: #{@args.inspect}"
begin
option_parser.order!(@args)
rescue OptionParser::InvalidOption => e
if raise_on_invalid_option
puts "option error: #{e}"
puts option_parser
exit 1
else
# parse and consume until we hit an unknown option (not arg), put it back so it
# can be shifted into the new array
e.recover(@args)
end
end
logger.debug "args before unknown collection: #{@args.inspect}"
unknown_args = []
while unknown_arg = @args.shift
logger.debug "unknown_arg: #{unknown_arg.inspect}"
unknown_args << unknown_arg
begin
# consume options and stop at an arg
option_parser.order!(@args)
rescue OptionParser::InvalidOption => e
if raise_on_invalid_option
puts "option error: #{e}"
puts option_parser
exit 1
else
# parse and consume until we hit an unknown option (not arg), put it back so it
# can be shifted into the new array
e.recover(@args)
end
end
end
logger.debug "args after unknown collection: #{@args.inspect}"
@args = unknown_args.dup
logger.debug "args after reprocessing: #{@args.inspect}"
logger.debug "configuration after reprocessing: #{@configuration.inspect}"
logger.debug "options after reprocessing: #{@options.inspect}"
option_parser
end | Parse generic action options for all descendant actions
@return [OptionParser] for use by descendant actions | train | https://github.com/robertwahler/repo_manager/blob/d945f1cb6ac48b5689b633fcc029fd77c6a02d09/lib/repo_manager/actions/base_action.rb#L47-L136 | class BaseAction
# main configuration hash
attr_reader :configuration
# options hash, read from configuration hash
attr_reader :options
# args as passed on command line
attr_reader :args
# filename to template for rendering
attr_accessor :template
# filename to write output
attr_accessor :output
# numeric exit code set from return of process method
attr_reader :exit_code
# bin wrapper option parser object
attr_accessor :option_parser
def initialize(args=[], config={})
@configuration = config.deep_clone
@options = @configuration[:options] || {}
@args = args.dup
logger.debug "initialize with args: #{@args.inspect}"
end
  # Parse generic action options for all descendant actions
  #
  # @return [OptionParser] for use by descendant actions
def execute
before_execute
parse_options
@exit_code = process
after_execute
@exit_code
end
# handle "assets to items" transformations, if any, and write to output
def process
write_to_output(render)
end
# TODO: add exception handler and pass return values
def write_to_output(content)
if output
logger.debug "write_to_output called with output : #{output}"
if overwrite_output?
logger.debug "writing output to : #{output}"
File.open(output, 'wb') {|f| f.write(content) }
else
logger.info "existing file not overwritten. To overwrite automatically, use the '--force' option."
end
else
logger.debug "writing to STDOUT"
print content
end
return 0
end
# TODO: create items/app_item class with at least the 'name' accessor
#
# assets: raw configuration handling system for items
def assets
return @assets if @assets
@assets = AssetManager.new.assets(asset_options)
end
# Used by asset factory to create assets. Override in app_action.rb or a
# descendant to set the class to be instantiated by by the AssetManager.
#
# @return [Symbol] asset type
def asset_type
:app_asset
end
# asset options separated from assets to make it easier to override assets
def asset_options
# include all base action options
result = options.deep_clone
    # anything left on the command line should be filters, as all options have
    # been consumed; for pass-through options, filters must be ignored by overwriting them
filters = args.dup
filters += result[:filter] if result[:filter]
result = result.merge(:filter => filters) unless filters.empty?
# asset type to create
type = result[:type] || asset_type
result = result.merge(:type => type)
# optional key: :assets_folder, absolute path or relative to config file if :base_folder is specified
result = result.merge(:assets_folder => configuration[:folders][:assets]) if configuration[:folders]
# optional key: :base_folder is the folder that contains the main config file
result = result.merge(:base_folder => File.dirname(configuration[:configuration_filename])) if configuration[:configuration_filename]
result
end
# items to be rendered, defaults to assets, override to suit
#
# @return [Array] of items to be rendered
def items
assets
end
# Render items result to a string
#
# @return [String] suitable for displaying on STDOUT or writing to a file
def render(view_options=configuration)
logger.debug "rendering"
result = ""
if template
logger.debug "rendering with template : #{template}"
view = AppView.new(items, view_options)
view.template = template
result = view.render
else
items.each_with_index do |item, index|
result += "\n" unless index == 0
result += item.name.green + ":\n"
if item.respond_to?(:attributes)
attributes = item.attributes.deep_clone
result += attributes.recursively_stringify_keys!.to_conf.gsub(/\s+$/, '') # strip trailing whitespace from YAML
result += "\n"
end
end
end
result
end
# Convert method comments block to help text
#
# @return [String] suitable for displaying on STDOUT
def help(help_options={})
comment_starting_with = help_options[:comment_starting_with] || ""
located_in_file = help_options[:located_in_file] || __FILE__
text = File.read(located_in_file)
result = text.match(/(^\s*#\s*#{comment_starting_with}.*)^\s*class .* AppAction/m)
result = $1
result = result.gsub(/ @example/, '')
result = result.gsub(/ @return \[Number\]/, ' Exit code:')
result = result.gsub(/ @return .*/, '')
result = result.gsub(/ @see .*$/, '')
# strip the leading whitespace, the '#' and space
result = result.gsub(/^\s*# ?/, '')
# strip surrounding whitespace
result.strip
end
# @return [Boolean] true if output doesn't exist or it is OK to overwrite
def overwrite_output?
return true unless File.exists?(output)
if options[:force]
logger.debug "overwriting output with --force option"
return true
end
unless STDOUT.isatty
logger.debug "TTY not detected, skipping overwrite prompt"
return false
end
result = false
print "File '#{output}' exists. Would you like overwrite? [y/n]: "
case gets.strip
when 'Y', 'y', 'yes'
logger.debug "user answered yes to overwrite prompt"
result = true
else
logger.debug "user answered no to overwrite prompt"
end
result
end
# callbacks
def before_execute
logger.debug "callback: before_execute"
end
def after_execute
logger.debug "callback: after_execute"
end
end
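A sketch of how a descendant action might extend parse_options through the yielded OptionParser; ListAction and the --long switch are invented for illustration:

class ListAction < BaseAction          # hypothetical descendant
  def parse_options
    super do |opts|
      # action-specific switch added on top of the base options
      opts.on('--long', 'Show detailed listing') { |l| options[:long] = l }
    end
  end
end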
|
david942j/seccomp-tools | lib/seccomp-tools/disasm/disasm.rb | SeccompTools.Disasm.disasm | ruby | def disasm(raw, arch: nil)
codes = to_bpf(raw, arch)
contexts = Array.new(codes.size) { Set.new }
contexts[0].add(Context.new)
    # all we care about is whether A is exactly one of data[*]
dis = codes.zip(contexts).map do |code, ctxs|
ctxs.each do |ctx|
code.branch(ctx) do |pc, c|
contexts[pc].add(c) unless pc >= contexts.size
end
end
code.contexts = ctxs
code.disasm
end.join("\n")
<<EOS + dis + "\n"
line CODE JT JF K
=================================
EOS
end | Disassemble bpf codes.
@param [String] raw
The raw bpf bytes.
@param [Symbol] arch
Architecture. | train | https://github.com/david942j/seccomp-tools/blob/8dfc288a28eab2d683d1a4cc0fed405d75dc5595/lib/seccomp-tools/disasm/disasm.rb#L17-L35 | module Disasm
module_function
# Disassemble bpf codes.
# @param [String] raw
# The raw bpf bytes.
# @param [Symbol] arch
# Architecture.
# Convert raw bpf string to array of {BPF}.
# @param [String] raw
# @param [Symbol] arch
# @return [Array<BPF>]
def to_bpf(raw, arch)
arch ||= Util.system_arch
raw.scan(/.{8}/m).map.with_index { |b, i| BPF.new(b, arch, i) }
end
end
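A minimal sketch of calling the disassembler; the packed bytes mirror struct sock_filter (u16 code, u8 jt, u8 jf, u32 k) and encode a single "return ALLOW" instruction, with :amd64 assumed to be a supported arch:

# BPF_RET|BPF_K (0x06) with k = SECCOMP_RET_ALLOW (0x7fff0000)
raw = [0x06, 0, 0, 0x7fff0000].pack('vCCV')
puts SeccompTools::Disasm.disasm(raw, arch: :amd64)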
|
murb/workbook | lib/workbook/column.rb | Workbook.Column.column_type | ruby | def column_type
return @column_type if defined?(@column_type)
ind = self.index
table[1..500].each do |row|
if row[ind] and row[ind].cell_type
cel_column_type = row[ind].cell_type
if !defined?(@column_type) or @column_type.nil?
@column_type = cel_column_type
elsif cel_column_type == @column_type or cel_column_type == :nil
else
@column_type = :string
break
end
end
end
return @column_type
end | Returns column type, either :primary_key, :string, :text, :integer, :float, :decimal, :datetime, :date, :binary, :boolean | train | https://github.com/murb/workbook/blob/2e12f43c882b7c235455192a2fc48183fe6ec965/lib/workbook/column.rb#L17-L33 | class Column
  attr_accessor :limit, :width # :limit is the character limit
def initialize(table=nil, options={})
self.table = table
options.each{ |k,v| self.public_send("#{k}=",v) }
end
# Returns column type, either :primary_key, :string, :text, :integer, :float, :decimal, :datetime, :date, :binary, :boolean
# Returns index of the column within the table's columns-set
# @return [Integer, NilClass]
def index
table.columns.index self
end
# Set the table this column belongs to
# @param [Workbook::Table] table this column belongs to
def table= table
raise(ArgumentError, "value should be nil or Workbook::Table") unless [NilClass,Workbook::Table].include? table.class
@table = table
end
# @return [Workbook::Table]
def table
@table
end
def column_type= column_type
if [:primary_key, :string, :text, :integer, :float, :decimal, :datetime, :date, :binary, :boolean].include? column_type
@column_type = column_type
else
raise ArgumentError, "value should be a symbol indicating a primitive type, e.g. a string, or an integer (valid values are: :primary_key, :string, :text, :integer, :float, :decimal, :datetime, :date, :binary, :boolean)"
end
end
def head_value
begin
table.header[index].value
rescue
return "!noheader!"
end
end
def inspect
"<Workbook::Column index=#{index}, header=#{head_value}>"
end
#default cell
def default
return @default
end
def default= value
@default = value if value.class == Cell
@default = Cell.new(value)
end
class << self
# Helps to convert from e.g. "AA" to 26
# @param [String] string that typically identifies a column
# @return [Integer]
def alpha_index_to_number_index string
sum = 0
string.upcase.chars.each_with_index do | char, char_index|
sum = sum * 26 + char.unpack('U')[0]-64
end
return sum-1
end
end
end
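The class-level helper above is deterministic and easy to check; a quick sketch:

Workbook::Column.alpha_index_to_number_index('A')   # => 0
Workbook::Column.alpha_index_to_number_index('AA')  # => 26 (as the comment above says)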
|
motion-kit/motion-kit | lib/motion-kit-osx/helpers/nsmenu_helpers.rb | MotionKit.MenuLayout.add | ruby | def add(title_or_item, element_id=nil, options={}, &block)
if element_id.is_a?(NSDictionary)
options = element_id
element_id = nil
end
if title_or_item.is_a?(NSMenuItem)
item = title_or_item
menu = nil
retval = item
elsif title_or_item.is_a?(NSMenu)
menu = title_or_item
item = self.item(menu.title, options)
item.submenu = menu
retval = menu
else
title = title_or_item
item = self.item(title, options)
if block
menu = create(title)
item.submenu = menu
retval = menu
else
retval = item
end
end
self.apply(:add_child, item)
if menu && block
menuitem_was = @menu_item
@menu_item = item
context(menu, &block)
@menu_item = menuitem_was
end
if element_id
create(retval, element_id)
end
return retval
end | override 'add'; menus are just a horse of a different color. | train | https://github.com/motion-kit/motion-kit/blob/fa01dd08497b0dd01090156e58552be9d3b25ef1/lib/motion-kit-osx/helpers/nsmenu_helpers.rb#L36-L78 | class MenuLayout < TreeLayout
# A more sensible name for the menu that is created.
def menu
self.view
end
# platform specific default root view
def default_root
    # child MenuLayout classes can return *their* NSMenu subclass via self.class.targets
menu_class = self.class.targets || NSMenu
menu_class.alloc.init
end
def add_child(submenu, options={})
target.addItem(submenu)
end
def remove_child(submenu)
target.removeItem(submenu)
end
# override root to allow a menu title for the top level menu
def root(element, element_id=nil, &block)
if element && element.is_a?(NSString)
element = NSMenu.alloc.initWithTitle(element)
end
super(element, element_id, &block)
end
# override 'add'; menus are just a horse of a different color.
def create(title_or_item, element_id=nil, &block)
if title_or_item.is_a?(NSString)
item = NSMenu.alloc.initWithTitle(title_or_item)
else
item = title_or_item
end
return super(item, element_id, &block)
end
def item(title, options={})
action = options.fetch(:action, nil)
key = options.fetch(:keyEquivalent, options.fetch(:key, ''))
key ||= '' # key must never be nil
item = NSMenuItem.alloc.initWithTitle(title, action: action, keyEquivalent: key)
mask = options.fetch(:keyEquivalentModifierMask, options.fetch(:mask, nil))
unless mask.nil?
item.keyEquivalentModifierMask = mask
end
return item
end
end
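A hypothetical layout built with the add helper above; the MK::MenuLayout namespace, titles, actions and key equivalents are all assumptions for illustration:

class MainMenuLayout < MK::MenuLayout
  def layout
    add 'File' do                                      # block => nested submenu
      add 'Open...', action: 'openDocument:', key: 'o' # String + options => item
      add item('Quit', action: 'terminate:', key: 'q') # pre-built NSMenuItem
    end
  end
end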
|
bradleymarques/carbon_date | lib/carbon_date/date.rb | CarbonDate.Date.set_date | ruby | def set_date(year, month, day)
raise invalid_date if (year.nil? || year == 0 || !((1..12).include? month))
begin
::Date.new(year, month, day)
rescue ArgumentError
raise invalid_date
end
@year = year.to_i
@month = month
@day = day
end | An atomic function to set the date component (year, month and day)
Raises +ArgumentError+ if invalid date | train | https://github.com/bradleymarques/carbon_date/blob/778b2a58e0d0ae554d36fb92c356a6d9fc6415b4/lib/carbon_date/date.rb#L88-L98 | class Date
# The class-wide Formatter to use to turn CarbonDate::Dates into human-readable strings
@formatter = CarbonDate::StandardFormatter.new
class << self
# Used to get and set the CarbonDate::Date.formatter
attr_accessor :formatter
end
##
# The precisions available
PRECISION =
[
:billion_years,
:hundred_million_years,
:ten_million_years,
:million_years,
:hundred_thousand_years,
:ten_thousand_years,
:millennium,
:century,
:decade,
:year,
:month,
:day,
:hour,
:minute,
:second
]
# The date's precision
attr_reader :precision
# The date's year. Cannot be 0 as there is no 0 year in the Gregorian Calendar.
attr_reader :year
# The date's month in range (1..12)
attr_reader :month
# The date's day counting from 1
attr_reader :day
# The date's hour in range (0..23)
attr_reader :hour
# The date's minute in range (0..59)
attr_reader :minute
# The date's second in range (0..59)
attr_reader :second
# Creates a new CarbonDate::Date
#
# Params:
# +year+ - The date's year, as an integer
# +month+ - The date's month, as an integer
# +day+ - The date's day, as an integer
# +hour+ - The date's hour, as an integer
# +minute+ - The date's minute, as an integer
# +second+ - The date's second, as an integer
# +:precision+ - The date's precision, as a symbol. For avaiable options, see CarbonDate::Date::PRECISION
# Raises:
# +ArgumentError+ if validations fail
# Returns:
# +CarbonDate::Date+ object
def initialize(year = 1970, month = 1, day = 1, hour = 0, minute = 0, second = 0, precision: :second)
month = 1 if month == 0
day = 1 if day == 0
self.precision = precision
self.set_date(year, month, day)
self.hour = hour
self.minute = minute
self.second = second
end
# Sets the precision
# Raises +ArgumentError+ if invalid symbol
def precision=(value)
raise ArgumentError.new "Invalid precision #{value}" unless PRECISION.include? value
@precision = value
end
# An atomic function to set the date component (year, month and day)
# Raises +ArgumentError+ if invalid date
# Sets the year. Calls set_date() to ensure atomicity.
def year=(value)
set_date(value, @month, @day)
end
# Sets the month. Calls set_date() to ensure atomicity.
# Raises +ArgumentError+ if:
# - value is not in (1..12)
def month=(value)
set_date(@year, value, @day)
end
# Sets the day. Calls set_date() to ensure atomicity.
# Raises +ArgumentError+ if:
  # - the value does not form a valid date with the current year and month
def day=(value)
set_date(@year, @month, value)
end
# Sets the hour with validation
# Raises +ArgumentError+ unless in the range (0..23)
def hour=(value)
raise ArgumentError.new "Invalid hour #{value}" unless (0..23).include? value
@hour = value
end
# Sets the minute with validation
# Raises +ArgumentError+ unless in the range (0..59)
def minute=(value)
raise ArgumentError.new "Invalid minute #{value}" unless (0..59).include? value
@minute = value
end
# Sets the second with validation
# Raises +ArgumentError+ unless in the range (0..59)
def second=(value)
raise ArgumentError.new "Invalid second #{value}" unless (0..59).include? value
@second = value
end
# Converts from an iso8601 datetime format, with precision
# Dates like these are found on Wikidata (https://www.wikidata.org)
# Params:
# +string+ -> the iso8601 datetime in the form +1989-03-23T23:11:08Z
# +precision_level+ -> an integer between 0 and 14 (see CarbonDate::Date::PRECISION)
# Returns:
# +CarbonDate::Date+ object
def self.iso8601(string, precision_level)
p = PRECISION[precision_level]
raise ArgumentError.new("Invalid precision level #{precision_level}") unless p
    # If there is an initial '-' symbol on the year, it needs to be treated differently than the other '-'.
# Example: -0500-01-01 is the 1st January 500 BCE
if string[0] == '-'
string = string[1..(string.length - 1)] # Drop the initial '-'
bce = true
else
bce = false
end
d = string.split('T').map { |x| x.split(/[-:]/) }.flatten.map(&:to_i)
year = bce ? -d[0] : d[0]
CarbonDate::Date.new(year, d[1], d[2], d[3], d[4], d[5], precision: p)
end
# Prints a human-readable version of the date, using CarbonDate::Date.formatter
def to_s
CarbonDate::Date.formatter.date_to_string(self)
end
# Convert to a standard Ruby Date object
# Returns:
# +::Date+ object
def to_date
::Date.new(@year, @month, @day)
end
# Convert into a standard Ruby DateTime object
# Returns:
# +::DateTime+ object
def to_datetime
::DateTime.new(@year, @month, @day, @hour, @minute, @second)
end
# Test equality of two CarbonDate::Dates
# For equality, the two dates have to have the same:
# - precision
# - year
# - month
# - day
# - hour
# - minute
# - second
def ==(another_date)
return false if self.precision != another_date.precision
self.to_datetime == another_date.to_datetime
end
# Tests if this CarbonDate::Date is in the past relative to the other CarbonDate::Date
# Defers to DateTime#<=
def <=(another_date)
self.to_datetime <= another_date.to_datetime
end
# Tests if this CarbonDate::Date is in the future relative to the other CarbonDate::Date
# Defers to DateTime#>=
def >=(another_date)
self.to_datetime >= another_date.to_datetime
end
def invalid_date
ArgumentError.new("Invalid date #{year}-#{month}-#{day}")
end
end
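A short sketch of iso8601 with an explicit precision level (index 11 in the PRECISION list above is :day); the timestamps are illustrative:

date = CarbonDate::Date.iso8601('+1989-03-23T00:00:00Z', 11)
date.precision  # => :day
date.year       # => 1989
# A leading '-' marks a BCE year (precision level 9 == :year):
CarbonDate::Date.iso8601('-0500-01-01T00:00:00Z', 9).year  # => -500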
|
wvanbergen/request-log-analyzer | lib/request_log_analyzer/file_format.rb | RequestLogAnalyzer::FileFormat.Base.parse_line | ruby | def parse_line(line, &warning_handler)
line_definitions.each do |_lt, definition|
match = definition.matches(line, &warning_handler)
return match if match
end
nil
end | Parses a line by trying to parse it using every line definition in this file format | train | https://github.com/wvanbergen/request-log-analyzer/blob/b83865d440278583ac8e4901bb33878244fd7c75/lib/request_log_analyzer/file_format.rb#L297-L304 | class Base
extend RequestLogAnalyzer::ClassLevelInheritableAttributes
inheritable_attributes :line_definer, :report_definer
attr_reader :line_definitions, :report_trackers
####################################################################################
# CLASS METHODS for format definition
####################################################################################
# Specifies a single line defintions.
def self.line_definition(name, &block)
line_definer.define_line(name, &block)
end
# Specifies multiple line definitions at once using a block
def self.format_definition(&_block)
if block_given?
yield line_definer
else
return line_definer
end
end
# Specifies the summary report using a block.
def self.report(mode = :append, &_block)
report_definer.reset! if mode == :overwrite
yield(report_definer)
end
# Setup the default line definer.
def self.line_definer
@line_definer ||= ::RequestLogAnalyzer::LineDefinition::Definer.new
end
# Setup the default report definer.
def self.report_definer
@report_definer ||= ::RequestLogAnalyzer::Aggregator::Summarizer::Definer.new
end
# Setup the default Request class.
Request = ::RequestLogAnalyzer::Request
####################################################################################
# Instantiation
####################################################################################
def self.create(*_args)
# Ignore arguments
new(line_definer.line_definitions, report_definer.trackers)
end
def initialize(line_definitions = {}, report_trackers = [])
@line_definitions, @report_trackers = line_definitions, report_trackers
end
####################################################################################
# INSTANCE methods
####################################################################################
# Returns the Request class of this file format
def request_class
self.class::Request
end
# Returns a Request instance with the given parsed lines that should be provided as hashes.
def request(*hashes)
request_class.create(self, *hashes)
end
# Checks whether the file format is valid so it can be safely used with RLA.
def well_formed?
valid_line_definitions? && valid_request_class?
end
alias_method :valid?, :well_formed?
# Checks whether the line definitions form a valid language.
# A file format should have at least a header and a footer line type
def valid_line_definitions?
line_definitions.any? { |(_, ld)| ld.header } && line_definitions.any? { |(_, ld)| ld.footer }
end
# Checks whether the request class inherits from the base Request class.
def valid_request_class?
request_class.ancestors.include?(RequestLogAnalyzer::Request)
end
# Returns true if this language captures the given symbol in one of its line definitions
def captures?(name)
line_definitions.any? { |(_, ld)| ld.captures?(name) }
end
# Function that a file format con implement to monkey patch the environment.
# * <tt>controller</tt> The environment is provided as a controller instance
def setup_environment(_controller)
end
# Parses a line by trying to parse it using every line definition in this file format
# Returns the max line length for this file format if any.
def max_line_length
    self.class.const_get(:MAX_LINE_LENGTH) if self.class.const_defined?(:MAX_LINE_LENGTH)
end
def line_divider
    self.class.const_get(:LINE_DIVIDER) if self.class.const_defined?(:LINE_DIVIDER)
end
end
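A hedged sketch of a custom format built on Base; the line-definition attributes used below (header, footer, regexp, captures) are assumed from the gem's documented DSL:

class MyFormat < RequestLogAnalyzer::FileFormat::Base
  line_definition :started do |line|
    line.header = true
    line.regexp = /Started (\w+)/
    line.captures << { :name => :method, :type => :string }
  end
  line_definition :completed do |line|
    line.footer = true
    line.regexp = /Completed in (\d+)ms/
    line.captures << { :name => :duration, :type => :integer }
  end
end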
|
jaymcgavren/zyps | lib/zyps.rb | Zyps.Behavior.copy | ruby | def copy
copy = self.clone #Currently, we overwrite everything anyway, but we may add some clonable attributes later.
#Make a deep copy of all actions.
copy.instance_eval {@actions = []}
self.actions.each {|action| copy.add_action(action.copy)}
#Make a deep copy of all conditions.
copy.instance_eval {@conditions = []}
self.conditions.each {|condition| copy.add_condition(condition.copy)}
copy
end | Make a deep copy. | train | https://github.com/jaymcgavren/zyps/blob/7fa9dc497abc30fe2d1a2a17e129628ffb0456fb/lib/zyps.rb#L552-L561 | class Behavior
#An array of Condition subclasses.
#Condition#select(actor, targets) will be called on each.
attr_accessor :conditions
#An array of Action subclasses.
#Action#start(actor, targets) and action.do(actor, targets) will be called on each when all conditions are true.
#Action#stop(actor, targets) will be called when any condition is false.
attr_accessor :actions
#The Creature this behavior belongs to.
attr_accessor :creature
#Number of updates before behavior is allowed to select a new group of targets to act on.
attr_accessor :condition_frequency
#Will be used to distribute condition processing time between all Behaviors with the same condition_frequency.
@@condition_order = Hash.new {|h, k| h[k] = 0}
#Takes a hash with these keys and defaults:
# :actions => []
# :conditions => []
# :condition_frequency => 1
def initialize (options = {})
options = {
:actions => [],
:conditions => [],
:condition_frequency => 1
}.merge(options)
@actions = []
@conditions = []
options[:actions].each {|action| self.add_action(action)}
options[:conditions].each {|condition| self.add_condition(condition)}
self.condition_frequency = options[:condition_frequency]
#Tracks number of calls to perform() so conditions can be evaluated with appropriate frequency.
@condition_evaluation_count = 0
#Targets currently selected to act upon.
@current_targets = []
end
#Add an Action to this behavior.
def add_action(action)
action.behavior = self
@actions << action
end
#Remove an Action from this behavior.
def remove_action(action)
action.behavior = nil
@actions.delete(action)
end
#An Enumerable::Enumerator over each Action.
def actions; Enumerable::Enumerator.new(@actions, :each); end
#Remove all Actions from this behavior.
def clear_actions
@actions.clone.each {|action| self.remove_action(action)}
end
#Number of Actions.
def action_count; @actions.length; end
#Add a Condition to this behavior.
def add_condition(condition)
condition.behavior = self
@conditions << condition
end
#Remove a Condition from this behavior.
def remove_condition(condition)
condition.behavior = nil
@conditions.delete(condition)
end
#An Enumerable::Enumerator over each Condition.
def conditions; Enumerable::Enumerator.new(@conditions, :each); end
#Remove all Conditions from this behavior.
def clear_conditions
@conditions.clone.each {|condition| self.remove_condition(condition)}
end
#Number of Conditions.
def condition_count; @conditions.length; end
def condition_frequency= (value)
#Condition frequency must be 1 or more.
@condition_frequency = (value >= 1 ? value : 1)
#This will be used to distribute condition evaluation time among all behaviors with this frequency.
@condition_order = @@condition_order[@condition_frequency]
@@condition_order[@condition_frequency] += 1
end
#Make a deep copy.
#Finds targets that meet all conditions, then acts on them.
#Calls select(actor, targets) on each Condition, each time discarding targets that fail.
#Then on each Action, calls Action#start(actor, targets) (if not already started) followed by Action#do(actor, targets).
#If no matching targets are found, calls Action#stop(actor, targets) on each Action.
#If there are no conditions, actions will occur regardless of targets.
def perform(actor, targets)
if condition_evaluation_turn?
@current_targets = targets.clone
conditions.each {|condition| @current_targets = condition.select(actor, @current_targets)}
end
actions.each do |action|
if @current_targets.empty? and ! @conditions.empty?
action.stop(actor, targets) if action.started? #Not @current_targets; that array is empty.
else
action.start(actor, @current_targets) unless action.started?
action.do(actor, @current_targets)
end
end
end
#True if all attributes, actions and conditions are the same.
def ==(other)
return false if @actions != other.actions.to_a
return false if @conditions != other.conditions.to_a
return false if condition_frequency != other.condition_frequency
true
end
#Overloads the << operator to put the new item into the correct
#list or assign it to the correct attribute.
#Assignment is done based on item's class or a parent class of item.
def <<(item)
if item.kind_of? Condition
add_condition(item)
elsif item.kind_of? Action
add_action(item)
else
raise "Invalid item: #{item.class}"
end
self
end
def to_s
[
(@actions + @conditions).map{|o| o.class}.join(", "),
"[#{@current_targets.map{|o| o.name || sprintf("%07X", o.identifier)}.join(',')}]"
].join(" ")
end
private
#Return true if it's our turn to choose targets, false otherwise.
def condition_evaluation_turn?
#Every condition_frequency turns (plus our turn order within the group), return true.
our_turn = ((@condition_evaluation_count + @condition_order) % @condition_frequency == 0) ? true : false
#Track number of calls to perform() for staggering condition evaluation.
@condition_evaluation_count += 1
our_turn
end
end
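A tiny sketch of copy; FleeAction is assumed from zyps' bundled actions (any Action subclass would do):

original  = Zyps::Behavior.new(:actions => [Zyps::FleeAction.new], :condition_frequency => 2)
duplicate = original.copy
duplicate.actions.first.equal?(original.actions.first)  # => false (actions are deep-copied)
duplicate.condition_frequency                           # => 2 (scalar state carries over)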
|
state-machines/state_machines | lib/state_machines/state.rb | StateMachines.State.context_methods | ruby | def context_methods
@context.instance_methods.inject({}) do |methods, name|
methods.merge(name.to_sym => @context.instance_method(name))
end
end | The list of methods that have been defined in this state's context | train | https://github.com/state-machines/state_machines/blob/10b03af5fc9245bcb09bbd9c40c58ffba9a85422/lib/state_machines/state.rb#L208-L212 | class State
# The state machine for which this state is defined
attr_reader :machine
# The unique identifier for the state used in event and callback definitions
attr_reader :name
# The fully-qualified identifier for the state, scoped by the machine's
# namespace
attr_reader :qualified_name
# The human-readable name for the state
attr_writer :human_name
# The value that is written to a machine's attribute when an object
# transitions into this state
attr_writer :value
# Whether this state's value should be cached after being evaluated
attr_accessor :cache
# Whether or not this state is the initial state to use for new objects
attr_accessor :initial
alias_method :initial?, :initial
# A custom lambda block for determining whether a given value matches this
# state
attr_accessor :matcher
# Creates a new state within the context of the given machine.
#
# Configuration options:
# * <tt>:initial</tt> - Whether this state is the beginning state for the
# machine. Default is false.
# * <tt>:value</tt> - The value to store when an object transitions to this
# state. Default is the name (stringified).
# * <tt>:cache</tt> - If a dynamic value (via a lambda block) is being used,
# then setting this to true will cache the evaluated result
# * <tt>:if</tt> - Determines whether a value matches this state
# (e.g. :value => lambda {Time.now}, :if => lambda {|state| !state.nil?}).
# By default, the configured value is matched.
# * <tt>:human_name</tt> - The human-readable version of this state's name
def initialize(machine, name, options = {}) #:nodoc:
options.assert_valid_keys(:initial, :value, :cache, :if, :human_name)
@machine = machine
@name = name
@qualified_name = name && machine.namespace ? :"#{machine.namespace}_#{name}" : name
@human_name = options[:human_name] || (@name ? @name.to_s.tr('_', ' ') : 'nil')
@value = options.include?(:value) ? options[:value] : name && name.to_s
@cache = options[:cache]
@matcher = options[:if]
@initial = options[:initial] == true
@context = StateContext.new(self)
if name
conflicting_machines = machine.owner_class.state_machines.select { |other_name, other_machine| other_machine != machine && other_machine.states[qualified_name, :qualified_name] }
# Output a warning if another machine has a conflicting qualified name
# for a different attribute
if conflict = conflicting_machines.detect { |other_name, other_machine| other_machine.attribute != machine.attribute }
name, other_machine = conflict
warn "State #{qualified_name.inspect} for #{machine.name.inspect} is already defined in #{other_machine.name.inspect}"
elsif conflicting_machines.empty?
# Only bother adding predicates when another machine for the same
# attribute hasn't already done so
add_predicate
end
end
end
# Creates a copy of this state, excluding the context to prevent conflicts
# across different machines.
def initialize_copy(orig) #:nodoc:
super
@context = StateContext.new(self)
end
def machine=(machine)
@machine = machine
@context = StateContext.new(self)
end
# Determines whether there are any states that can be transitioned to from
# this state. If there are none, then this state is considered *final*.
# Any objects in a final state will remain so forever given the current
# machine's definition.
def final?
!machine.events.any? do |event|
event.branches.any? do |branch|
branch.state_requirements.any? do |requirement|
requirement[:from].matches?(name) && !requirement[:to].matches?(name, :from => name)
end
end
end
end
# Transforms the state name into a more human-readable format, such as
# "first gear" instead of "first_gear"
def human_name(klass = @machine.owner_class)
@human_name.is_a?(Proc) ? @human_name.call(self, klass) : @human_name
end
# Generates a human-readable description of this state's name / value:
#
# For example,
#
# State.new(machine, :parked).description # => "parked"
# State.new(machine, :parked, :value => :parked).description # => "parked"
# State.new(machine, :parked, :value => nil).description # => "parked (nil)"
# State.new(machine, :parked, :value => 1).description # => "parked (1)"
# State.new(machine, :parked, :value => lambda {Time.now}).description # => "parked (*)
#
# Configuration options:
# * <tt>:human_name</tt> - Whether to use this state's human name in the
# description or just the internal name
def description(options = {})
label = options[:human_name] ? human_name : name
description = label ? label.to_s : label.inspect
description << " (#{@value.is_a?(Proc) ? '*' : @value.inspect})" unless name.to_s == @value.to_s
description
end
# The value that represents this state. This will optionally evaluate the
# original block if it's a lambda block. Otherwise, the static value is
# returned.
#
# For example,
#
# State.new(machine, :parked, :value => 1).value # => 1
# State.new(machine, :parked, :value => lambda {Time.now}).value # => Tue Jan 01 00:00:00 UTC 2008
# State.new(machine, :parked, :value => lambda {Time.now}).value(false) # => <Proc:0xb6ea7ca0@...>
def value(eval = true)
if @value.is_a?(Proc) && eval
if cache_value?
@value = @value.call
machine.states.update(self)
@value
else
@value.call
end
else
@value
end
end
# Determines whether this state matches the given value. If no matcher is
# configured, then this will check whether the values are equivalent.
# Otherwise, the matcher will determine the result.
#
# For example,
#
# # Without a matcher
# state = State.new(machine, :parked, :value => 1)
# state.matches?(1) # => true
# state.matches?(2) # => false
#
# # With a matcher
# state = State.new(machine, :parked, :value => lambda {Time.now}, :if => lambda {|value| !value.nil?})
# state.matches?(nil) # => false
# state.matches?(Time.now) # => true
def matches?(other_value)
matcher ? matcher.call(other_value) : other_value == value
end
# Defines a context for the state which will be enabled on instances of
# the owner class when the machine is in this state.
#
# This can be called multiple times. Each time a new context is created,
# a new module will be included in the owner class.
def context(&block)
# Include the context
context = @context
machine.owner_class.class_eval { include context }
# Evaluate the method definitions and track which ones were added
old_methods = context_methods
context.class_eval(&block)
new_methods = context_methods.to_a.select { |(name, method)| old_methods[name] != method }
# Alias new methods so that the only execute when the object is in this state
new_methods.each do |(method_name, _method)|
context_name = context_name_for(method_name)
context.class_eval <<-end_eval, __FILE__, __LINE__ + 1
alias_method :"#{context_name}", :#{method_name}
def #{method_name}(*args, &block)
state = self.class.state_machine(#{machine.name.inspect}).states.fetch(#{name.inspect})
options = {:method_missing => lambda {super(*args, &block)}, :method_name => #{method_name.inspect}}
state.call(self, :"#{context_name}", *(args + [options]), &block)
end
end_eval
end
true
end
# The list of methods that have been defined in this state's context
# Calls a method defined in this state's context on the given object. All
# arguments and any block will be passed into the method defined.
#
# If the method has never been defined for this state, then a NoMethodError
# will be raised.
def call(object, method, *args, &block)
options = args.last.is_a?(Hash) ? args.pop : {}
options = {:method_name => method}.merge(options)
state = machine.states.match!(object)
if state == self && object.respond_to?(method)
object.send(method, *args, &block)
elsif method_missing = options[:method_missing]
# Dispatch to the superclass since the object either isn't in this state
# or this state doesn't handle the method
begin
method_missing.call
rescue NoMethodError => ex
if ex.name.to_s == options[:method_name].to_s && ex.args == args
# No valid context for this method
raise InvalidContext.new(object, "State #{state.name.inspect} for #{machine.name.inspect} is not a valid context for calling ##{options[:method_name]}")
else
raise
end
end
end
end
def draw(graph, options = {})
fail NotImplementedError
end
# Generates a nicely formatted description of this state's contents.
#
# For example,
#
# state = StateMachines::State.new(machine, :parked, :value => 1, :initial => true)
# state # => #<StateMachines::State name=:parked value=1 initial=true context=[]>
def inspect
attributes = [[:name, name], [:value, @value], [:initial, initial?]]
"#<#{self.class} #{attributes.map { |attr, value| "#{attr}=#{value.inspect}" } * ' '}>"
end
private
# Should the value be cached after it's evaluated for the first time?
def cache_value?
@cache
end
# Adds a predicate method to the owner class so long as a name has
# actually been configured for the state
def add_predicate
# Checks whether the current value matches this state
machine.define_helper(:instance, "#{qualified_name}?") do |machine, object|
machine.states.matches?(object, name)
end
end
# Generates the name of the method containing the actual implementation
def context_name_for(method)
:"__#{machine.name}_#{name}_#{method}_#{@context.object_id}__"
end
end
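A sketch of the context machinery from the owner side, mirroring the state_machines DSL (the Vehicle example is illustrative):

class Vehicle
  state_machine :state, :initial => :parked do
    state :parked do
      def speed  # only callable while the machine is in :parked
        0
      end
    end
  end
end

Vehicle.new.speed  # => 0; raises StateMachines::InvalidContext in other states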
|
ideonetwork/lato-blog | app/models/lato_blog/category/serializer_helpers.rb | LatoBlog.Category::SerializerHelpers.serialize_base | ruby | def serialize_base
serialized = {}
# set basic info
serialized[:id] = id
serialized[:title] = title
serialized[:meta_language] = meta_language
serialized[:meta_permalink] = meta_permalink
# return serialized category
serialized
end | This function serializes a basic version of the category. | train | https://github.com/ideonetwork/lato-blog/blob/a0d92de299a0e285851743b9d4a902f611187cba/app/models/lato_blog/category/serializer_helpers.rb#L28-L39 | module Category::SerializerHelpers
# This function serializes a complete version of the category.
def serialize
serialized = {}
# set basic info
serialized[:id] = id
serialized[:title] = title
serialized[:meta_language] = meta_language
serialized[:meta_permalink] = meta_permalink
    # add category father information
    serialized[:category_father] = category_father ? category_father.serialize_base : nil
    # add category children information
    serialized[:category_children] = serialize_category_children
    # add other information (e.g. translations)
    serialized[:other_informations] = serialize_other_informations
    # return serialized category
serialized
end
# This function serializes a basic version of the category.
private
def serialize_category_children
serialized = {}
category_children.each do |category|
serialized[category.id] = category.serialize_base
end
serialized
end
def serialize_other_informations
serialized = {}
# set translations links
serialized[:translations] = {}
category_parent.categories.each do |category|
next if category.id == id
serialized[:translations][category.meta_language] = category.serialize_base
end
# return serialzed informations
serialized
end
end
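A sketch of the shape serialize_base returns (values illustrative):

category.serialize_base
# => { id: 1, title: 'News', meta_language: 'en', meta_permalink: 'news' }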
|
senchalabs/jsduck | lib/jsduck/tag/member_tag.rb | JsDuck::Tag.MemberTag.member_params | ruby | def member_params(params)
ps = Array(params).map do |p|
p[:optional] ? "[#{p[:name]}]" : p[:name]
end.join(", ")
"( <span class='pre'>#{ps}</span> )"
end | Creates HTML listing of parameters.
When called with nil, creates empty params list.
A helper method for use in #to_html. | train | https://github.com/senchalabs/jsduck/blob/febef5558ecd05da25f5c260365acc3afd0cafd8/lib/jsduck/tag/member_tag.rb#L121-L127 | class MemberTag < Tag
# Defines a class member type and specifies various settings. For
# example:
#
# {
# :title => "Events",
# :position => MEMBER_POS_EVENT,
# # The following are optional
# :toolbar_title => "Events",
# :icon => File.dirname(__FILE__) + "/icons/event.png",
# :subsections => [
# {:title => "Static events",
# :filter => {:static => false},
# :default => true},
# {:title => "Instance events",
# :filter => {:static => true}},
# ]
# }
#
# Position defines the ordering of member section in final HTML
# output.
#
# Title is shown at the top of each such section and also as a
# label on Docs app toolbar button unless :toolbar_title is
# specified.
#
# Icon defines a file to be used as member icon in various places
# of the docs app.
#
# Subsections allows splitting the list of members to several
# subgroups. For example methods get split into static and
# instance methods.
#
# - The :filter field defines how to filter out the members for
# this subcategory. :static=>true filters out all members that
# have a :static field with a truthy value. Conversely,
# :static=>false filters out members not having a :static field
# or having it with a falsy value.
#
# - Setting :default=>true will hide the subsection title when all
# the members end up in that subsection. For example when there
# are only instance methods, the docs will only contain the
# section title "Methods", as by default one should assume all
# methods are instance methods if not stated otherwise.
#
attr_reader :member_type
# Avoid already-defined-constant warnings in Ruby 1.8
unless defined?(MEMBER_POS_CFG)
MEMBER_POS_CFG = 1
MEMBER_POS_PROPERTY = 2
MEMBER_POS_METHOD = 3
MEMBER_POS_EVENT = 4
MEMBER_POS_CSS_VAR = 5
MEMBER_POS_CSS_MIXIN = 6
end
# Extracts the fields auto-detected from code that are relevant to
# the member type and returns a hash with them.
#
# The implementation here extracts fields applicable to all member
# types. When additional member-specific fields are to be
# extracted, override this method, but be sure to call the
# superclass method too.
#
# For example inside Method tag we might additionally want to
# extract :type and :default:
#
# def process_code(code)
# h = super(code)
# h[:type] = code[:type]
# h[:default] = code[:default]
# h
# end
#
def process_code(code)
return {
:tagname => code[:tagname],
# An auto-detected name might be "MyClass.prototype.myMethod" -
# for member name we only want the last "myMethod" part.
:name => code[:name] ? code[:name].split(/\./).last : nil,
:autodetected => code[:autodetected],
:inheritdoc => code[:inheritdoc],
:static => code[:static],
:private => code[:private],
:inheritable => code[:inheritable],
:linenr => code[:linenr],
}
end
# Merges documentation and code hashes into the result hash.
def merge(hash, docs, code)
end
# This method defines the signature-line of the member.
# For example it might return something like this:
#
# "apply(source, target) : Object"
#
# Use #member_link method to render the member name as link in a
# standard way. Similarly there's helper method #member_params
# for rendering the parameter list.
def to_html(context, cls)
end
# Creates HTML link to the given member.
# A helper method for use in #to_html.
def member_link(member)
JsDuck::Render::SignatureUtil::link(member[:owner], member[:id], member[:name])
end
# Creates HTML listing of parameters.
# When called with nil, creates empty params list.
# A helper method for use in #to_html.
end
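member_params is a pure helper, so its behavior is easy to sketch; optional parameters gain square brackets:

member_params([{ :name => 'source' }, { :name => 'target', :optional => true }])
# => "( <span class='pre'>source, [target]</span> )"
member_params(nil)
# => "( <span class='pre'></span> )"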
|
nafu/aws_sns_manager | lib/aws_sns_manager/client.rb | AwsSnsManager.Client.normal_notification | ruby | def normal_notification(text, options = {})
base = {
aps: {
alert: {
title: nil,
subtitle: nil,
body: text
},
sound: options.delete(:sound),
badge: 1,
'mutable-content': 1,
'content-available': 1
}
}
base.merge(options)
end | rubocop:disable Metrics/MethodLength | train | https://github.com/nafu/aws_sns_manager/blob/9ec6ce1799d1195108e95a1efa2dd21437220a3e/lib/aws_sns_manager/client.rb#L50-L65 | class Client
attr_accessor :client
attr_accessor :arn
def initialize(options = {})
super()
@client = Aws::SNS::Client.new(options)
end
def send(text = nil, options = {}, env = :prod, type = :normal)
message = message(text, options, env, type).to_json
response = publish_rescue(message)
!response.nil?
end
def publish_rescue(message)
client.publish(
target_arn: arn,
message: message,
message_structure: 'json'
)
end
#
# Return json payload
#
# +text+:: Text you want to send
# +options+:: Options you want on payload
# +env+:: Environments :prod, :dev
# +type+:: Notification type :normal, :silent, :nosound
#
def message(text, options = {}, env = :prod, type = :normal)
if type == :normal
data = normal_notification(text, options)
elsif type == :silent
data = silent_notification(text, options)
elsif type == :nosound
data = nosound_notification(text, options)
end
return dev_json(data) if env == :dev
prod_json(data)
end
# rubocop:disable Metrics/MethodLength
# rubocop:enable Metrics/MethodLength
def silent_notification(_text, options = {})
base = {
aps: {
'mutable-content': 1,
'content-available': 1
}
}
base.merge(options)
end
# rubocop:disable Metrics/MethodLength
def nosound_notification(text, options = {})
base = {
aps: {
alert: {
title: nil,
subtitle: nil,
body: text
},
badge: 1,
'mutable-content': 1,
'content-available': 1
}
}
base.merge(options)
end
# rubocop:enable Metrics/MethodLength
private
def dev_json(data)
{ default: data.to_json, APNS_SANDBOX: data.to_json }
end
def prod_json(data)
{ default: data.to_json, APNS: data.to_json, GCM: { data: data }.to_json }
end
end
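A hedged end-to-end sketch; the region and topic ARN are placeholders:

client = AwsSnsManager::Client.new(region: 'us-east-1')     # options forwarded to Aws::SNS::Client
client.arn = 'arn:aws:sns:us-east-1:123456789012:my-topic'  # placeholder ARN
client.send('Hello', { sound: 'default' }, :prod, :normal)  # => true when publish succeeds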
|
lostisland/faraday | lib/faraday/utils.rb | Faraday.Utils.URI | ruby | def URI(url) # rubocop:disable Naming/MethodName
if url.respond_to?(:host)
url
elsif url.respond_to?(:to_str)
default_uri_parser.call(url)
else
raise ArgumentError, 'bad argument (expected URI object or URI string)'
end
end | Normalize URI() behavior across Ruby versions
url - A String or URI.
Returns a parsed URI. | train | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/utils.rb#L55-L63 | module Utils
module_function
def build_query(params)
FlatParamsEncoder.encode(params)
end
def build_nested_query(params)
NestedParamsEncoder.encode(params)
end
ESCAPE_RE = /[^a-zA-Z0-9 .~_-]/.freeze
def escape(str)
str.to_s.gsub(ESCAPE_RE) do |match|
'%' + match.unpack('H2' * match.bytesize).join('%').upcase
end.tr(' ', '+')
end
def unescape(str)
CGI.unescape str.to_s
end
DEFAULT_SEP = /[&;] */n.freeze
# Adapted from Rack
def parse_query(query)
FlatParamsEncoder.decode(query)
end
def parse_nested_query(query)
NestedParamsEncoder.decode(query)
end
def default_params_encoder
@default_params_encoder ||= NestedParamsEncoder
end
class << self
attr_writer :default_params_encoder
end
# Normalize URI() behavior across Ruby versions
#
# url - A String or URI.
#
# Returns a parsed URI.
def default_uri_parser
@default_uri_parser ||= begin
require 'uri'
Kernel.method(:URI)
end
end
def default_uri_parser=(parser)
@default_uri_parser = if parser.respond_to?(:call) || parser.nil?
parser
else
parser.method(:parse)
end
end
# Receives a String or URI and returns just
# the path with the query string sorted.
def normalize_path(url)
url = URI(url)
(url.path.start_with?('/') ? url.path : '/' + url.path) +
(url.query ? "?#{sort_query_params(url.query)}" : '')
end
# Recursive hash update
def deep_merge!(target, hash)
hash.each do |key, value|
target[key] = if value.is_a?(Hash) && target[key].is_a?(Hash)
deep_merge(target[key], value)
else
value
end
end
target
end
# Recursive hash merge
def deep_merge(source, hash)
deep_merge!(source.dup, hash)
end
def sort_query_params(query)
query.split('&').sort.join('&')
end
end
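Two of the helpers above are pure functions; a quick sketch of their output:

Faraday::Utils.escape('a b&c')                                 # => "a+b%26c"
Faraday::Utils.normalize_path('http://example.com/a?b=2&a=1')  # => "/a?a=1&b=2"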
|
murb/workbook | lib/workbook/row.rb | Workbook.Row.push | ruby | def push(cell)
cell = Workbook::Cell.new(cell, {row:self}) unless cell.class == Workbook::Cell
super(cell)
end | Add cell
@param [Workbook::Cell, Numeric,String,Time,Date,TrueClass,FalseClass,NilClass] cell or value to add | train | https://github.com/murb/workbook/blob/2e12f43c882b7c235455192a2fc48183fe6ec965/lib/workbook/row.rb#L67-L70 | class Row < Array
include Workbook::Modules::Cache
alias_method :compare_without_header, :<=>
attr_accessor :placeholder # The placeholder attribute is used in compares (corresponds to newly created or removed lines (depending which side you're on)
attr_accessor :format
# Initialize a new row
#
# @param [Workbook::Row, Array<Workbook::Cell>, Array] cells list of cells to initialize the row with, default is empty
# @param [Workbook::Table] table a row normally belongs to a table, reference it here
# @param [Hash] options Supported options: parse_cells_on_batch_creation (parse cell values during row-initalization, default: false), cell_parse_options (default {}, see Workbook::Modules::TypeParser)
def initialize cells=[], table=nil, options={}
options=options ? {:parse_cells_on_batch_creation=>false,:cell_parse_options=>{},:clone_cells=>false}.merge(options) : {}
cells = [] if cells==nil
self.table= table
cells.each do |c|
if c.is_a? Workbook::Cell
c = c.clone if options[:clone_cells]
else
c = Workbook::Cell.new(c, {row:self})
c.parse!(options[:cell_parse_options]) if options[:parse_cells_on_batch_creation]
end
push c
end
end
# An internal function used in diffs
#
# @return [Boolean] returns true when this row is not an actual row, but a placeholder row to 'compare' against
def placeholder?
placeholder ? true : false
end
# Returns the table this row belongs to
#
# @return [Workbook::Table] the table this row belongs to
def table
@table if defined?(@table)
end
# Set reference to the table this row belongs to without adding the row to the table
#
# @param [Workbook::Table] t the table this row belongs to
def set_table(t)
@table = t
end
# Set reference to the table this row belongs to and add the row to this table
#
# @param [Workbook::Table] t the table this row belongs to
def table= t
raise ArgumentError, "table should be a Workbook::Table (you passed a #{t.class})" unless t.is_a?(Workbook::Table) or t == nil
if t
@table = t
table.push(self) #unless table.index(self) and self.placeholder?
end
end
# Add cell
# @param [Workbook::Cell, Numeric,String,Time,Date,TrueClass,FalseClass,NilClass] cell or value to add
# Add cell
# @param [Workbook::Cell, Numeric,String,Time,Date,TrueClass,FalseClass,NilClass] cell or value to add
def <<(cell)
cell = Workbook::Cell.new(cell, {row:self}) unless cell.class == Workbook::Cell
super(cell)
end
# plus
# @param [Workbook::Row, Array] row to add
# @return [Workbook::Row] a new row, not linked to the table
def +(row)
rv = super(row)
rv = Workbook::Row.new(rv) unless rv.class == Workbook::Row
return rv
end
# concat
# @param [Workbook::Row, Array] row to add
# @return [self] self
def concat(row)
row = Workbook::Row.new(row) unless row.class == Workbook::Row
super(row)
end
# Overrides normal Array's []-function with support for symbols that identify a column based on the header-values and / or
#
# @example Lookup using fixnum or header value encoded as symbol
# row[1] #=> <Cell value="a">
# row["A"] #=> <Cell value="a">
# row[:a] #=> <Cell value="a">
#
# @param [Fixnum, Symbol, String] index_or_hash that identifies the column (strings are converted to symbols)
# @return [Workbook::Cell, nil]
def [](index_or_hash)
if index_or_hash.is_a? Symbol
rv = nil
begin
rv = to_hash[index_or_hash]
rescue NoMethodError
end
return rv
elsif index_or_hash.is_a? String and index_or_hash.match(/^[A-Z]*$/)
# it looks like a column indicator
return to_a[Workbook::Column.alpha_index_to_number_index(index_or_hash)]
elsif index_or_hash.is_a? String
symbolized = Workbook::Cell.new(index_or_hash, {row:self}).to_sym
self[symbolized]
else
if index_or_hash
return to_a[index_or_hash]
end
end
end
# Overrides normal Array's []=-function with support for symbols that identify a column based on the header-values
#
# @example Lookup using fixnum or header value encoded as symbol (strings are converted to symbols)
# row[1] #=> <Cell value="a">
# row[:a] #=> <Cell value="a">
#
# @param [Fixnum, Symbol, String] index_or_hash that identifies the column
# @param [String, Fixnum, NilClass, Date, DateTime, Time, Float] value
# @return [Workbook::Cell, nil]
def []= index_or_hash, value
index = index_or_hash
if index_or_hash.is_a? Symbol
index = table_header_keys.index(index_or_hash)
elsif index_or_hash.is_a? String and index_or_hash.match(/^[A-Z]*$/)
# it looks like a column indicator
index = Workbook::Column.alpha_index_to_number_index(index_or_hash)
elsif index_or_hash.is_a? String
symbolized = Workbook::Cell.new(index_or_hash, {row:self}).to_sym
index = table_header_keys.index(symbolized)
end
value_celled = Workbook::Cell.new
if value.is_a? Workbook::Cell
value_celled = value
else
current_cell = self[index]
if current_cell.is_a? Workbook::Cell
value_celled = current_cell
end
value_celled.value=(value)
end
value_celled.row = self
super(index,value_celled)
end
  # Returns an array of cells; allows you to find cells by a given background color, normally a string containing a hex value
#
# @param [String] color a CSS-style hex-string
# @param [Hash] options Option :hash_keys (default true) returns row as an array of symbols
# @return [Array<Symbol>, Workbook::Row<Workbook::Cell>]
def find_cells_by_background_color color=:any, options={}
options = {:hash_keys=>true}.merge(options)
cells = self.collect {|c| c if c.format.has_background_color?(color) }.compact
r = Row.new cells
options[:hash_keys] ? r.to_symbols : r
end
# Returns true when the row belongs to a table and it is the header row (typically the first row)
#
# @return [Boolean]
def header?
table != nil and self.object_id == table_header.object_id
end
# Is this the first row in the table
#
# @return [Boolean, NilClass] returns nil if it doesn't belong to a table, false when it isn't the first row of a table and true when it is.
def first?
table != nil and self.object_id == table.first.object_id
end
# Returns true when all the cells in the row have values whose to_s value equals an empty string
#
# @return [Boolean]
def no_values?
all? {|c| c.value.to_s == ''}
end
# Converts a row to an array of symbol representations of the row content, see also: Workbook::Cell#to_sym
# @return [Array<Symbol>] returns row as an array of symbols
def to_symbols
fetch_cache(:to_symbols){
collect{|c| c.to_sym}
}
end
# Converts the row to an array of Workbook::Cell's
# @return [Array<Workbook::Cell>] returns row as an array of symbols
def to_a
self.collect{|c| c}
end
def table_header
table.header
end
def table_header_keys
table_header.to_symbols
end
# Returns a hash representation of this row
#
# @return [Hash]
def to_hash
keys = table_header_keys
values = self
hash = {}
keys.each_with_index {|k,i| hash[k]=values[i]}
return hash
end
# Quick assessor to the book's template, if it exists
#
# @return [Workbook::Template]
def template
table.template if table
end
# Returns a hash representation of this row
#
# it differs from #to_hash as it doesn't contain the Workbook's Workbook::Cell-objects,
# but the actual values contained in these cells
#
# @return [Hash]
def to_hash_with_values
keys = table_header_keys
values = self
@hash_with_values = {}
keys.each_with_index {|k,i| v=values[i]; v=v.value if v; @hash_with_values[k]=v}
return @hash_with_values
end
# Compares one row wiht another
#
# @param [Workbook::Row] other row to compare against
# @return [Workbook::Row] a row with the diff result.
def <=> other
a = self.header? ? 0 : 1
b = other.header? ? 0 : 1
return (a <=> b) if (a==0 or b==0)
compare_without_header other
end
# The first cell of the row is considered to be the key
#
# @return [Workbook::Cell] the key cell
def key
first
end
# Compact detaches the row from the table
def compact
r = self.clone
r = r.collect{|c| c unless c.nil?}.compact
end
# clone the row with together with the cells
#
# @return [Workbook::Row] a cloned copy of self with cells
def clone
Workbook::Row.new(self, nil, {:clone_cells=>true})
end
# remove all the trailing nil-cells (returning a trimmed clone)
#
# @param [Integer] desired_length of the new row
# @return [Workbook::Row] a trimmed clone of the array
def trim(desired_length=nil)
self.clone.trim!(desired_length)
end
# remove all the trailing nil-cells (returning a trimmed self)
#
# @param [Integer] desired_length of the new row
# @return [Workbook::Row] self
def trim!(desired_length=nil)
self_count = self.count-1
self.count.times do |index|
index = self_count - index
if desired_length and index < desired_length
break
elsif desired_length and index >= desired_length
self.delete_at(index)
elsif self[index].nil?
self.delete_at(index)
else
break
end
end
(desired_length - self.count).times{|a| self << (Workbook::Cell.new(nil))} if desired_length and (desired_length - self.count) > 0
self
end
end
|
oleganza/btcruby | lib/btcruby/open_assets/asset_processor.rb | BTC.AssetProcessor.verify_asset_transaction | ruby | def verify_asset_transaction(asset_transaction)
raise ArgumentError, "Asset Transaction is missing" if !asset_transaction
# Perform a depth-first scan.
# When completed, we'll only have transactions that have all previous txs fully verified.
atx_stack = [ asset_transaction ]
i = 0
max_size = atx_stack.size
while i < atx_stack.size # note: array dynamically changes as we scan it
atx = atx_stack[i]
BTC::Invariant(atx.is_a?(AssetTransaction), "Must be AssetTransaction instance")
more_atxs_to_verify = partial_verify_asset_transaction(atx)
if more_atxs_to_verify == nil # Validation failed - return immediately
return false
elsif more_atxs_to_verify.size == 0 # atx is fully verifiable (issue-only or we used cached parents), remove it from the list
# outputs may not be all verified because they can be transfers with cached verified inputs.
# so we need to verify local balance and apply asset ids from inputs to outputs
if !verify_transfers(atx)
return false
end
if i == 0 # that was the topmost transaction, we are done.
return true
end
@source.asset_transaction_was_verified(atx)
atx_stack.delete_at(i)
i -= 1
# If this is was the last parent to check, then the previous item would be :parents marker
# Once we validate the child behind that marker, we might have another child or the marker again.
# Unroll the stack until the previous item is not a child with all parents verified.
while (child_atx = atx_stack[i]) == :parents
BTC::Invariant(i >= 1, ":parents marker should be preceded by an asset transaction")
atx_stack.delete_at(i)
i -= 1
child_atx = atx_stack.delete_at(i)
# Now all inputs are verified, we only need to verify the transfer outputs against them.
# This will make outputs verified for the later transactions (earlier in the list).
if !verify_transfers(child_atx)
return false
end
# Now transaction is fully verified.
# Source can cache it if needed.
@source.asset_transaction_was_verified(child_atx)
if i == 0 # this was the topmost child, return
return true
end
i -= 1
end
else
# we have more things to verify - dig inside these transactions
# Start with the last one so once any tx is verifed, we can move back and color inputs of the child transaction.
atx_stack.insert(i+1, :parents, *more_atxs_to_verify)
max_size = atx_stack.size if atx_stack.size > max_size
j = i
i += more_atxs_to_verify.size + 1
end
end
return true
end | Scans backwards and validates every AssetTransaction on the way.
Does not verify Bitcoin transactions (assumes amounts and scripts are already validated).
Updates verified flags on the asset transaction.
Returns `true` if asset transaction is verified successfully.
Returns `false` otherwise. | train | https://github.com/oleganza/btcruby/blob/0aa0231a29dfc3c9f7fc54b39686aed10b6d9808/lib/btcruby/open_assets/asset_processor.rb#L27-L91 | class AssetProcessor
# AssetProcessorSource instance that provides transactions.
attr_accessor :source
# Network to use for encoding AssetIDs. Default is `Network.default`.
attr_accessor :network
def initialize(source: nil, network: nil)
raise ArgumentError, "Source is missing." if !source
@source = source
@network = network || Network.default
end
# Scans backwards and validates every AssetTransaction on the way.
# Does not verify Bitcoin transactions (assumes amounts and scripts are already validated).
# Updates verified flags on the asset transaction.
# Returns `true` if asset transaction is verified successfully.
# Returns `false` otherwise.
def color_transaction_inputs(atx)
atx.inputs.each do |ain|
if !ain.verified?
prev_atx = ain.previous_asset_transaction
BTC::Invariant(!!prev_atx, "Must have previous asset transaction")
if !prev_atx.verified?
#puts "\n Prev ATX not fully verified: #{prev_atx.inspect} -> input #{ain.index} of #{atx.inspect}"
end
BTC::Invariant(prev_atx.verified?, "Must have previous asset transaction outputs fully verified")
output = prev_atx.outputs[ain.transaction_input.previous_index]
BTC::Invariant(output && output.verified?, "Must have a valid reference to a previous verified output")
# Copy over color information. The output can be colored or uncolored.
ain.asset_id = output.asset_id
ain.value = output.value
ain.verified = true
end
end
end
# Returns a list of asset transactions remaining to verify.
# Returns an empty array if verification succeeded and there is nothing more to verify.
# Returns `nil` if verification failed.
def partial_verify_asset_transaction(asset_transaction)
raise ArgumentError, "Asset Transaction is missing" if !asset_transaction
# 1. Verify issuing transactions and collect transfer outputs
cache_transactions do
if !verify_issues(asset_transaction)
return nil
end
# No transfer outputs, this transaction is verified.
# If there are assets on some inputs, they are destroyed.
if asset_transaction.transfer_outputs.size == 0
# We keep inputs unverified to indicate that they were not even processed.
return []
end
# 2. Fetch parent transactions to verify.
# * Verify inputs from non-OpenAsset transactions.
# * Return OA transactions for verification.
# * Link each input to its OA transaction output.
prev_unverified_atxs_by_hash = {}
asset_transaction.inputs.each do |ain|
txin = ain.transaction_input
# Ask source if it has a cached verified transaction for this input.
prev_atx = @source.verified_asset_transaction_for_hash(txin.previous_hash)
if prev_atx
BTC::Invariant(prev_atx.verified?, "Cached verified tx must be fully verified")
end
prev_atx ||= prev_unverified_atxs_by_hash[txin.previous_hash]
if !prev_atx
prev_tx = transaction_for_input(txin)
if !prev_tx
Diagnostics.current.add_message("Failed to load previous transaction for input #{ain.index}: #{txin.previous_id}")
return nil
end
begin
prev_atx = AssetTransaction.new(transaction: prev_tx)
prev_unverified_atxs_by_hash[prev_atx.transaction_hash] = prev_atx
rescue FormatError => e
# Previous transaction is not a valid Open Assets transaction,
# so we mark the input as uncolored and verified as such.
ain.asset_id = nil
ain.value = nil
ain.verified = true
end
end
# Remember a reference to this transaction so we can validate the whole `asset_transaction` when all previous ones are set and verified.
ain.previous_asset_transaction = prev_atx
end # each input
# Return all unverified transactions.
# Note: this won't include the already verified one.
prev_unverified_atxs_by_hash.values
end
end
# Attempts to verify issues. Fetches parent transactions to determine AssetID.
# Returns `true` if verified all issue outputs.
# Returns `false` if previous tx defining AssetID is not found.
def verify_issues(asset_transaction)
previous_txout = nil # fetch only when we have > 0 issue outputs
asset_transaction.outputs.each do |aout|
if !aout.verified?
if aout.value && aout.value > 0
if aout.issue?
previous_txout ||= transaction_output_for_input(asset_transaction.inputs[0].transaction_input)
if !previous_txout
Diagnostics.current.add_message("Failed to assign AssetID to issue output #{aout.index}: can't find output for input #0")
return false
end
aout.asset_id = AssetID.new(script: previous_txout.script, network: self.network)
# Output issues some known asset and amount and therefore it is verified.
aout.verified = true
else
# Transfer outputs must be matched with known asset ids on the inputs.
end
else
# Output without a value is uncolored.
aout.asset_id = nil
aout.value = nil
aout.verified = true
end
end
end
true
end # verify_issues
# Attempts to verify transfer transactions assuming all inputs are verified.
# Returns `true` if all transfers are verified (also updates `verified` and `asset_id` on them).
# Returns `false` if any transfer is invalid or some inputs are not verified.
def verify_transfers(asset_transaction)
# Do not verify colors on inputs if no transfers occur.
# Typically it's an issuance tx. If there are assets on inputs, they are destroyed.
if asset_transaction.transfer_outputs.size == 0
return true
end
color_transaction_inputs(asset_transaction)
current_asset_id = nil
inputs = asset_transaction.inputs.dup
current_asset_id = nil
current_input = nil
current_input_remainder = 0
asset_transaction.outputs.each do |aout|
# Only check outputs that can be colored (value > 0) and are transfer outputs (after the marker)
if aout.has_value? && !aout.marker? && !aout.issue?
aout.asset_id = nil
remaining_value = aout.value
# Try to fill in the output with available inputs.
while remaining_value > 0
BTC::Invariant((current_input_remainder == 0) ? (current_input == nil) : true,
"Remainder must be == 0 only when current_input is nil")
BTC::Invariant((current_input_remainder > 0) ? (current_input && current_input.colored?) : true,
"Remainder must be > 0 only when transfer input is colored")
current_input ||= inputs.shift
# skip uncolored inputs
while current_input && !current_input.colored?
current_input = inputs.shift
end
if !current_input
Diagnostics.current.add_message("Failed to assign AssetID to transfer output #{aout.index}: not enough colored inputs (#{remaining_value} missing for output #{aout.index}).")
return false
end
# Need to consume aout.value units from inputs and extract the asset ID
if !current_input.verified?
Diagnostics.current.add_message("Failed to assign AssetID to transfer output #{aout.index}: input #{current_input.index} is not verified.")
return false
end
# Make sure asset ID matches.
# If output gets assigned units from 2 or more inputs, all asset ids must be the same.
if !aout.asset_id
aout.asset_id = current_input.asset_id
else
if aout.asset_id != current_input.asset_id
Diagnostics.current.add_message("Failed to assign AssetID to transfer output #{aout.index}: already assigned another AssetID.")
return false
end
end
# If we have remainder from the previous output, use it.
# Otherwise use the whole input's value.
qty = if current_input_remainder > 0
current_input_remainder
else
current_input.value
end
if qty <= remaining_value
remaining_value -= qty
# choose next input, clear remainder.
current_input_remainder = 0
current_input = nil
else
current_input_remainder = qty - remaining_value
remaining_value = 0
# keep the current input to use with `current_input_remainder` in the next output
end
end # filling in the output
aout.verified = true
BTC::Invariant(aout.verified && aout.asset_id && aout.value > 0, "Transfer output should be fully verified")
end # only transfer outputs with value > 0
end # each output
# Some inputs may have remained. If those have some assets, they'll be destroyed.
if current_input_remainder > 0
Diagnostics.current.add_message("Warning: #{current_input_remainder} units left over from input #{current_input.index} will be destroyed.")
end
while current_input = inputs.shift
if current_input.colored?
Diagnostics.current.add_message("Warning: #{current_input.value} units from input #{current_input.index} will be destroyed.")
end
end
return true
end # verify_transfers
# Scans forward and validates every AssetTransaction on the way.
def discover_asset(asset_id: nil)
# TODO: ...
end
protected
def cache_transactions(&block)
begin
@cached_txs_depth ||= 0
@cached_txs_depth += 1
@cached_txs ||= {}
result = yield
ensure
@cached_txs_depth -= 1
@cached_txs = nil if @cached_txs_depth <= 0
end
result
end
def transaction_for_input(txin)
transaction_for_hash(txin.previous_hash)
end
def transaction_output_for_input(txin)
tx = transaction_for_input(txin)
if tx
tx.outputs[txin.previous_index]
else
nil
end
end
def transaction_for_hash(hash)
if @cached_txs && (tx = @cached_txs[hash])
return tx
end
tx = @source.transaction_for_hash(hash)
if @cached_txs && tx
@cached_txs[tx.transaction_hash] = tx
end
tx
end
end
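# Illustrative usage sketch (assumes `my_source` implements the
# AssetProcessorSource interface: #transaction_for_hash plus the
# verified-transaction cache callbacks used above):
#
#   processor = BTC::AssetProcessor.new(source: my_source)
#   atx = BTC::AssetTransaction.new(transaction: tx)
#   processor.verify_asset_transaction(atx) # => true or false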
|
oleganza/btcruby | lib/btcruby/data.rb | BTC.Data.ensure_ascii_compatible_encoding | ruby | def ensure_ascii_compatible_encoding(string, options = nil)
if string.encoding.ascii_compatible?
string
else
string.encode(Encoding::UTF_8, options || {:invalid => :replace, :undef => :replace})
end
end | Returns string as-is if it is ASCII-compatible
(that is, if you are interested in 7-bit characters exposed as #bytes).
If it is not, attempts to transcode to UTF8 replacing invalid characters if there are any.
If options are not specified, uses safe default that replaces unknown characters with standard character.
If options are specified, they are used as-is for String#encode method. | train | https://github.com/oleganza/btcruby/blob/0aa0231a29dfc3c9f7fc54b39686aed10b6d9808/lib/btcruby/data.rb#L91-L97 | module Data
extend self
HEX_PACK_CODE = "H*".freeze
BYTE_PACK_CODE = "C*".freeze
# Generates a secure random number of a given length
def random_data(length = 32)
SecureRandom.random_bytes(length)
end
# Converts hexadecimal string to a binary data string.
def data_from_hex(hex_string)
raise ArgumentError, "Hex string is missing" if !hex_string
hex_string = hex_string.strip
data = [hex_string].pack(HEX_PACK_CODE)
if hex_from_data(data) != hex_string.downcase # invalid hex string was detected
raise FormatError, "Hex string is invalid: #{hex_string.inspect}"
end
return data
end
# Converts binary string to lowercase hexadecimal representation.
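# @example (illustrative)
#   BTC::Data.hex_from_data("\x01\xFF".b) # => "01ff"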
def hex_from_data(data)
raise ArgumentError, "Data is missing" if !data
return data.unpack(HEX_PACK_CODE).first
end
def to_hex(data)
hex_from_data(data)
end
def from_hex(hex)
data_from_hex(hex)
end
# Converts a binary string to an array of bytes (list of integers).
# Returns a much more efficient slice of bytes if offset/limit or
# range are specified. That is, avoids converting the entire buffer to byte array.
#
# Note 1: if range is specified, it takes precedence over offset/limit.
#
# Note 2: byteslice(...).bytes is less efficient as it creates
# an intermediate shorter string.
#
def bytes_from_data(data, offset: 0, limit: nil, range: nil)
raise ArgumentError, "Data is missing" if !data
if offset == 0 && limit == nil && range == nil
return data.bytes
end
if range
offset = range.begin
limit = range.size
end
bytes = []
data.each_byte do |byte|
if offset > 0
offset -= 1
else
if !limit || limit > 0
bytes << byte
limit -= 1 if limit
else
break
end
end
end
bytes
end
# Converts an array of bytes (list of integers) to a binary string.
def data_from_bytes(bytes)
raise ArgumentError, "Bytes are missing" if !bytes
bytes.pack(BYTE_PACK_CODE)
end
# Returns string as-is if it is ASCII-compatible
# (that is, if you are interested in 7-bit characters exposed as #bytes).
# If it is not, attempts to transcode to UTF8 replacing invalid characters if there are any.
# If options are not specified, uses safe default that replaces unknown characters with standard character.
# If options are specified, they are used as-is for String#encode method.
# Returns string as-is if it is already encoded in binary encoding (aka BINARY or ASCII-8BIT).
# If it is not, converts to binary by calling stdlib's method #b.
def ensure_binary_encoding(string)
raise ArgumentError, "String is missing" if !string
if string.encoding == Encoding::BINARY
string
else
string.b
end
end
end
|
rjurado01/rapidoc | lib/rapidoc/resource_doc.rb | Rapidoc.ResourceDoc.generate_info | ruby | def generate_info( routes_info )
if routes_info
extractor = get_controller_extractor
@description = extractor.get_resource_info['description'] if extractor
@actions_doc = get_actions_doc( routes_info, extractor )
# the template needs the description to be an array
@description = [ @description ] unless @description.class == Array
end
end | Create description and actions_doc | train | https://github.com/rjurado01/rapidoc/blob/03b7a8f29a37dd03f4ed5036697b48551d3b4ae6/lib/rapidoc/resource_doc.rb#L36-L45 | class ResourceDoc
attr_reader :name, :description, :controller_file, :actions_doc
##
# @param resource_name [String] resource name
# @param routes_actions_info [Array] routes actions info used to build the ActionDocs
#
def initialize( resource_name, routes_actions_info )
@name = resource_name.to_s.split('/').last
@controller_file = resource_name.to_s.pluralize + '_controller' + controllers_extension
generate_info routes_actions_info
end
##
# Names with the '/' character produce problems in HTML ids
#
def simple_name
return self.name.delete '/'
end
private
##
# Create description and actions_doc
#
##
# @return [ControllerExtractor] extractor that allow read controller files
# and extract action and resource info from them
#
def get_controller_extractor
if File.exists? controller_dir( @controller_file )
ControllerExtractor.new @controller_file
else
nil
end
end
##
# @return [Array] all the resource ActionDoc
#
def get_actions_doc( routes_actions_info, extractor )
routes_actions_info.map do |route_info|
controller_info = extractor ? extractor.get_action_info( route_info[:action] ) : nil
ActionDoc.new( route_info, controller_info, examples_dir )
end
end
end
|
stripe/stripe-ruby | lib/stripe/stripe_object.rb | Stripe.StripeObject.empty_values | ruby | def empty_values(obj)
values = case obj
when Hash then obj
when StripeObject then obj.instance_variable_get(:@values)
else
raise ArgumentError, "#empty_values got unexpected object type: #{obj.class.name}"
end
values.each_with_object({}) do |(k, _), update|
update[k] = ""
end
end | Returns a hash of empty values for all the values that are in the given
StripeObject. | train | https://github.com/stripe/stripe-ruby/blob/322a8c60be8a9b9ac8aad8857864680a32176935/lib/stripe/stripe_object.rb#L556-L567 | class StripeObject
include Enumerable
@@permanent_attributes = Set.new([:id])
# The default :id method is deprecated and isn't useful to us
undef :id if method_defined?(:id)
# Sets the given parameter name to one which is known to be an additive
# object.
#
# Additive objects are subobjects in the API that don't have the same
# semantics as most subobjects, which are fully replaced when they're set.
# This is best illustrated by example. The `source` parameter sent when
# updating a subscription is *not* additive; if we set it:
#
# source[object]=card&source[number]=123
#
# We expect the old `source` object to have been overwritten completely. If
# the previous source had an `address_state` key associated with it and we
# didn't send one this time, that value of `address_state` is gone.
#
# By contrast, additive objects are those that will have new data added to
# them while keeping any existing data in place. The only known case of its
# use is for `metadata`, but it could in theory be more general. As an
# example, say we have a `metadata` object that looks like this on the
# server side:
#
# metadata = { old: "old_value" }
#
# If we update the object with `metadata[new]=new_value`, the server side
# object now has *both* fields:
#
# metadata = { old: "old_value", new: "new_value" }
#
# This is okay in itself because usually users will want to treat it as
# additive:
#
# obj.metadata[:new] = "new_value"
# obj.save
#
# However, in other cases, they may want to replace the entire existing
# contents:
#
# obj.metadata = { new: "new_value" }
# obj.save
#
# This is where things get a little bit tricky because in order to clear
# any old keys that may have existed, we actually have to send an explicit
# empty string to the server. So the operation above would have to send
# this form to get the intended behavior:
#
# metadata[old]=&metadata[new]=new_value
#
# This method allows us to track which parameters are considered additive,
# and lets us behave correctly where appropriate when serializing
# parameters to be sent.
def self.additive_object_param(name)
@additive_params ||= Set.new
@additive_params << name
end
# Returns whether the given name is an additive object parameter. See
# `.additive_object_param` for details.
def self.additive_object_param?(name)
@additive_params ||= Set.new
@additive_params.include?(name)
end
def initialize(id = nil, opts = {})
id, @retrieve_params = Util.normalize_id(id)
@opts = Util.normalize_opts(opts)
@original_values = {}
@values = {}
# This really belongs in APIResource, but not putting it there allows us
# to have a unified inspect method
@unsaved_values = Set.new
@transient_values = Set.new
@values[:id] = id if id
end
def self.construct_from(values, opts = {})
values = Stripe::Util.symbolize_names(values)
# work around protected #initialize_from for now
new(values[:id]).send(:initialize_from, values, opts)
end
# Determines the equality of two Stripe objects. Stripe objects are
# considered to be equal if they have the same set of values and each one
# of those values is the same.
def ==(other)
other.is_a?(StripeObject) && @values == other.instance_variable_get(:@values)
end
# Hash equality. As with `#==`, we consider two equivalent Stripe objects equal.
def eql?(other)
# Defer to the implementation on `#==`.
self == other
end
# As with equality in `#==` and `#eql?`, we hash two Stripe objects to the
# same value if they're equivalent objects.
def hash
@values.hash
end
# Indicates whether or not the resource has been deleted on the server.
# Note that some, but not all, resources can indicate whether they have
# been deleted.
def deleted?
@values.fetch(:deleted, false)
end
def to_s(*_args)
JSON.pretty_generate(to_hash)
end
def inspect
id_string = respond_to?(:id) && !id.nil? ? " id=#{id}" : ""
"#<#{self.class}:0x#{object_id.to_s(16)}#{id_string}> JSON: " + JSON.pretty_generate(@values)
end
# Re-initializes the object based on a hash of values (usually one that's
# come back from an API call). Adds or removes value accessors as necessary
# and updates the state of internal data.
#
# Please don't use this method. If you're trying to do mass assignment, try
# #initialize_from instead.
def refresh_from(values, opts, partial = false)
initialize_from(values, opts, partial)
end
extend Gem::Deprecate
deprecate :refresh_from, "#update_attributes", 2016, 1
# Mass assigns attributes on the model.
#
# This is a version of +update_attributes+ that takes some extra options
# for internal use.
#
# ==== Attributes
#
# * +values+ - Hash of values to use to update the current attributes of
# the object.
# * +opts+ - Options for +StripeObject+ like an API key that will be reused
# on subsequent API calls.
#
# ==== Options
#
# * +:dirty+ - Whether values should be initiated as "dirty" (unsaved) and
# which applies only to new StripeObjects being initiated under this
# StripeObject. Defaults to true.
def update_attributes(values, opts = {}, dirty: true)
values.each do |k, v|
add_accessors([k], values) unless metaclass.method_defined?(k.to_sym)
@values[k] = Util.convert_to_stripe_object(v, opts)
dirty_value!(@values[k]) if dirty
@unsaved_values.add(k)
end
end
def [](k)
@values[k.to_sym]
end
def []=(k, v)
send(:"#{k}=", v)
end
def keys
@values.keys
end
def values
@values.values
end
def to_json(*_a)
JSON.generate(@values)
end
def as_json(*a)
@values.as_json(*a)
end
def to_hash
maybe_to_hash = lambda do |value|
value && value.respond_to?(:to_hash) ? value.to_hash : value
end
@values.each_with_object({}) do |(key, value), acc|
acc[key] = case value
when Array
value.map(&maybe_to_hash)
else
maybe_to_hash.call(value)
end
end
end
def each(&blk)
@values.each(&blk)
end
# Sets all keys within the StripeObject as unsaved so that they will be
# included with an update when #serialize_params is called. This method is
# also recursive, so any StripeObjects contained as values or which are
# values in a tenant array are also marked as dirty.
def dirty!
@unsaved_values = Set.new(@values.keys)
@values.each_value do |v|
dirty_value!(v)
end
end
# Implements custom encoding for Ruby's Marshal. The data produced by this
# method should be comprehensible by #marshal_load.
#
# This allows us to remove certain features that cannot or should not be
# serialized.
def marshal_dump
# The StripeClient instance in @opts is not serializable and is not
# really a property of the StripeObject, so we exclude it when
# dumping
opts = @opts.clone
opts.delete(:client)
[@values, opts]
end
# Implements custom decoding for Ruby's Marshal. Consumes data that's
# produced by #marshal_dump.
def marshal_load(data)
values, opts = data
initialize(values[:id])
initialize_from(values, opts)
end
def serialize_params(options = {})
update_hash = {}
@values.each do |k, v|
# There are a few reasons that we may want to add in a parameter for
# update:
#
# 1. The `force` option has been set.
# 2. We know that it was modified.
# 3. Its value is a StripeObject. A StripeObject may contain modified
# values within in that its parent StripeObject doesn't know about.
#
unsaved = @unsaved_values.include?(k)
if options[:force] || unsaved || v.is_a?(StripeObject)
update_hash[k.to_sym] =
serialize_params_value(@values[k], @original_values[k], unsaved, options[:force], key: k)
end
end
# a `nil` that makes it out of `#serialize_params_value` signals an empty
# value that we shouldn't appear in the serialized form of the object
update_hash.reject! { |_, v| v.nil? }
update_hash
end
class << self
# This class method has been deprecated in favor of the instance method
# of the same name.
def serialize_params(obj, options = {})
obj.serialize_params(options)
end
extend Gem::Deprecate
deprecate :serialize_params, "#serialize_params", 2016, 9
end
# A protected field is one that doesn't get an accessor assigned to it
# (i.e. `obj.public = ...`) and one which is not allowed to be updated via
# the class level `Model.update(id, { ... })`.
def self.protected_fields
[]
end
protected
def metaclass
class << self; self; end
end
def remove_accessors(keys)
# not available in the #instance_eval below
protected_fields = self.class.protected_fields
metaclass.instance_eval do
keys.each do |k|
next if protected_fields.include?(k)
next if @@permanent_attributes.include?(k)
# Remove methods for the accessor's reader and writer.
[k, :"#{k}=", :"#{k}?"].each do |method_name|
next unless method_defined?(method_name)
begin
remove_method(method_name)
rescue NameError
# In some cases there can be a method that's detected with
# `method_defined?`, but which cannot be removed with
# `remove_method`, even though it's on the same class. The only
# case so far that we've noticed this is when a class is
# reopened for monkey patching:
#
# https://github.com/stripe/stripe-ruby/issues/749
#
# Here we swallow that error and issue a warning so at least
# the program doesn't crash.
$stderr.puts("WARNING: Unable to remove method `#{method_name}`; " \
"if custom, please consider renaming to a name that doesn't " \
"collide with an API property name.")
end
end
end
end
end
def add_accessors(keys, values)
# not available in the #instance_eval below
protected_fields = self.class.protected_fields
metaclass.instance_eval do
keys.each do |k|
next if protected_fields.include?(k)
next if @@permanent_attributes.include?(k)
if k == :method
# Object#method is a built-in Ruby method that accepts a symbol
# and returns the corresponding Method object. Because the API may
# also use `method` as a field name, we check the arity of *args
# to decide whether to act as a getter or call the parent method.
define_method(k) { |*args| args.empty? ? @values[k] : super(*args) }
else
define_method(k) { @values[k] }
end
define_method(:"#{k}=") do |v|
if v == ""
raise ArgumentError, "You cannot set #{k} to an empty string. " \
"We interpret empty strings as nil in requests. " \
"You may set (object).#{k} = nil to delete the property."
end
@values[k] = Util.convert_to_stripe_object(v, @opts)
dirty_value!(@values[k])
@unsaved_values.add(k)
end
if [FalseClass, TrueClass].include?(values[k].class)
define_method(:"#{k}?") { @values[k] }
end
end
end
end
def method_missing(name, *args)
# TODO: only allow setting in updateable classes.
if name.to_s.end_with?("=")
attr = name.to_s[0...-1].to_sym
# Pull out the assigned value. This is only used in the case of a
# boolean value to add a question mark accessor (i.e. `foo?`) for
# convenience.
val = args.first
# the second argument is only required when adding boolean accessors
add_accessors([attr], attr => val)
begin
mth = method(name)
rescue NameError
raise NoMethodError, "Cannot set #{attr} on this object. HINT: you can't set: #{@@permanent_attributes.to_a.join(', ')}"
end
return mth.call(args[0])
elsif @values.key?(name)
return @values[name]
end
begin
super
rescue NoMethodError => e
# If we notice the accessed name if our set of transient values we can
# give the user a slightly more helpful error message. If not, just
# raise right away.
raise unless @transient_values.include?(name)
raise NoMethodError, e.message + ". HINT: The '#{name}' attribute was set in the past, however. It was then wiped when refreshing the object with the result returned by Stripe's API, probably as a result of a save(). The attributes currently available on this object are: #{@values.keys.join(', ')}"
end
end
def respond_to_missing?(symbol, include_private = false)
@values && @values.key?(symbol) || super
end
# Re-initializes the object based on a hash of values (usually one that's
# come back from an API call). Adds or removes value accessors as necessary
# and updates the state of internal data.
#
# Protected on purpose! Please do not expose.
#
# ==== Options
#
# * +:values:+ Hash used to update accessors and values.
# * +:opts:+ Options for StripeObject like an API key.
# * +:partial:+ Indicates that the re-initialization should not attempt to
# remove accessors.
def initialize_from(values, opts, partial = false)
@opts = Util.normalize_opts(opts)
# the `#send` is here so that we can keep this method private
@original_values = self.class.send(:deep_copy, values)
removed = partial ? Set.new : Set.new(@values.keys - values.keys)
added = Set.new(values.keys - @values.keys)
# Wipe old state before setting new. This is useful for e.g. updating a
# customer, where there is no persistent card parameter. Mark those values
# which don't persist as transient
remove_accessors(removed)
add_accessors(added, values)
removed.each do |k|
@values.delete(k)
@transient_values.add(k)
@unsaved_values.delete(k)
end
update_attributes(values, opts, dirty: false)
values.each_key do |k|
@transient_values.delete(k)
@unsaved_values.delete(k)
end
self
end
def serialize_params_value(value, original, unsaved, force, key: nil)
if value.nil?
""
# The logic here is that essentially any object embedded in another
# object that had a `type` is actually an API resource of a different
# type that's been included in the response. These other resources must
# be updated from their proper endpoints, and therefore they are not
# included when serializing even if they've been modified.
#
# There are _some_ known exceptions though.
#
# For example, if the value is unsaved (meaning the user has set it), and
# it looks like the API resource is persisted with an ID, then we include
# the object so that parameters are serialized with a reference to its
# ID.
#
# Another example is that on save API calls it's sometimes desirable to
# update a customer's default source by setting a new card (or other)
# object with `#source=` and then saving the customer. The
# `#save_with_parent` flag to override the default behavior allows us to
# handle these exceptions.
#
# We throw an error if a property was set explicitly but we can't do
# anything with it because the integration is probably not working as the
# user intended it to.
elsif value.is_a?(APIResource) && !value.save_with_parent
if !unsaved
nil
elsif value.respond_to?(:id) && !value.id.nil?
value
else
raise ArgumentError, "Cannot save property `#{key}` containing " \
"an API resource. It doesn't appear to be persisted and is " \
"not marked as `save_with_parent`."
end
elsif value.is_a?(Array)
update = value.map { |v| serialize_params_value(v, nil, true, force) }
# This prevents an array that's unchanged from being resent.
update if update != serialize_params_value(original, nil, true, force)
# Handle a Hash for now, but in the long run we should be able to
# eliminate all places where hashes are stored as values internally by
# making sure any time one is set, we convert it to a StripeObject. This
# will simplify our model by making data within an object more
# consistent.
#
# For now, you can still run into a hash if someone appends one to an
# existing array being held by a StripeObject. This could happen for
# example by appending a new hash onto `additional_owners` for an
# account.
elsif value.is_a?(Hash)
Util.convert_to_stripe_object(value, @opts).serialize_params
elsif value.is_a?(StripeObject)
update = value.serialize_params(force: force)
# If the entire object was replaced and this is an additive object,
# then we need blank each field of the old object that held a value
# because otherwise the update to the keys of the object will be
# additive instead of a full replacement. The new serialized values
# will override any of these empty values.
if original && unsaved && key && self.class.additive_object_param?(key)
update = empty_values(original).merge(update)
end
update
else
value
end
end
private
# Produces a deep copy of the given object including support for arrays,
# hashes, and StripeObjects.
def self.deep_copy(obj)
case obj
when Array
obj.map { |e| deep_copy(e) }
when Hash
obj.each_with_object({}) do |(k, v), copy|
copy[k] = deep_copy(v)
copy
end
when StripeObject
obj.class.construct_from(
deep_copy(obj.instance_variable_get(:@values)),
obj.instance_variable_get(:@opts).select do |k, _v|
Util::OPTS_COPYABLE.include?(k)
end
)
else
obj
end
end
private_class_method :deep_copy
def dirty_value!(value)
case value
when Array
value.map { |v| dirty_value!(v) }
when StripeObject
value.dirty!
end
end
# Returns a hash of empty values for all the values that are in the given
# StripeObject.
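# @example (illustrative; given a StripeObject with keys :old and :new)
#   empty_values(obj) # => { old: "", new: "" }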
end
|
CocoaPods/Xcodeproj | lib/xcodeproj/workspace.rb | Xcodeproj.Workspace.xcworkspace_element_start_xml | ruby | def xcworkspace_element_start_xml(depth, elem)
attributes = case elem.name
when 'Group'
%w(location name)
when 'FileRef'
%w(location)
end
contents = "<#{elem.name}"
indent = ' ' * depth
attributes.each { |name| contents += "\n #{name} = \"#{elem.attribute(name)}\"" }
contents.split("\n").map { |line| "#{indent}#{line}" }.join("\n") + ">\n"
end | @param [Integer] depth The depth of the element in the tree
@param [REXML::Document::Element] elem The XML element to format.
@return [String] The Xcode-specific XML formatting of an element start | train | https://github.com/CocoaPods/Xcodeproj/blob/3be1684437a6f8e69c7836ad4c85a2b78663272f/lib/xcodeproj/workspace.rb#L251-L262 | class Workspace
# @return [REXML::Document] the parsed XML model for the workspace contents
attr_reader :document
# @return [Hash<String => String>] a mapping from scheme name to project full path
# containing the scheme
attr_reader :schemes
# @return [Array<FileReference>] the paths of the projects contained in the
# workspace.
#
def file_references
return [] unless @document
@document.get_elements('/Workspace//FileRef').map do |node|
FileReference.from_node(node)
end
end
# @return [Array<GroupReference>] the groups contained in the workspace
#
def group_references
return [] unless @document
@document.get_elements('/Workspace//Group').map do |node|
GroupReference.from_node(node)
end
end
# @param [REXML::Document] document @see document
# @param [Array<FileReference>] file_references additional projects to add
#
# @note The document parameter is passed to the << operator if it is not a
# valid REXML::Document. It is optional, but may also be passed as nil
#
def initialize(document, *file_references)
@schemes = {}
if document.nil?
@document = REXML::Document.new(root_xml(''))
elsif document.is_a?(REXML::Document)
@document = document
else
@document = REXML::Document.new(root_xml(''))
self << document
end
file_references.each { |ref| self << ref }
end
#-------------------------------------------------------------------------#
# Returns a workspace generated by reading the contents of the given path.
#
# @param [String] path
# the path of the `xcworkspace` file.
#
# @return [Workspace] the generated workspace.
#
def self.new_from_xcworkspace(path)
from_s(File.read(File.join(path, 'contents.xcworkspacedata')),
File.expand_path(path))
rescue Errno::ENOENT
new(nil)
end
#-------------------------------------------------------------------------#
# Returns a workspace generated by reading the contents of the given
# XML representation.
#
# @param [String] xml
# the XML representation of the workspace.
#
# @return [Workspace] the generated workspace.
#
def self.from_s(xml, workspace_path = '')
document = REXML::Document.new(xml)
instance = new(document)
instance.load_schemes(workspace_path)
instance
end
# Adds a new path to the list of projects contained in the
# workspace.
# @param [String, Xcodeproj::Workspace::FileReference] path_or_reference
# A string or Xcode::Workspace::FileReference containing a path to an Xcode project
#
# @raise [ArgumentError] Raised if the input is neither a String nor a FileReference
#
# @return [void]
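#
# @example (illustrative)
#   workspace << 'App/App.xcodeproj'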
#
def <<(path_or_reference)
return unless @document && @document.respond_to?(:root)
case path_or_reference
when String
project_file_reference = Xcodeproj::Workspace::FileReference.new(path_or_reference)
when Xcodeproj::Workspace::FileReference
project_file_reference = path_or_reference
projpath = nil
else
raise ArgumentError, "Input to the << operator must be a file path or FileReference, got #{path_or_reference.inspect}"
end
@document.root.add_element(project_file_reference.to_node)
load_schemes_from_project File.expand_path(projpath || project_file_reference.path)
end
#-------------------------------------------------------------------------#
# Adds a new group container to the workspace.
#
# @param [String] name The name of the group
#
# @yield [Xcodeproj::Workspace::GroupReference, REXML::Element]
# Yields the GroupReference and underlying XML element for mutation
#
# @return [Xcodeproj::Workspace::GroupReference] The added group reference
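#
# @example Adding a group (illustrative)
#   group = workspace.add_group('Projects')
#   group.name # => 'Projects'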
#
def add_group(name)
return nil unless @document
group = Xcodeproj::Workspace::GroupReference.new(name)
elem = @document.root.add_element(group.to_node)
yield group, elem if block_given?
group
end
# Checks if the workspace contains the project with the given file
# reference.
#
# @param [FileReference] file_reference
# The file_reference to the project.
#
# @return [Boolean] whether the project is contained in the workspace.
#
def include?(file_reference)
file_references.include?(file_reference)
end
# @return [String] the XML representation of the workspace.
#
def to_s
contents = ''
stack = []
@document.root.each_recursive do |elem|
until stack.empty?
last = stack.last
break if last == elem.parent
contents += xcworkspace_element_end_xml(stack.length, last)
stack.pop
end
stack << elem
contents += xcworkspace_element_start_xml(stack.length, elem)
end
until stack.empty?
contents += xcworkspace_element_end_xml(stack.length, stack.last)
stack.pop
end
root_xml(contents)
end
# Saves the workspace at the given `xcworkspace` path.
#
# @param [String] path
# the path where to save the project.
#
# @return [void]
#
def save_as(path)
FileUtils.mkdir_p(path)
File.open(File.join(path, 'contents.xcworkspacedata'), 'w') do |out|
out << to_s
end
end
#-------------------------------------------------------------------------#
# Load all schemes from all projects in workspace or in the workspace container itself
#
# @param [String] workspace_dir_path
# path of workspaces dir
#
# @return [void]
#
def load_schemes(workspace_dir_path)
# Normalizes path to directory of workspace needed for file_reference.absolute_path
workspaces_dir = workspace_dir_path
if File.extname(workspace_dir_path) == '.xcworkspace'
workspaces_dir = File.expand_path('..', workspaces_dir)
end
file_references.each do |file_reference|
project_full_path = file_reference.absolute_path(workspaces_dir)
load_schemes_from_project(project_full_path)
end
# Load schemes that are in the workspace container.
workspace_abs_path = File.absolute_path(workspace_dir_path)
Dir[File.join(workspace_dir_path, 'xcshareddata', 'xcschemes', '*.xcscheme')].each do |scheme|
scheme_name = File.basename(scheme, '.xcscheme')
@schemes[scheme_name] = workspace_abs_path
end
end
private
# Load all schemes from project
#
# @param [String] project_full_path
# project full path
#
# @return [void]
#
def load_schemes_from_project(project_full_path)
schemes = Xcodeproj::Project.schemes project_full_path
schemes.each do |scheme_name|
@schemes[scheme_name] = project_full_path
end
end
# @return [String] The template of the workspace XML as formatted by Xcode.
#
# @param [String] contents The XML contents of the workspace.
#
def root_xml(contents)
<<-DOC
<?xml version="1.0" encoding="UTF-8"?>
<Workspace
version = "1.0">
#{contents.rstrip}
</Workspace>
DOC
end
#
# @param [Integer] depth The depth of the element in the tree
# @param [REXML::Document::Element] elem The XML element to format.
#
# @return [String] The Xcode-specific XML formatting of an element start
#
#
# @param [Integer] depth The depth of the element in the tree
# @param [REXML::Document::Element] elem The XML element to format.
#
# @return [String] The Xcode-specific XML formatting of an element end
#
def xcworkspace_element_end_xml(depth, elem)
"#{' ' * depth}</#{elem.name}>\n"
end
#-------------------------------------------------------------------------#
end
|
zed-0xff/zpng | lib/zpng/color.rb | ZPNG.Color.to_depth | ruby | def to_depth new_depth
return self if depth == new_depth
color = Color.new :depth => new_depth
if new_depth > self.depth
%w'r g b a'.each do |part|
color.send("#{part}=", (2**new_depth-1)/(2**depth-1)*self.send(part))
end
else
# new_depth < self.depth
%w'r g b a'.each do |part|
color.send("#{part}=", self.send(part)>>(self.depth-new_depth))
end
end
color
end | change bit depth, return new Color | train | https://github.com/zed-0xff/zpng/blob/d356182ab9bbc2ed3fe5c064488498cf1678b0f0/lib/zpng/color.rb#L159-L174 | class Color
attr_accessor :r, :g, :b
attr_reader :a
attr_accessor :depth
include DeepCopyable
def initialize *a
h = a.last.is_a?(Hash) ? a.pop : {}
@r,@g,@b,@a = *a
# default sample depth for r,g,b and alpha = 8 bits
@depth = h[:depth] || 8
# default ALPHA = 0xff - opaque
@a ||= h[:alpha] || h[:a] || (2**@depth-1)
end
def a= a
@a = a || (2**@depth-1) # NULL alpha means fully opaque
end
alias :alpha :a
alias :alpha= :a=
BLACK = Color.new(0 , 0, 0)
WHITE = Color.new(255,255,255)
RED = Color.new(255, 0, 0)
GREEN = Color.new(0 ,255, 0)
BLUE = Color.new(0 , 0,255)
YELLOW= Color.new(255,255, 0)
CYAN = Color.new( 0,255,255)
PURPLE= MAGENTA =
Color.new(255, 0,255)
TRANSPARENT = Color.new(0,0,0,0)
ANSI_COLORS = [:black, :red, :green, :yellow, :blue, :magenta, :cyan, :white]
#ASCII_MAP = %q_ .`,-:;~"!<+*^(LJ=?vctsxj12FuoCeyPSah5wVmXA4G9$OR0MQNW#&%@_
#ASCII_MAP = %q_ .`,-:;~"!<+*^=VXMQNW#&%@_
#ASCII_MAP = %q_ .,:"!*=7FZVXM#%@_
# see misc/gen_ascii_map.rb
ASCII_MAP =
[" '''''''```,,",
",,---:::::;;;;~~\"\"\"\"",
"\"!!!!!!<++*^^^(((LLJ",
"=??vvv]ts[j1122FFuoo",
"CeyyPEah55333VVmmXA4",
"G9$666666RRRRRR00MQQ",
"NNW####&&&&&%%%%%%%%",
"@@@@@@@"].join
# euclidian distance - http://en.wikipedia.org/wiki/Euclidean_distance
def euclidian other_color
# TODO: different depths
r = (self.r.to_i - other_color.r.to_i)**2
r += (self.g.to_i - other_color.g.to_i)**2
r += (self.b.to_i - other_color.b.to_i)**2
Math.sqrt r
end
def white?
max = 2**depth-1
r == max && g == max && b == max
end
def black?
r == 0 && g == 0 && b == 0
end
def transparent?
a == 0
end
def opaque?
a.nil? || a == 2**depth-1
end
def to_grayscale
(r+g+b)/3
end
def to_gray_alpha
[to_grayscale, alpha]
end
class << self
# from_grayscale level
# from_grayscale level, :depth => 16
# from_grayscale level, alpha
# from_grayscale level, alpha, :depth => 16
def from_grayscale value, *args
Color.new value,value,value, *args
end
# value: (String) "#ff00ff", "#f0f", "f0f", "eebbcc"
# alpha can be set via :alpha => N optional hash argument
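#
# @example (illustrative)
#   ZPNG::Color.from_html("#f0f")               # => #ff00ff
#   ZPNG::Color.from_html("336699", alpha: 128)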
def from_html value, *args
s = value.tr('#','')
case s.size
when 3
r,g,b = s.split('').map{ |x| x.to_i(16)*17 }
when 6
r,g,b = s.scan(/../).map{ |x| x.to_i(16) }
else
raise ArgumentError, "invalid HTML color #{s}"
end
Color.new r,g,b, *args
end
alias :from_css :from_html
end
########################################################
# simple conversions
def to_i
((a||0) << 24) + ((r||0) << 16) + ((g||0) << 8) + (b||0)
end
def to_s
"%02X%02X%02X" % [r,g,b]
end
def to_a
[r, g, b, a]
end
########################################################
# complex conversions
# try to convert to one pseudographics ASCII character
def to_ascii map=ASCII_MAP
#p self
map[self.to_grayscale*(map.size-1)/(2**@depth-1), 1]
end
# convert to ANSI color name
def to_ansi
return to_depth(8).to_ansi if depth != 8
a = ANSI_COLORS.map{|c| self.class.const_get(c.to_s.upcase) }
a.map!{ |c| self.euclidian(c) }
ANSI_COLORS[a.index(a.min)]
end
# HTML/CSS color in notation like #33aa88
def to_css
return to_depth(8).to_css if depth != 8
"#%02X%02X%02X" % [r,g,b]
end
alias :to_html :to_css
########################################################
# change bit depth, return new Color
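#
# @example to_depth (illustrative)
#   ZPNG::Color.new(255, 0, 0).to_depth(16) # => color with r=0xffff, g=0, b=0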
def inspect
s = "#<ZPNG::Color"
if depth == 16
s << " r=" + (r ? "%04x" % r : "????")
s << " g=" + (g ? "%04x" % g : "????")
s << " b=" + (b ? "%04x" % b : "????")
s << " alpha=%04x" % alpha if alpha && alpha != 0xffff
else
s << " #"
s << (r ? "%02x" % r : "??")
s << (g ? "%02x" % g : "??")
s << (b ? "%02x" % b : "??")
s << " alpha=%02x" % alpha if alpha && alpha != 0xff
end
s << " depth=#{depth}" if depth != 8
s << ">"
end
# compare with other color
def == c
return false unless c.is_a?(Color)
c1,c2 =
if self.depth > c.depth
[self, c.to_depth(self.depth)]
else
[self.to_depth(c.depth), c]
end
c1.r == c2.r && c1.g == c2.g && c1.b == c2.b && c1.a == c2.a
end
alias :eql? :==
# compare with other color
def <=> c
c1,c2 =
if self.depth > c.depth
[self, c.to_depth(self.depth)]
else
[self.to_depth(c.depth), c]
end
r = c1.to_grayscale <=> c2.to_grayscale
r == 0 ? (c1.to_a <=> c2.to_a) : r
end
# subtract other color from this one, returns new Color
def - c
op :-, c
end
# add other color to this one, returns new Color
def + c
op :+, c
end
# XOR this color with other one, returns new Color
def ^ c
op :^, c
end
# AND this color with other one, returns new Color
def & c
op :&, c
end
# OR this color with other one, returns new Color
def | c
op :|, c
end
# Op! op! op! Op!! Oppan Gangnam Style!!
def op op, c=nil
# XXX what to do with alpha?
max = 2**depth-1
if c
c = c.to_depth(depth)
Color.new(
@r.send(op, c.r) & max,
@g.send(op, c.g) & max,
@b.send(op, c.b) & max,
:depth => self.depth
)
else
Color.new(
@r.send(op) & max,
@g.send(op) & max,
@b.send(op) & max,
:depth => self.depth
)
end
end
# for Array.uniq()
def hash
self.to_i
end
end
|
moneta-rb/moneta | lib/moneta/expires.rb | Moneta.Expires.delete | ruby | def delete(key, options = {})
return super if options.include?(:raw)
value, expires = super
value if !expires || Time.now <= Time.at(expires)
end | (see Proxy#delete) | train | https://github.com/moneta-rb/moneta/blob/26a118c8b2c93d11257f4a5fe9334a8157f4db47/lib/moneta/expires.rb#L44-L48 | class Expires < Proxy
include ExpiresSupport
# @param [Moneta store] adapter The underlying store
# @param [Hash] options
# @option options [String] :expires Default expiration time
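#
# @example (illustrative; wraps a non-expiring adapter)
#   store = Moneta::Expires.new(Moneta::Adapters::Memory.new, expires: 60)
#   store.store('session', 'abc', expires: 10) # per-call override of the default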
def initialize(adapter, options = {})
raise 'Store already supports feature :expires' if adapter.supports?(:expires)
super
self.default_expires = options[:expires]
end
# (see Proxy#key?)
def key?(key, options = {})
# Transformer might raise exception
load_entry(key, options) != nil
rescue Exception
super(key, Utils.without(options, :expires))
end
# (see Proxy#load)
def load(key, options = {})
return super if options.include?(:raw)
value, expires = load_entry(key, options)
value
end
# (see Proxy#store)
def store(key, value, options = {})
return super if options.include?(:raw)
expires = expires_at(options)
super(key, new_entry(value, expires), Utils.without(options, :expires))
value
end
# (see Proxy#delete)
# (see Proxy#store)
def create(key, value, options = {})
return super if options.include?(:raw)
expires = expires_at(options)
@adapter.create(key, new_entry(value, expires), Utils.without(options, :expires))
end
# (see Defaults#values_at)
def values_at(*keys, **options)
return super if options.include?(:raw)
new_expires = expires_at(options, nil)
options = Utils.without(options, :expires)
with_updates(options) do |updates|
keys.zip(@adapter.values_at(*keys, **options)).map do |key, entry|
entry = invalidate_entry(key, entry, new_expires) do |new_entry|
updates[key] = new_entry
end
next if entry.nil?
value, _ = entry
value
end
end
end
# (see Defaults#fetch_values)
def fetch_values(*keys, **options)
return super if options.include?(:raw)
new_expires = expires_at(options, nil)
options = Utils.without(options, :expires)
substituted = {}
block = if block_given?
lambda do |key|
substituted[key] = true
yield key
end
end
with_updates(options) do |updates|
keys.zip(@adapter.fetch_values(*keys, **options, &block)).map do |key, entry|
next entry if substituted[key]
entry = invalidate_entry(key, entry, new_expires) do |new_entry|
updates[key] = new_entry
end
if entry.nil?
value = if block_given?
yield key
end
else
value, _ = entry
end
value
end
end
end
# (see Defaults#slice)
def slice(*keys, **options)
return super if options.include?(:raw)
new_expires = expires_at(options, nil)
options = Utils.without(options, :expires)
with_updates(options) do |updates|
@adapter.slice(*keys, **options).map do |key, entry|
entry = invalidate_entry(key, entry, new_expires) do |new_entry|
updates[key] = new_entry
end
next if entry.nil?
value, _ = entry
[key, value]
end.reject(&:nil?)
end
end
# (see Defaults#merge!)
def merge!(pairs, options={})
expires = expires_at(options)
options = Utils.without(options, :expires)
block = if block_given?
lambda do |key, old_entry, entry|
old_entry = invalidate_entry(key, old_entry)
if old_entry.nil?
entry # behave as if no replace is happening
else
old_value, _ = old_entry
new_value, _ = entry
new_entry(yield(key, old_value, new_value), expires)
end
end
end
entry_pairs = pairs.map do |key, value|
[key, new_entry(value, expires)]
end
@adapter.merge!(entry_pairs, options, &block)
self
end
private
def load_entry(key, options)
new_expires = expires_at(options, nil)
options = Utils.without(options, :expires)
entry = @adapter.load(key, options)
invalidate_entry(key, entry, new_expires) do |new_entry|
@adapter.store(key, new_entry, options)
end
end
def invalidate_entry(key, entry, new_expires = nil)
if entry != nil
value, expires = entry
if expires && Time.now > Time.at(expires)
delete(key)
entry = nil
elsif new_expires != nil
yield new_entry(value, new_expires) if block_given?
end
end
entry
end
def new_entry(value, expires)
if expires
[value, expires.to_r]
elsif Array === value || value == nil
[value]
else
value
end
end
def with_updates(options)
updates = {}
yield(updates).tap do
@adapter.merge!(updates, options) unless updates.empty?
end
end
end
|
bmuller/ankusa | lib/ankusa/cassandra_storage.rb | Ankusa.CassandraStorage.incr_doc_count | ruby | def incr_doc_count(klass, count)
klass = klass.to_s
doc_count = @cassandra.get(:totals, klass, "doc_count").values.last.to_i
doc_count += count
@cassandra.insert(:totals, klass, {"doc_count" => doc_count.to_s})
@klass_doc_counts[klass.to_sym] = doc_count
end | Increment total document count for a given class by 'count' | train | https://github.com/bmuller/ankusa/blob/af946f130aa63532fdb67d8382cfaaf81b38027b/lib/ankusa/cassandra_storage.rb#L159-L165 | class CassandraStorage
attr_reader :cassandra
#
# Necessary to set max classes since current implementation of ruby
# cassandra client doesn't support table scans. Using crufty get_range
# method at the moment.
#
def initialize(host='127.0.0.1', port=9160, keyspace = 'ankusa', max_classes = 100)
@cassandra = Cassandra.new('system', "#{host}:#{port}")
@klass_word_counts = {}
@klass_doc_counts = {}
@keyspace = keyspace
@max_classes = max_classes
init_tables
end
#
# Fetch the names of the distinct classes for classification:
# e.g. :spam, :good, etc.
#
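# @example (illustrative)
#   storage = Ankusa::CassandraStorage.new('127.0.0.1', 9160)
#   storage.classnames # => [:spam, :good]
#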
def classnames
@cassandra.get_range(:totals, {:start => '', :finish => '', :count => @max_classes}).inject([]) do |cs, key_slice|
cs << key_slice.key.to_sym
end
end
def reset
drop_tables
init_tables
end
#
# Drop ankusa keyspace, reset internal caches
#
# FIXME: truncate doesn't work with cassandra-beta2
#
def drop_tables
@cassandra.truncate!('classes')
@cassandra.truncate!('totals')
@cassandra.drop_keyspace(@keyspace)
@klass_word_counts = {}
@klass_doc_counts = {}
end
#
# Create required keyspace and column families
#
def init_tables
# Do nothing if keyspace already exists
if @cassandra.keyspaces.include?(@keyspace)
@cassandra.keyspace = @keyspace
else
freq_table = Cassandra::ColumnFamily.new({:keyspace => @keyspace, :name => "classes"}) # word => {classname => count}
summary_table = Cassandra::ColumnFamily.new({:keyspace => @keyspace, :name => "totals"}) # class => {wordcount => count}
ks_def = Cassandra::Keyspace.new({
:name => @keyspace,
:strategy_class => 'org.apache.cassandra.locator.SimpleStrategy',
:replication_factor => 1,
:cf_defs => [freq_table, summary_table]
})
@cassandra.add_keyspace ks_def
@cassandra.keyspace = @keyspace
end
end
#
# Fetch hash of word counts as a single row from cassandra.
# Here column_name is the class and column value is the count
#
def get_word_counts(word)
# fetch all (class,count) pairs for a given word
row = @cassandra.get(:classes, word.to_s)
return row.to_hash if row.empty?
row.inject({}){|counts, col| counts[col.first.to_sym] = [col.last.to_f,0].max; counts}
end
#
# Does a table 'scan' of summary table pulling out the 'vocabsize' column
# from each row. Generates a hash of (class, vocab_size) key value pairs
#
def get_vocabulary_sizes
get_summary "vocabsize"
end
#
# Fetch total word count for a given class and cache it
#
def get_total_word_count(klass)
@klass_word_counts[klass] = @cassandra.get(:totals, klass.to_s, "wordcount").values.last.to_f
end
#
# Fetch total documents for a given class and cache it
#
def get_doc_count(klass)
@klass_doc_counts[klass] = @cassandra.get(:totals, klass.to_s, "doc_count").values.last.to_f
end
#
# Increment the count for a given (word,class) pair. Evidently, cassandra
# does not support atomic increment/decrement. Psh. HBase uses ZooKeeper to
# implement atomic operations, ain't it special?
#
def incr_word_count(klass, word, count)
# Only wants strings
klass = klass.to_s
word = word.to_s
prior_count = @cassandra.get(:classes, word, klass).values.last.to_i
new_count = prior_count + count
@cassandra.insert(:classes, word, {klass => new_count.to_s})
if (prior_count == 0 && count > 0)
#
# we've never seen this word before and we're not trying to unlearn it
#
vocab_size = @cassandra.get(:totals, klass, "vocabsize").values.last.to_i
vocab_size += 1
@cassandra.insert(:totals, klass, {"vocabsize" => vocab_size.to_s})
elsif new_count == 0
#
# we've seen this word before but we're trying to unlearn it
#
vocab_size = @cassandra.get(:totals, klass, "vocabsize").values.last.to_i
vocab_size -= 1
@cassandra.insert(:totals, klass, {"vocabsize" => vocab_size.to_s})
end
new_count
end
#
# Increment total word count for a given class by 'count'
#
def incr_total_word_count(klass, count)
klass = klass.to_s
wordcount = @cassandra.get(:totals, klass, "wordcount").values.last.to_i
wordcount += count
@cassandra.insert(:totals, klass, {"wordcount" => wordcount.to_s})
@klass_word_counts[klass.to_sym] = wordcount
end
#
# Increment total document count for a given class by 'count'
#
def doc_count_totals
get_summary "doc_count"
end
#
# Doesn't do anything
#
def close
end
protected
#
# Fetch up to @max_classes rows (default 100) from the summary table; increase if necessary
#
def get_summary(name)
counts = {}
@cassandra.get_range(:totals, {:start => '', :finish => '', :count => @max_classes}).each do |key_slice|
# keyslice is a clunky thrift object, map into a ruby hash
row = key_slice.columns.inject({}){|hsh, c| hsh[c.column.name] = c.column.value; hsh}
counts[key_slice.key.to_sym] = row[name].to_f
end
counts
end
end
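# --- Editor's note: a minimal usage sketch for this Cassandra-backed store.
# The record begins mid-scope, so the class name `CassandraStore` and its
# zero-argument constructor below are assumptions, not taken from the source.
store = CassandraStore.new                  # hypothetical; a real instance needs @cassandra, @keyspace, etc.
store.init_tables                           # create the "classes" and "totals" column families
store.incr_word_count(:spam, "prize", 3)    # "prize" seen 3 times in class :spam
store.incr_total_word_count(:spam, 3)
store.get_word_counts("prize")              # => { spam: 3.0 }
store.get_vocabulary_sizes                  # => { spam: 1.0 }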
|
paxtonhare/marklogic-ruby-driver | lib/marklogic/collection.rb | MarkLogic.Collection.from_criteria | ruby | def from_criteria(criteria)
queries = []
criteria.each do |k, v|
name, operator, index_type, value = nil
query_options = {}
if (v.is_a?(Hash))
name = k.to_s
query_options.merge!(v.delete(:options) || {})
sub_queries = []
v.each do |kk, vv|
operator = kk.to_s.gsub('$', '').upcase || "EQ"
if @operators.include?(operator)
value = vv
value = value.to_s if value.is_a?(MarkLogic::ObjectId)
sub_queries << build_query(name, operator, value, query_options)
elsif value.is_a?(Hash)
child_queries = value.map do |kk, vv|
build_query(kk, vv, query_options)
end
sub_queries << Queries::ContainerQuery.new(name, Queries::AndQuery.new(child_queries))
end
end
if sub_queries.length > 1
queries << Queries::AndQuery.new(sub_queries)
elsif sub_queries.length == 1
queries << sub_queries[0]
end
else
name = k.to_s
value = v
operator = "EQ"
queries << build_query(name, operator, value, query_options)
end
end
if queries.length > 1
MarkLogic::Queries::AndQuery.new(*queries)
elsif queries.length == 1
queries[0]
end
end | Builds a MarkLogic Query from Mongo Style Criteria
@param [Hash] criteria The Criteria to use when searching
@example Build a query from criteria
# Query on age == 3
collection.from_criteria({ 'age' => { '$eq' => 3 } })
# Query on age < 3
collection.from_criteria({ 'age' => { '$lt' => 3 } })
# Query on age <= 3
collection.from_criteria({ 'age' => { '$le' => 3 } })
# Query on age > 3
collection.from_criteria({ 'age' => { '$gt' => 3 } })
# Query on age >= 3
collection.from_criteria({ 'age' => { '$ge' => 3 } })
# Query on age != 3
collection.from_criteria({ 'age' => { '$ne' => 3 } })
@since 0.0.1 | train | https://github.com/paxtonhare/marklogic-ruby-driver/blob/76c3f2c2da7b8266cdbe786b7f3e910e0983eb9b/lib/marklogic/collection.rb#L174-L218 | class Collection
attr_accessor :collection
attr_reader :database
alias_method :name, :collection
def initialize(name, database)
@collection = name
@database = database
@operators = %w{GT LT GE LE EQ NE ASC DESC}
end
def count
MarkLogic::Cursor.new(self).count
end
def load(id)
url = "/v1/documents?uri=#{gen_uri(id)}&format=json"
response = @database.connection.get(url)
raise Exception.new("Invalid response: #{response.code.to_i}, #{response.body}") unless response.code.to_i == 200
Oj.load(response.body)
end
def save(doc)
if (doc.is_a?(Array))
docs = {}
doc.each do |d|
docs[doc_uri(d)] = ::Oj.dump(d, mode: :compat)
end
body = build_multipart_body(docs)
response = @database.connection.post_multipart("/v1/documents", body)
raise Exception.new("Invalid response: #{response.code.to_i}, #{response.body}\n") unless response.code.to_i == 200
else
uri = doc_uri(doc)
url = "/v1/documents?uri=#{uri}&format=json&collection=#{collection}"
json = ::Oj.dump(doc, mode: :compat)
response = @database.connection.put(url, json)
raise Exception.new("Invalid response: #{response.code.to_i}, #{response.body}\n") unless [201, 204].include? response.code.to_i
doc[:_id] || doc[:id] || doc['_id'] || doc['id']
end
end
def update(selector, document, opts={})
find(selector).each do |doc|
document.each do |key, value|
case key
when "$set"
value.each do |kk, vv|
doc[kk.to_s] = vv
end
when "$inc"
value.each do |kk, vv|
prev = doc[kk.to_s] || 0
doc[kk.to_s] = prev + vv
end
when "$unset"
value.keys.each do |kk|
doc.delete(kk.to_s)
end
when "$push"
value.each do |kk, vv|
if doc.has_key?(kk.to_s)
doc[kk.to_s].push(vv)
else
doc[kk.to_s] = [vv]
end
end
when "$pushAll"
value.each do |kk, vv|
if doc.has_key?(kk.to_s)
doc[kk.to_s] = doc[kk.to_s] + vv
else
doc[kk.to_s] = vv
end
end
end
save(doc)
end
end
end
alias_method :create, :save
alias_method :insert, :save
def remove(query = nil, options = {})
if query.nil? || (query.is_a?(Hash) && query.empty?)
drop
else
if query.class == Hash
query = from_criteria(query)
elsif query.nil?
query = Queries::AndQuery.new()
end
xqy = %Q{cts:search(fn:collection("#{collection}"), #{query.to_xqy}, ("unfiltered")) / xdmp:node-delete(.)}
response = @database.connection.run_query(xqy, "xquery")
raise Exception.new("Invalid response: #{response.code.to_i}, #{response.body}") unless response.code.to_i == 200
end
end
def drop
url = "/v1/search?collection=#{collection}"
      response = @database.connection.delete(url)
raise Exception.new("Invalid response: #{response.code.to_i}, #{response.body}") unless [204].include? response.code.to_i
end
def find_one(query = nil, options = {})
opts = options.merge(:per_page => 1)
find(query, opts).next
end
def find(query = nil, options = {})
if query.class == Hash
query = from_criteria(query)
elsif query.nil?
query = Queries::AndQuery.new()
end
options[:query] = query
cursor = MarkLogic::Cursor.new(self, options)
if block_given?
yield cursor
nil
else
cursor
end
end
def build_query(name, operator, value, query_options = {})
if database.has_range_index?(name) && (query_options.has_key?(:case_sensitive) == false || query_options[:case_sensitive] == true)
index = database.range_index(name)
type = index.scalar_type
Queries::RangeQuery.new(name, operator, type, value, query_options)
elsif operator != 'EQ'
raise MissingIndexError.new("Missing index on #{name}")
elsif value.nil?
Queries::OrQuery.new([
Queries::ValueQuery.new(name, value, query_options),
Queries::NotQuery.new(Queries::ContainerQuery.new(name, Queries::AndQuery.new))
])
elsif operator == 'EQ'
Queries::ValueQuery.new(name, value, query_options)
end
end
# Builds a MarkLogic Query from Mongo Style Criteria
#
# @param [Hash] criteria The Criteria to use when searching
#
# @example Build a query from criteria
#
# # Query on age == 3
# collection.from_criteria({ 'age' => { '$eq' => 3 } })
#
# # Query on age < 3
# collection.from_criteria({ 'age' => { '$lt' => 3 } })
#
# # Query on age <= 3
# collection.from_criteria({ 'age' => { '$le' => 3 } })
#
# # Query on age > 3
# collection.from_criteria({ 'age' => { '$gt' => 3 } })
#
# # Query on age >= 3
# collection.from_criteria({ 'age' => { '$ge' => 3 } })
#
# # Query on age != 3
# collection.from_criteria({ 'age' => { '$ne' => 3 } })
#
# @since 0.0.1
def to_s
%Q{collection: #{collection}}
end
def inspect
as_nice_string = [
" collection: #{collection.inspect}",
" database: #{database.database_name.inspect}"
].join(",")
"#<#{self.class}#{as_nice_string}>"
end
private
def doc_uri(doc)
id = doc[:_id] || doc['_id']
if id.nil?
id = SecureRandom.hex
doc[:_id] = id
end
gen_uri(id)
end
def gen_uri(id)
if id.is_a?(Hash)
id_str = id.hash.to_s
else
id_str = id.to_s
end
%Q{/#{collection}/#{id_str}.json}
end
def build_multipart_body(docs, boundary = "BOUNDARY")
tmp = ""
# collection
metadata = ::Oj.dump({ collections: [ collection ]}, mode: :compat)
tmp << %Q{--#{boundary}\r\n}
tmp << %Q{Content-Type: application/json\r\n}
tmp << %Q{Content-Disposition: inline; category=metadata\r\n}
tmp << %Q{Content-Length: #{metadata.size}\r\n\r\n}
tmp << metadata
tmp << %Q{\r\n}
docs.each do |uri, doc|
# doc
tmp << %Q{--#{boundary}\r\n}
tmp << %Q{Content-Type: application/json\r\n}
tmp << %Q{Content-Disposition: attachment; filename="#{uri}"; category=content; format=json\r\n}
tmp << %Q{Content-Length: #{doc.size}\r\n\r\n}
tmp << doc
tmp << %Q{\r\n}
end
tmp << "--#{boundary}--"
tmp
end
end
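# --- Editor's note: a usage sketch tying from_criteria to find. The `database`
# object is assumed to be a connected MarkLogic::Database, and the `$gt` clause
# assumes a range index exists on 'age' (otherwise MissingIndexError is raised).
people = MarkLogic::Collection.new('people', database)
people.save('_id' => '1', 'name' => 'Ada', 'age' => 36)
query = people.from_criteria('age' => { '$gt' => 21 })
people.find(query).each { |doc| puts doc['name'] }
people.find_one('name' => 'Ada')  # equality criteria can also be passed directly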
|
Falkor/falkorlib | lib/falkorlib/common.rb | FalkorLib.Common.store_config | ruby | def store_config(filepath, hash, options = {})
content = "# " + File.basename(filepath) + "\n"
content += "# /!\\ DO NOT EDIT THIS FILE: it has been automatically generated\n"
if options[:header]
      options[:header].split("\n").each { |line| content += "# #{line}\n" }  # keep one header comment per line
end
content += hash.to_yaml
show_diff_and_write(content, filepath, options)
# File.open( filepath, 'w') do |f|
# f.print "# ", File.basename(filepath), "\n"
# f.puts "# /!\\ DO NOT EDIT THIS FILE: it has been automatically generated"
# if options[:header]
# options[:header].split("\n").each do |line|
# f.puts "# #{line}"
# end
# end
# f.puts hash.to_yaml
# end
end | Store the Hash object as a Yaml file
Supported options:
:header [string]: additional info to place in the header of the (stored) file
:no_interaction [boolean]: do not interact | train | https://github.com/Falkor/falkorlib/blob/1a6d732e8fd5550efb7c98a87ee97fcd2e051858/lib/falkorlib/common.rb#L289-L307 | module Common
module_function
##################################
### Default printing functions ###
##################################
# Print a text in bold
def bold(str)
(COLOR == true) ? Term::ANSIColor.bold(str) : str
end
# Print a text in green
def green(str)
(COLOR == true) ? Term::ANSIColor.green(str) : str
end
# Print a text in red
def red(str)
(COLOR == true) ? Term::ANSIColor.red(str) : str
end
# Print a text in cyan
def cyan(str)
(COLOR == true) ? Term::ANSIColor.cyan(str) : str
end
# Print an info message
def info(str)
puts green("[INFO] " + str)
end
# Print an warning message
def warning(str)
puts cyan("/!\\ WARNING: " + str)
end
alias_method :warn, :warning
## Print an error message and abort
def error(str)
#abort red("*** ERROR *** " + str)
$stderr.puts red("*** ERROR *** " + str)
exit 1
end
## simple helper text to mention a non-implemented feature
def not_implemented
error("NOT YET IMPLEMENTED")
end
##############################
### Interaction functions ###
##############################
## Ask a question
def ask(question, default_answer = '')
return default_answer if FalkorLib.config[:no_interaction]
print "#{question} "
print "[Default: #{default_answer}]" unless default_answer == ''
print ": "
STDOUT.flush
answer = STDIN.gets.chomp
(answer.empty?) ? default_answer : answer
end
## Ask whether or not to really continue
def really_continue?(default_answer = 'Yes')
return if FalkorLib.config[:no_interaction]
pattern = (default_answer =~ /yes/i) ? '(Y|n)' : '(y|N)'
answer = ask( cyan("=> Do you really want to continue #{pattern}?"), default_answer)
exit 0 if answer =~ /n.*/i
end
############################
### Execution functions ###
############################
## Check for the presence of a given command
def command?(name)
`which #{name}`
$?.success?
end
    ## Execute a given command, returning its exit code and printing stdout and stderr nicely
def nice_execute(cmd)
puts bold("[Running] #{cmd.gsub(/^\s*/, ' ')}")
stdout, stderr, exit_status = Open3.capture3( cmd )
unless stdout.empty?
stdout.each_line do |line|
print "** [out] #{line}"
$stdout.flush
end
end
unless stderr.empty?
stderr.each_line do |line|
$stderr.print red("** [err] #{line}")
$stderr.flush
end
end
exit_status
end
# Simpler version that use the system call
def execute(cmd)
puts bold("[Running] #{cmd.gsub(/^\s*/, ' ')}")
system(cmd)
$?.exitstatus
end
## Execute in a given directory
def execute_in_dir(path, cmd)
exit_status = 0
Dir.chdir(path) do
exit_status = run %( #{cmd} )
end
exit_status
end # execute_in_dir
## Execute a given command - exit if status != 0
def exec_or_exit(cmd)
status = execute(cmd)
if (status.to_i.nonzero?)
error("The command '#{cmd}' failed with exit status #{status.to_i}")
end
status
end
## "Nice" way to present run commands
## Ex: run %{ hostname -f }
def run(cmds)
exit_status = 0
puts bold("[Running]\n#{cmds.gsub(/^\s*/, ' ')}")
$stdout.flush
#puts cmds.split(/\n */).inspect
cmds.split(/\n */).each do |cmd|
next if cmd.empty?
system(cmd.to_s) unless FalkorLib.config.debug
exit_status = $?.exitstatus
end
exit_status
end
## List items from a glob pattern to ask for a unique choice
# Supported options:
# :only_files [boolean]: list only files in the glob
# :only_dirs [boolean]: list only directories in the glob
# :pattern_include [array of strings]: pattern(s) to include for listing
# :pattern_exclude [array of strings]: pattern(s) to exclude for listing
# :text [string]: text to put
def list_items(glob_pattern, options = {})
list = { 0 => 'Exit' }
index = 1
raw_list = { 0 => 'Exit' }
Dir[glob_pattern.to_s].each do |elem|
#puts "=> element '#{elem}' - dir = #{File.directory?(elem)}; file = #{File.file?(elem)}"
next if (!options[:only_files].nil?) && options[:only_files] && File.directory?(elem)
next if (!options[:only_dirs].nil?) && options[:only_dirs] && File.file?(elem)
entry = File.basename(elem)
# unless options[:pattern_include].nil?
# select_entry = false
# options[:pattern_include].each do |pattern|
# #puts "considering pattern '#{pattern}' on entry '#{entry}'"
# select_entry |= entry =~ /#{pattern}/
# end
# next unless select_entry
# end
unless options[:pattern_exclude].nil?
select_entry = false
options[:pattern_exclude].each do |pattern|
#puts "considering pattern '#{pattern}' on entry '#{entry}'"
select_entry |= entry =~ /#{pattern}/
end
next if select_entry
end
#puts "selected entry = '#{entry}'"
list[index] = entry
raw_list[index] = elem
index += 1
end
text = (options[:text].nil?) ? "select the index" : options[:text]
default_idx = (options[:default].nil?) ? 0 : options[:default]
raise SystemExit, 'Empty list' if index == 1
#ap list
#ap raw_list
# puts list.to_yaml
# answer = ask("=> #{text}", "#{default_idx}")
# raise SystemExit.new('exiting selection') if answer == '0'
# raise RangeError.new('Undefined index') if Integer(answer) >= list.length
# raw_list[Integer(answer)]
select_from(list, text, default_idx, raw_list)
end
## Display a indexed list to select an i
def select_from(list, text = 'Select the index', default_idx = 0, raw_list = list)
error "list and raw_list differs in size" if list.size != raw_list.size
l = list
raw_l = raw_list
if list.is_a?(Array)
        l = { 0 => 'Exit' }
        raw_l = { 0 => 'Exit' } # distinct hashes so raw entries don't clobber the display list
list.each_with_index do |e, idx|
l[idx + 1] = e
raw_l[idx + 1] = raw_list[idx]
end
end
puts l.to_yaml
answer = ask("=> #{text}", default_idx.to_s)
raise SystemExit, 'exiting selection' if answer == '0'
raise RangeError, 'Undefined index' if Integer(answer) >= l.length
raw_l[Integer(answer)]
end # select_from
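    # --- Editor's note: a usage sketch for list_items/select_from; the glob and
    # option values below are illustrative only.
    #
    #   chosen  = list_items('templates/*', only_dirs: true,
    #                        text: 'Select a template directory')
    #   flavour = select_from(%w[basic full], 'Pick a flavour', 1)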
## Display a indexed list to select multiple indexes
def select_multiple_from(list, text = 'Select the index', default_idx = 1, raw_list = list)
error "list and raw_list differs in size" if list.size != raw_list.size
l = list
raw_l = raw_list
if list.is_a?(Array)
        l = { 0 => 'Exit', 1 => 'End of selection' }
        raw_l = { 0 => 'Exit', 1 => 'End of selection' } # distinct hashes, as in select_from
list.each_with_index do |e, idx|
l[idx + 2] = e
raw_l[idx + 2] = raw_list[idx]
end
end
puts l.to_yaml
choices = Array.new
answer = 0
begin
choices.push(raw_l[Integer(answer)]) if Integer(answer) > 1
answer = ask("=> #{text}", default_idx.to_s)
raise SystemExit, 'exiting selection' if answer == '0'
raise RangeError, 'Undefined index' if Integer(answer) >= l.length
end while Integer(answer) != 1
choices
end # select_multiple_from
###############################
### YAML File loading/store ###
###############################
# Return the yaml content as a Hash object
def load_config(file)
unless File.exist?(file)
raise FalkorLib::Error, "Unable to find the YAML file '#{file}'"
end
loaded = YAML.load_file(file)
unless loaded.is_a?(Hash)
raise FalkorLib::Error, "Corrupted or invalid YAML file '#{file}'"
end
loaded
end
# Store the Hash object as a Yaml file
# Supported options:
# :header [string]: additional info to place in the header of the (stored) file
# :no_interaction [boolean]: do not interact
#################################
### [ERB] template generation ###
#################################
# Bootstrap the destination directory `rootdir` using the template
# directory `templatedir`. the hash table `config` hosts the elements to
# feed ERB files which **should** have the extension .erb.
# The initialization is performed as follows:
# * a rsync process is initiated to duplicate the directory structure
# and the symlinks, and exclude .erb files
# * each erb files (thus with extension .erb) is interpreted, the
# corresponding file is generated without the .erb extension
# Supported options:
# :erb_exclude [array of strings]: pattern(s) to exclude from erb file
# interpretation and thus to copy 'as is'
# :no_interaction [boolean]: do not interact
def init_from_template(templatedir, rootdir, config = {},
options = {
:erb_exclude => [],
:no_interaction => false
})
error "Unable to find the template directory" unless File.directory?(templatedir)
warning "about to initialize/update the directory #{rootdir}"
really_continue? unless options[:no_interaction]
run %( mkdir -p #{rootdir} ) unless File.directory?( rootdir )
run %( rsync --exclude '*.erb' --exclude '.texinfo*' -avzu #{templatedir}/ #{rootdir}/ )
Dir["#{templatedir}/**/*.erb"].each do |erbfile|
relative_outdir = Pathname.new( File.realpath( File.dirname(erbfile) )).relative_path_from Pathname.new(templatedir)
filename = File.basename(erbfile, '.erb')
outdir = File.realpath( File.join(rootdir, relative_outdir.to_s) )
outfile = File.join(outdir, filename)
unless options[:erb_exclude].nil?
exclude_entry = false
options[:erb_exclude].each do |pattern|
exclude_entry |= erbfile =~ /#{pattern}/
end
if exclude_entry
info "copying non-interpreted ERB file"
          # copy this file since it has probably been excluded from the rsync process
run %( cp #{erbfile} #{outdir}/ )
next
end
end
# Let's go
info "updating '#{relative_outdir}/#{filename}'"
puts " using ERB template '#{erbfile}'"
write_from_erb_template(erbfile, outfile, config, options)
end
end
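    # --- Editor's note: a sketch of bootstrapping a directory from templates;
    # the paths, config hash, and exclude pattern are illustrative only.
    #
    #   init_from_template('templates/project', '/tmp/newproj',
    #                      { author: 'jdoe' },
    #                      erb_exclude: ['\.md\.erb$'], no_interaction: true)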
###
# ERB generation of the file `outfile` using the source template file `erbfile`
# Supported options:
# :no_interaction [boolean]: do not interact
# :srcdir [string]: source dir for all considered ERB files
def write_from_erb_template(erbfile, outfile, config = {},
options = {
:no_interaction => false
})
erbfiles = (erbfile.is_a?(Array)) ? erbfile : [ erbfile ]
content = ""
erbfiles.each do |f|
erb = (options[:srcdir].nil?) ? f : File.join(options[:srcdir], f)
unless File.exist?(erb)
warning "Unable to find the template ERBfile '#{erb}'"
really_continue? unless options[:no_interaction]
next
end
content += ERB.new(File.read(erb.to_s), nil, '<>').result(binding)
end
# error "Unable to find the template file #{erbfile}" unless File.exists? (erbfile )
# template = File.read("#{erbfile}")
# output = ERB.new(template, nil, '<>')
# content = output.result(binding)
show_diff_and_write(content, outfile, options)
end
    ## Show the difference between a `content` string and a destination file (using a diff algorithm).
    # Obviously, if the outfile does not exist, no difference is shown.
# Supported options:
# :no_interaction [boolean]: do not interact
# :json_pretty_format [boolean]: write a json content, in pretty format
# :no_commit [boolean]: do not (offer to) commit the changes
# return 0 if nothing happened, 1 if a write has been done
def show_diff_and_write(content, outfile, options = {
:no_interaction => false,
:json_pretty_format => false,
:no_commit => false
})
if File.exist?( outfile )
ref = File.read( outfile )
if options[:json_pretty_format]
ref = JSON.pretty_generate(JSON.parse( IO.read( outfile ) ))
end
if ref == content
warn "Nothing to update"
return 0
end
warn "the file '#{outfile}' already exists and will be overwritten."
warn "Expected difference: \n------"
Diffy::Diff.default_format = :color
puts Diffy::Diff.new(ref, content, :context => 1)
else
        watch = (options[:no_interaction]) ? 'no' : ask( cyan("   ==> Do you want to see the generated file before committing the writing (y|N)"), 'No')
puts content if watch =~ /y.*/i
end
proceed = (options[:no_interaction]) ? 'yes' : ask( cyan(" ==> proceed with the writing (Y|n)"), 'Yes')
return 0 if proceed =~ /n.*/i
info("=> writing #{outfile}")
File.open(outfile.to_s, "w+") do |f|
f.write content
end
if FalkorLib::Git.init?(File.dirname(outfile)) && !options[:no_commit]
do_commit = (options[:no_interaction]) ? 'yes' : ask( cyan(" ==> commit the changes (Y|n)"), 'Yes')
FalkorLib::Git.add(outfile, "update content of '#{File.basename(outfile)}'") if do_commit =~ /y.*/i
end
1
end
## Blind copy of a source file `src` into its destination directory `dstdir`
# Supported options:
# :no_interaction [boolean]: do not interact
# :srcdir [string]: source directory, make the `src` file relative to that directory
# :outfile [string]: alter the outfile name (File.basename(src) by default)
# :no_commit [boolean]: do not (offer to) commit the changes
def write_from_template(src, dstdir, options = {
:no_interaction => false,
:no_commit => false,
:srcdir => '',
:outfile => ''
})
srcfile = (options[:srcdir].nil?) ? src : File.join(options[:srcdir], src)
error "Unable to find the source file #{srcfile}" unless File.exist?( srcfile )
error "The destination directory '#{dstdir}' do not exist" unless File.directory?( dstdir )
dstfile = (options[:outfile].nil?) ? File.basename(srcfile) : options[:outfile]
outfile = File.join(dstdir, dstfile)
content = File.read( srcfile )
show_diff_and_write(content, outfile, options)
end # copy_from_template
### RVM init
def init_rvm(rootdir = Dir.pwd, gemset = '')
rvm_files = {
:version => File.join(rootdir, '.ruby-version'),
:gemset => File.join(rootdir, '.ruby-gemset')
}
unless File.exist?( (rvm_files[:version]).to_s)
v = select_from(FalkorLib.config[:rvm][:rubies],
"Select RVM ruby to configure for this directory",
3)
File.open( rvm_files[:version], 'w') do |f|
f.puts v
end
end
unless File.exist?( (rvm_files[:gemset]).to_s)
g = (gemset.empty?) ? ask("Enter RVM gemset name for this directory", File.basename(rootdir)) : gemset
File.open( rvm_files[:gemset], 'w') do |f|
f.puts g
end
end
end
###### normalize_path ######
    # Normalize a path and return the absolute path it resolves to
    # Ex: '.' returns Dir.pwd
# Supported options:
# * :relative [boolean] return relative path to the root dir
##
def normalized_path(dir = Dir.pwd, options = {})
rootdir = (FalkorLib::Git.init?(dir)) ? FalkorLib::Git.rootdir(dir) : dir
path = dir
path = Dir.pwd if dir == '.'
path = File.join(Dir.pwd, dir) unless (dir =~ /^\// || (dir == '.'))
if (options[:relative] || options[:relative_to])
root = (options[:relative_to]) ? options[:relative_to] : rootdir
relative_path_to_root = Pathname.new( File.realpath(path) ).relative_path_from Pathname.new(root)
path = relative_path_to_root.to_s
end
path
end # normalize_path
end
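# --- Editor's note: a minimal sketch of store_config; the path and hash are
# illustrative, and :no_interaction writes the resulting diff without prompting.
FalkorLib::Common.store_config('/tmp/demo.yaml',
                               { 'name' => 'demo', 'version' => '0.1.0' },
                               header: 'Managed by FalkorLib -- do not edit',
                               no_interaction: true)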
|
mongodb/mongoid | lib/mongoid/serializable.rb | Mongoid.Serializable.relation_names | ruby | def relation_names(inclusions)
inclusions.is_a?(Hash) ? inclusions.keys : Array.wrap(inclusions)
end | Since the inclusions can be a hash, symbol, or array of symbols, this is
provided as a convenience to parse out the names.
@example Get the association names.
document.relation_names(:include => [ :addresses ])
@param [ Hash, Symbol, Array<Symbol> ] inclusions The inclusions.
@return [ Array<Symbol> ] The names of the included associations.
@since 2.0.0.rc.6 | train | https://github.com/mongodb/mongoid/blob/56976e32610f4c2450882b0bfe14da099f0703f4/lib/mongoid/serializable.rb#L147-L149 | module Serializable
extend ActiveSupport::Concern
# We need to redefine where the JSON configuration is getting defined,
# similar to +ActiveRecord+.
included do
undef_method :include_root_in_json
delegate :include_root_in_json, to: ::Mongoid
end
# Gets the document as a serializable hash, used by ActiveModel's JSON
# serializer.
#
# @example Get the serializable hash.
# document.serializable_hash
#
# @example Get the serializable hash with options.
# document.serializable_hash(:include => :addresses)
#
# @param [ Hash ] options The options to pass.
#
# @option options [ Symbol ] :include What associations to include.
# @option options [ Symbol ] :only Limit the fields to only these.
# @option options [ Symbol ] :except Dont include these fields.
# @option options [ Symbol ] :methods What methods to include.
#
# @return [ Hash ] The document, ready to be serialized.
#
# @since 2.0.0.rc.6
def serializable_hash(options = nil)
options ||= {}
attrs = {}
names = field_names(options)
method_names = Array.wrap(options[:methods]).map do |name|
name.to_s if respond_to?(name)
end.compact
(names + method_names).each do |name|
without_autobuild do
serialize_attribute(attrs, name, names, options)
end
end
serialize_relations(attrs, options) if options[:include]
attrs
end
private
# Get the names of all fields that will be serialized.
#
# @api private
#
# @example Get all the field names.
# document.send(:field_names)
#
# @return [ Array<String> ] The names of the fields.
#
# @since 3.0.0
def field_names(options)
names = (as_attributes.keys + attribute_names).uniq.sort
only = Array.wrap(options[:only]).map(&:to_s)
except = Array.wrap(options[:except]).map(&:to_s)
except |= ['_type'] unless Mongoid.include_type_for_serialization
if !only.empty?
names &= only
elsif !except.empty?
names -= except
end
names
end
# Serialize a single attribute. Handles associations, fields, and dynamic
# attributes.
#
# @api private
#
# @example Serialize the attribute.
# document.serialize_attribute({}, "id" , [ "id" ])
#
# @param [ Hash ] attrs The attributes.
# @param [ String ] name The attribute name.
# @param [ Array<String> ] names The names of all attributes.
# @param [ Hash ] options The options.
#
# @return [ Object ] The attribute.
#
# @since 3.0.0
def serialize_attribute(attrs, name, names, options)
if relations.key?(name)
value = send(name)
attrs[name] = value ? value.serializable_hash(options) : nil
elsif names.include?(name) && !fields.key?(name)
attrs[name] = read_raw_attribute(name)
elsif !attribute_missing?(name)
attrs[name] = send(name)
end
end
# For each of the provided include options, get the association needed and
# provide it in the hash.
#
# @example Serialize the included associations.
# document.serialize_relations({}, :include => :addresses)
#
# @param [ Hash ] attributes The attributes to serialize.
# @param [ Hash ] options The serialization options.
#
# @option options [ Symbol ] :include What associations to include
# @option options [ Symbol ] :only Limit the fields to only these.
# @option options [ Symbol ] :except Dont include these fields.
#
# @since 2.0.0.rc.6
def serialize_relations(attributes = {}, options = {})
inclusions = options[:include]
relation_names(inclusions).each do |name|
association = relations[name.to_s]
if association && relation = send(association.name)
attributes[association.name.to_s] =
relation.serializable_hash(relation_options(inclusions, options, name))
end
end
end
# Since the inclusions can be a hash, symbol, or array of symbols, this is
# provided as a convenience to parse out the names.
#
# @example Get the association names.
# document.relation_names(:include => [ :addresses ])
#
# @param [ Hash, Symbol, Array<Symbol> ] inclusions The inclusions.
#
# @return [ Array<Symbol> ] The names of the included associations.
#
# @since 2.0.0.rc.6
# Since the inclusions can be a hash, symbol, or array of symbols, this is
# provided as a convenience to parse out the options.
#
# @example Get the association options.
# document.relation_names(:include => [ :addresses ])
#
# @param [ Hash, Symbol, Array<Symbol> ] inclusions The inclusions.
# @param [ Hash ] options The options.
# @param [ Symbol ] name The name of the association.
#
# @return [ Hash ] The options for the association.
#
# @since 2.0.0.rc.6
def relation_options(inclusions, options, name)
if inclusions.is_a?(Hash)
inclusions[name]
else
{ except: options[:except], only: options[:only] }
end
end
end
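# --- Editor's note: a sketch of how relation_names normalizes :include values;
# `Band` is a hypothetical Mongoid::Document with :albums and :members relations,
# and send is used because relation_names is private.
band = Band.first
band.send(:relation_names, :albums)                   # => [:albums]
band.send(:relation_names, [:albums, :members])       # => [:albums, :members]
band.send(:relation_names, albums: { only: :title })  # => [:albums]
band.serializable_hash(include: { albums: { only: :title } })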
|
litaio/lita | lib/lita/default_configuration.rb | Lita.DefaultConfiguration.handlers_config | ruby | def handlers_config
handlers = registry.handlers
root.config :handlers do
handlers.each do |handler|
if handler.configuration_builder.children?
combine(handler.namespace, handler.configuration_builder)
end
end
end
end | Builds config.handlers | train | https://github.com/litaio/lita/blob/c1a1f85f791b74e40ee6a1e2d53f19b5f7cbe0ba/lib/lita/default_configuration.rb#L54-L64 | class DefaultConfiguration
# Valid levels for Lita's logger.
LOG_LEVELS = %w[debug info warn error fatal].freeze
# A {Registry} to extract configuration for plugins from.
# @return [Registry] The registry.
attr_reader :registry
# The top-level {ConfigurationBuilder} attribute.
# @return [Configuration] The root attribute.
attr_reader :root
# @param registry [Registry] The registry to build a default configuration object from.
def initialize(registry)
@registry = registry
@root = ConfigurationBuilder.new
adapters_config
handlers_config
http_config
redis_config
robot_config
end
# Processes the {ConfigurationBuilder} object to return a {Configuration}.
# @return [Configuration] The built configuration object.
def build
root.build
end
private
# Builds config.adapters
def adapters_config
adapters = registry.adapters
root.config :adapters do
adapters.each do |key, adapter|
combine(key, adapter.configuration_builder)
end
end
end
# Builds config.handlers
# Builds config.http
def http_config
root.config :http do
config :host, type: String, default: "0.0.0.0"
config :port, type: [Integer, String], default: 8080
config :min_threads, type: [Integer, String], default: 0
config :max_threads, type: [Integer, String], default: 16
config :middleware, type: MiddlewareRegistry, default: MiddlewareRegistry.new
end
end
# Builds config.redis
def redis_config
root.config :redis, type: Hash, default: {}
end
# Builds config.robot
def robot_config
root.config :robot do
config :name, type: String, default: "Lita"
config :mention_name, type: String
config :alias, type: String
config :adapter, types: [String, Symbol], default: :shell
config :locale, types: [String, Symbol], default: I18n.locale
config :log_level, types: [String, Symbol], default: :info do
validate do |value|
unless LOG_LEVELS.include?(value.to_s.downcase.strip)
"must be one of: #{LOG_LEVELS.join(', ')}"
end
end
end
config :log_formatter, type: Proc, default: (lambda do |severity, datetime, _progname, msg|
"[#{datetime.utc}] #{severity}: #{msg}\n"
end)
config :admins
config :error_handler, default: ->(_error, _metadata) {} do
validate do |value|
"must respond to #call" unless value.respond_to?(:call)
end
end
end
end
end
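# --- Editor's note: a sketch of building the defaults from a plugin registry;
# the attribute values shown match the defaults declared above.
registry = Lita::Registry.new
config = Lita::DefaultConfiguration.new(registry).build
config.robot.name               # => "Lita"
config.http.port                # => 8080
config.robot.log_level = :debug # validated against LOG_LEVELS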
|
hashicorp/vagrant | lib/vagrant/environment.rb | Vagrant.Environment.batch | ruby | def batch(parallel=true)
parallel = false if ENV["VAGRANT_NO_PARALLEL"]
@batch_lock.synchronize do
BatchAction.new(parallel).tap do |b|
# Yield it so that the caller can setup actions
yield b
# And run it!
b.run
end
end
end | This creates a new batch action, yielding it, and then running it
once the block is called.
This handles the case where batch actions are disabled by the
VAGRANT_NO_PARALLEL environmental variable. | train | https://github.com/hashicorp/vagrant/blob/c22a145c59790c098f95d50141d9afb48e1ef55f/lib/vagrant/environment.rb#L272-L284 | class Environment
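    # --- Editor's note: a usage sketch for #batch (documented above). `env` is
    # a constructed Vagrant::Environment; the machine names come from the user's
    # Vagrantfile. Queued actions run in parallel unless VAGRANT_NO_PARALLEL is
    # set or `batch(false)` is used.
    #
    #   env.batch do |batch|
    #     env.active_machines.each do |name, provider|
    #       batch.action(env.machine(name, provider), :halt)
    #     end
    #   end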
# This is the current version that this version of Vagrant is
# compatible with in the home directory.
#
# @return [String]
CURRENT_SETUP_VERSION = "1.5"
DEFAULT_LOCAL_DATA = ".vagrant"
# The `cwd` that this environment represents
attr_reader :cwd
# The persistent data directory where global data can be stored. It
# is up to the creator of the data in this directory to properly
# remove it when it is no longer needed.
#
# @return [Pathname]
attr_reader :data_dir
# The valid name for a Vagrantfile for this environment.
attr_reader :vagrantfile_name
# The {UI} object to communicate with the outside world.
attr_reader :ui
# This is the UI class to use when creating new UIs.
attr_reader :ui_class
# The directory to the "home" folder that Vagrant will use to store
# global state.
attr_reader :home_path
# The directory to the directory where local, environment-specific
# data is stored.
attr_reader :local_data_path
# The directory where temporary files for Vagrant go.
attr_reader :tmp_path
# File where command line aliases go.
attr_reader :aliases_path
# The directory where boxes are stored.
attr_reader :boxes_path
# The path where the plugins are stored (gems)
attr_reader :gems_path
# The path to the default private key
attr_reader :default_private_key_path
# Initializes a new environment with the given options. The options
# is a hash where the main available key is `cwd`, which defines where
# the environment represents. There are other options available but
# they shouldn't be used in general. If `cwd` is nil, then it defaults
# to the `Dir.pwd` (which is the cwd of the executing process).
def initialize(opts=nil)
opts = {
cwd: nil,
home_path: nil,
local_data_path: nil,
ui_class: nil,
vagrantfile_name: nil,
}.merge(opts || {})
# Set the default working directory to look for the vagrantfile
opts[:cwd] ||= ENV["VAGRANT_CWD"] if ENV.key?("VAGRANT_CWD")
opts[:cwd] ||= Dir.pwd
opts[:cwd] = Pathname.new(opts[:cwd])
if !opts[:cwd].directory?
raise Errors::EnvironmentNonExistentCWD, cwd: opts[:cwd].to_s
end
opts[:cwd] = opts[:cwd].expand_path
# Set the default ui class
opts[:ui_class] ||= UI::Silent
# Set the Vagrantfile name up. We append "Vagrantfile" and "vagrantfile" so that
# those continue to work as well, but anything custom will take precedence.
opts[:vagrantfile_name] ||= ENV["VAGRANT_VAGRANTFILE"] if \
ENV.key?("VAGRANT_VAGRANTFILE")
opts[:vagrantfile_name] = [opts[:vagrantfile_name]] if \
opts[:vagrantfile_name] && !opts[:vagrantfile_name].is_a?(Array)
# Set instance variables for all the configuration parameters.
@cwd = opts[:cwd]
@home_path = opts[:home_path]
@vagrantfile_name = opts[:vagrantfile_name]
@ui = opts[:ui_class].new
@ui_class = opts[:ui_class]
# This is the batch lock, that enforces that only one {BatchAction}
# runs at a time from {#batch}.
@batch_lock = Mutex.new
@locks = {}
@logger = Log4r::Logger.new("vagrant::environment")
@logger.info("Environment initialized (#{self})")
@logger.info(" - cwd: #{cwd}")
# Setup the home directory
@home_path ||= Vagrant.user_data_path
@home_path = Util::Platform.fs_real_path(@home_path)
@boxes_path = @home_path.join("boxes")
@data_dir = @home_path.join("data")
@gems_path = Vagrant::Bundler.instance.plugin_gem_path
@tmp_path = @home_path.join("tmp")
@machine_index_dir = @data_dir.join("machine-index")
@aliases_path = Pathname.new(ENV["VAGRANT_ALIAS_FILE"]).expand_path if ENV.key?("VAGRANT_ALIAS_FILE")
@aliases_path ||= @home_path.join("aliases")
# Prepare the directories
setup_home_path
# Setup the local data directory. If a configuration path is given,
# it is expanded relative to the root path. Otherwise, we use the
# default (which is also expanded relative to the root path).
if !root_path.nil?
if !ENV["VAGRANT_DOTFILE_PATH"].to_s.empty? && !opts[:child]
opts[:local_data_path] ||= Pathname.new(File.expand_path(ENV["VAGRANT_DOTFILE_PATH"], root_path))
else
opts[:local_data_path] ||= root_path.join(DEFAULT_LOCAL_DATA)
end
end
if opts[:local_data_path]
@local_data_path = Pathname.new(File.expand_path(opts[:local_data_path], @cwd))
end
@logger.debug("Effective local data path: #{@local_data_path}")
# If we have a root path, load the ".vagrantplugins" file.
if root_path
plugins_file = root_path.join(".vagrantplugins")
if plugins_file.file?
@logger.info("Loading plugins file: #{plugins_file}")
load plugins_file
end
end
setup_local_data_path
# Setup the default private key
@default_private_key_path = @home_path.join("insecure_private_key")
copy_insecure_private_key
# Initialize localized plugins
plugins = Vagrant::Plugin::Manager.instance.localize!(self)
# Load any environment local plugins
Vagrant::Plugin::Manager.instance.load_plugins(plugins)
# Initialize globalize plugins
plugins = Vagrant::Plugin::Manager.instance.globalize!
# Load any global plugins
Vagrant::Plugin::Manager.instance.load_plugins(plugins)
if !vagrantfile.config.vagrant.plugins.empty?
plugins = process_configured_plugins
end
# Call the hooks that does not require configurations to be loaded
# by using a "clean" action runner
hook(:environment_plugins_loaded, runner: Action::Runner.new(env: self))
# Call the environment load hooks
hook(:environment_load, runner: Action::Runner.new(env: self))
end
# Return a human-friendly string for pretty printed or inspected
# instances.
#
# @return [String]
def inspect
"#<#{self.class}: #{@cwd}>".encode('external')
end
# Action runner for executing actions in the context of this environment.
#
# @return [Action::Runner]
def action_runner
@action_runner ||= Action::Runner.new do
{
action_runner: action_runner,
box_collection: boxes,
hook: method(:hook),
host: host,
machine_index: machine_index,
gems_path: gems_path,
home_path: home_path,
root_path: root_path,
tmp_path: tmp_path,
ui: @ui,
env: self
}
end
end
# Returns a list of machines that this environment is currently
# managing that physically have been created.
#
# An "active" machine is a machine that Vagrant manages that has
# been created. The machine itself may be in any state such as running,
# suspended, etc. but if a machine is "active" then it exists.
#
# Note that the machines in this array may no longer be present in
# the Vagrantfile of this environment. In this case the machine can
# be considered an "orphan." Determining which machines are orphan
# and which aren't is not currently a supported feature, but will
# be in a future version.
#
# @return [Array<String, Symbol>]
def active_machines
# We have no active machines if we have no data path
return [] if !@local_data_path
machine_folder = @local_data_path.join("machines")
# If the machine folder is not a directory then we just return
# an empty array since no active machines exist.
return [] if !machine_folder.directory?
# Traverse the machines folder accumulate a result
result = []
machine_folder.children(true).each do |name_folder|
# If this isn't a directory then it isn't a machine
next if !name_folder.directory?
name = name_folder.basename.to_s.to_sym
name_folder.children(true).each do |provider_folder|
# If this isn't a directory then it isn't a provider
next if !provider_folder.directory?
# If this machine doesn't have an ID, then ignore
next if !provider_folder.join("id").file?
provider = provider_folder.basename.to_s.to_sym
result << [name, provider]
end
end
# Return the results
result
end
# This creates a new batch action, yielding it, and then running it
# once the block is called.
#
# This handles the case where batch actions are disabled by the
# VAGRANT_NO_PARALLEL environmental variable.
# Makes a call to the CLI with the given arguments as if they
# came from the real command line (sometimes they do!). An example:
#
# env.cli("package", "--vagrantfile", "Vagrantfile")
#
def cli(*args)
CLI.new(args.flatten, self).execute
end
# This returns the provider name for the default provider for this
# environment.
#
# @return [Symbol] Name of the default provider.
def default_provider(**opts)
opts[:exclude] = Set.new(opts[:exclude]) if opts[:exclude]
opts[:force_default] = true if !opts.key?(:force_default)
opts[:check_usable] = true if !opts.key?(:check_usable)
# Implement the algorithm from
# https://www.vagrantup.com/docs/providers/basic_usage.html#default-provider
# with additional steps 2.5 and 3.5 from
# https://bugzilla.redhat.com/show_bug.cgi?id=1444492
# to allow system-configured provider priorities.
#
# 1. The --provider flag on a vagrant up is chosen above all else, if it is
# present.
#
# (Step 1 is done by the caller; this method is only called if --provider
# wasn't given.)
#
# 2. If the VAGRANT_DEFAULT_PROVIDER environmental variable is set, it
# takes next priority and will be the provider chosen.
default = ENV["VAGRANT_DEFAULT_PROVIDER"].to_s
if default.empty?
default = nil
else
default = default.to_sym
@logger.debug("Default provider: `#{default}`")
end
# If we're forcing the default, just short-circuit and return
# that (the default behavior)
if default && opts[:force_default]
@logger.debug("Using forced default provider: `#{default}`")
return default
end
# Determine the config to use to look for provider definitions. By
# default it is the global but if we're targeting a specific machine,
# then look there.
root_config = vagrantfile.config
if opts[:machine]
machine_info = vagrantfile.machine_config(opts[:machine], nil, nil, nil)
root_config = machine_info[:config]
end
# Get the list of providers within our configuration, in order.
config = root_config.vm.__providers
# Get the list of usable providers with their internally-declared
# priorities.
usable = []
Vagrant.plugin("2").manager.providers.each do |key, data|
impl = data[0]
popts = data[1]
# Skip excluded providers
next if opts[:exclude] && opts[:exclude].include?(key)
# Skip providers that can't be defaulted, unless they're in our
# config, in which case someone made our decision for us.
if !config.include?(key)
next if popts.key?(:defaultable) && !popts[:defaultable]
end
# Skip providers that aren't usable.
next if opts[:check_usable] && !impl.usable?(false)
# Each provider sets its own priority, defaulting to 5 so we can trust
# it's always set.
usable << [popts[:priority], key]
end
@logger.debug("Initial usable provider list: #{usable}")
# Sort the usable providers by priority. Higher numbers are higher
# priority, otherwise alpha sort.
usable = usable.sort {|a, b| a[0] == b[0] ? a[1] <=> b[1] : b[0] <=> a[0]}
.map {|prio, key| key}
@logger.debug("Priority sorted usable provider list: #{usable}")
# If we're not forcing the default, but it's usable and hasn't been
# otherwise excluded, return it now.
if usable.include?(default)
@logger.debug("Using default provider `#{default}` as it was found in usable list.")
return default
end
# 2.5. Vagrant will go through all of the config.vm.provider calls in the
# Vagrantfile and try each in order. It will choose the first
# provider that is usable and listed in VAGRANT_PREFERRED_PROVIDERS.
preferred = ENV.fetch('VAGRANT_PREFERRED_PROVIDERS', '')
.split(',')
.map {|s| s.strip}
.select {|s| !s.empty?}
.map {|s| s.to_sym}
@logger.debug("Preferred provider list: #{preferred}")
config.each do |key|
if usable.include?(key) && preferred.include?(key)
@logger.debug("Using preferred provider `#{key}` detected in configuration and usable.")
return key
end
end
# 3. Vagrant will go through all of the config.vm.provider calls in the
# Vagrantfile and try each in order. It will choose the first provider
# that is usable. For example, if you configure Hyper-V, it will never
# be chosen on Mac this way. It must be both configured and usable.
config.each do |key|
if usable.include?(key)
@logger.debug("Using provider `#{key}` detected in configuration and usable.")
return key
end
end
# 3.5. Vagrant will go through VAGRANT_PREFERRED_PROVIDERS and find the
# first plugin that reports it is usable.
preferred.each do |key|
if usable.include?(key)
@logger.debug("Using preferred provider `#{key}` found in usable list.")
return key
end
end
# 4. Vagrant will go through all installed provider plugins (including the
# ones that come with Vagrant), and find the first plugin that reports
# it is usable. There is a priority system here: systems that are known
# better have a higher priority than systems that are worse. For
# example, if you have the VMware provider installed, it will always
# take priority over VirtualBox.
if !usable.empty?
@logger.debug("Using provider `#{usable[0]}` as it is the highest priority in the usable list.")
return usable[0]
end
# 5. If Vagrant still has not found any usable providers, it will error.
# No providers available is a critical error for Vagrant.
raise Errors::NoDefaultProvider
end
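    # --- Editor's note: e.g. env.default_provider(machine: :web, exclude: [:docker])
    # walks the algorithm above and returns a Symbol such as :virtualbox.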
# Returns whether or not we know how to install the provider with
# the given name.
#
# @return [Boolean]
def can_install_provider?(name)
host.capability?(provider_install_key(name))
end
# Installs the provider with the given name.
#
# This will raise an exception if we don't know how to install the
# provider with the given name. You should guard this call with
# `can_install_provider?` for added safety.
#
# An exception will be raised if there are any failures installing
# the provider.
def install_provider(name)
host.capability(provider_install_key(name))
end
# Returns the collection of boxes for the environment.
#
# @return [BoxCollection]
def boxes
@_boxes ||= BoxCollection.new(
boxes_path,
hook: method(:hook),
temp_dir_root: tmp_path)
end
# Returns the {Config::Loader} that can be used to load Vagrantfiles
# given the settings of this environment.
#
# @return [Config::Loader]
def config_loader
return @config_loader if @config_loader
home_vagrantfile = nil
root_vagrantfile = nil
home_vagrantfile = find_vagrantfile(home_path) if home_path
if root_path
root_vagrantfile = find_vagrantfile(root_path, @vagrantfile_name)
end
@config_loader = Config::Loader.new(
Config::VERSIONS, Config::VERSIONS_ORDER)
@config_loader.set(:home, home_vagrantfile) if home_vagrantfile
@config_loader.set(:root, root_vagrantfile) if root_vagrantfile
@config_loader
end
# Loads another environment for the given Vagrantfile, sharing as much
# useful state from this Environment as possible (such as UI and paths).
    # Any initialization options can be overridden using the opts hash.
#
# @param [String] vagrantfile Path to a Vagrantfile
# @return [Environment]
def environment(vagrantfile, **opts)
path = File.expand_path(vagrantfile, root_path)
file = File.basename(path)
path = File.dirname(path)
Util::SilenceWarnings.silence! do
Environment.new({
child: true,
cwd: path,
home_path: home_path,
ui_class: ui_class,
vagrantfile_name: file,
}.merge(opts))
end
end
# This defines a hook point where plugin action hooks that are registered
# against the given name will be run in the context of this environment.
#
# @param [Symbol] name Name of the hook.
# @param [Action::Runner] action_runner A custom action runner for running hooks.
def hook(name, opts=nil)
@logger.info("Running hook: #{name}")
opts ||= {}
opts[:callable] ||= Action::Builder.new
opts[:runner] ||= action_runner
opts[:action_name] = name
opts[:env] = self
opts.delete(:runner).run(opts.delete(:callable), opts)
end
# Returns the host object associated with this environment.
#
# @return [Class]
def host
return @host if defined?(@host)
# Determine the host class to use. ":detect" is an old Vagrant config
# that shouldn't be valid anymore, but we respect it here by assuming
      # its old behavior. No need to deprecate this because I think it is
# fairly harmless.
host_klass = vagrantfile.config.vagrant.host
host_klass = nil if host_klass == :detect
begin
@host = Host.new(
host_klass,
Vagrant.plugin("2").manager.hosts,
Vagrant.plugin("2").manager.host_capabilities,
self)
rescue Errors::CapabilityHostNotDetected
# If the auto-detect failed, then we create a brand new host
# with no capabilities and use that. This should almost never happen
# since Vagrant works on most host OS's now, so this is a "slow path"
klass = Class.new(Vagrant.plugin("2", :host)) do
def detect?(env); true; end
end
hosts = { generic: [klass, nil] }
host_caps = {}
@host = Host.new(:generic, hosts, host_caps, self)
rescue Errors::CapabilityHostExplicitNotDetected => e
raise Errors::HostExplicitNotDetected, e.extra_data
end
end
# This acquires a process-level lock with the given name.
#
# The lock file is held within the data directory of this environment,
# so make sure that all environments that are locking are sharing
# the same data directory.
#
# This will raise Errors::EnvironmentLockedError if the lock can't
# be obtained.
#
# @param [String] name Name of the lock, since multiple locks can
# be held at one time.
def lock(name="global", **opts)
f = nil
# If we don't have a block, then locking is useless, so ignore it
return if !block_given?
# This allows multiple locks in the same process to be nested
return yield if @locks[name] || opts[:noop]
# The path to this lock
lock_path = data_dir.join("lock.#{name}.lock")
@logger.debug("Attempting to acquire process-lock: #{name}")
lock("dotlock", noop: name == "dotlock", retry: true) do
f = File.open(lock_path, "w+")
end
# The file locking fails only if it returns "false." If it
# succeeds it returns a 0, so we must explicitly check for
# the proper error case.
while f.flock(File::LOCK_EX | File::LOCK_NB) === false
@logger.warn("Process-lock in use: #{name}")
if !opts[:retry]
raise Errors::EnvironmentLockedError,
name: name
end
sleep 0.2
end
@logger.info("Acquired process lock: #{name}")
result = nil
begin
# Mark that we have a lock
@locks[name] = true
result = yield
ensure
# We need to make sure that no matter what this is always
# reset to false so we don't think we have a lock when we
# actually don't.
@locks.delete(name)
@logger.info("Released process lock: #{name}")
end
# Clean up the lock file, this requires another lock
if name != "dotlock"
lock("dotlock", retry: true) do
f.close
begin
File.delete(lock_path)
rescue
@logger.error(
"Failed to delete lock file #{lock_path} - some other thread " +
"might be trying to acquire it. ignoring this error")
end
end
end
# Return the result
return result
ensure
begin
f.close if f
rescue IOError
end
end
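    # --- Editor's note: a usage sketch for #lock; the lock name is illustrative.
    #
    #   env.lock("boxes", retry: true) do
    #     # exclusive section, shared across Vagrant processes via a data_dir lockfile
    #   end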
# This executes the push with the given name, raising any exceptions that
# occur.
#
# Precondition: the push is not nil and exists.
def push(name)
@logger.info("Getting push: #{name}")
name = name.to_sym
pushes = self.vagrantfile.config.push.__compiled_pushes
if !pushes.key?(name)
raise Vagrant::Errors::PushStrategyNotDefined,
name: name,
pushes: pushes.keys
end
strategy, config = pushes[name]
push_registry = Vagrant.plugin("2").manager.pushes
klass, _ = push_registry.get(strategy)
if klass.nil?
raise Vagrant::Errors::PushStrategyNotLoaded,
name: strategy,
pushes: push_registry.keys
end
klass.new(self, config).push
end
# The list of pushes defined in this Vagrantfile.
#
# @return [Array<Symbol>]
def pushes
self.vagrantfile.config.push.__compiled_pushes.keys
end
# This returns a machine with the proper provider for this environment.
# The machine named by `name` must be in this environment.
#
# @param [Symbol] name Name of the machine (as configured in the
# Vagrantfile).
# @param [Symbol] provider The provider that this machine should be
# backed by.
# @param [Boolean] refresh If true, then if there is a cached version
# it is reloaded.
# @return [Machine]
def machine(name, provider, refresh=false)
@logger.info("Getting machine: #{name} (#{provider})")
# Compose the cache key of the name and provider, and return from
# the cache if we have that.
cache_key = [name, provider]
@machines ||= {}
if refresh
@logger.info("Refreshing machine (busting cache): #{name} (#{provider})")
@machines.delete(cache_key)
end
if @machines.key?(cache_key)
@logger.info("Returning cached machine: #{name} (#{provider})")
return @machines[cache_key]
end
@logger.info("Uncached load of machine.")
# Determine the machine data directory and pass it to the machine.
machine_data_path = @local_data_path.join(
"machines/#{name}/#{provider}")
# Create the machine and cache it for future calls. This will also
# return the machine from this method.
@machines[cache_key] = vagrantfile.machine(
name, provider, boxes, machine_data_path, self)
end
# The {MachineIndex} to store information about the machines.
#
# @return [MachineIndex]
def machine_index
@machine_index ||= MachineIndex.new(@machine_index_dir)
end
# This returns a list of the configured machines for this environment.
# Each of the names returned by this method is valid to be used with
# the {#machine} method.
#
# @return [Array<Symbol>] Configured machine names.
def machine_names
vagrantfile.machine_names
end
# This returns the name of the machine that is the "primary." In the
# case of a single-machine environment, this is just the single machine
# name. In the case of a multi-machine environment, then this can
# potentially be nil if no primary machine is specified.
#
# @return [Symbol]
def primary_machine_name
vagrantfile.primary_machine_name
end
# The root path is the path where the top-most (loaded last)
# Vagrantfile resides. It can be considered the project root for
# this environment.
#
# @return [String]
def root_path
return @root_path if defined?(@root_path)
root_finder = lambda do |path|
# Note: To remain compatible with Ruby 1.8, we have to use
# a `find` here instead of an `each`.
vf = find_vagrantfile(path, @vagrantfile_name)
return path if vf
return nil if path.root? || !File.exist?(path)
root_finder.call(path.parent)
end
@root_path = root_finder.call(cwd)
end
# Unload the environment, running completion hooks. The environment
# should not be used after this (but CAN be, technically). It is
# recommended to always immediately set the variable to `nil` after
# running this so you can't accidentally run any more methods. Example:
#
# env.unload
# env = nil
#
def unload
hook(:environment_unload)
end
# Represents the default Vagrantfile, or the Vagrantfile that is
# in the working directory or a parent of the working directory
# of this environment.
#
# The existence of this function is primarily a convenience. There
# is nothing stopping you from instantiating your own {Vagrantfile}
# and loading machines in any way you see fit. Typical behavior of
# Vagrant, however, loads this Vagrantfile.
#
# This Vagrantfile is comprised of two major sources: the Vagrantfile
# in the user's home directory as well as the "root" Vagrantfile or
# the Vagrantfile in the working directory (or parent).
#
# @return [Vagrantfile]
def vagrantfile
@vagrantfile ||= Vagrantfile.new(config_loader, [:home, :root])
end
#---------------------------------------------------------------
# Load Methods
#---------------------------------------------------------------
# This sets the `@home_path` variable properly.
#
# @return [Pathname]
def setup_home_path
@logger.info("Home path: #{@home_path}")
# Setup the list of child directories that need to be created if they
# don't already exist.
dirs = [
@home_path,
@home_path.join("rgloader"),
@boxes_path,
@data_dir,
@gems_path,
@tmp_path,
@machine_index_dir,
]
# Go through each required directory, creating it if it doesn't exist
dirs.each do |dir|
next if File.directory?(dir)
begin
@logger.info("Creating: #{dir}")
FileUtils.mkdir_p(dir)
rescue Errno::EACCES
raise Errors::HomeDirectoryNotAccessible, home_path: @home_path.to_s
end
end
# Attempt to write into the home directory to verify we can
begin
# Append a random suffix to avoid race conditions if Vagrant
# is running in parallel with other Vagrant processes.
suffix = (0...32).map { (65 + rand(26)).chr }.join
path = @home_path.join("perm_test_#{suffix}")
path.open("w") do |f|
f.write("hello")
end
path.unlink
rescue Errno::EACCES
raise Errors::HomeDirectoryNotAccessible, home_path: @home_path.to_s
end
# Create the version file that we use to track the structure of
# the home directory. If we have an old version, we need to explicitly
# upgrade it. Otherwise, we just mark that its the current version.
version_file = @home_path.join("setup_version")
if version_file.file?
version = version_file.read.chomp
if version > CURRENT_SETUP_VERSION
raise Errors::HomeDirectoryLaterVersion
end
case version
when CURRENT_SETUP_VERSION
# We're already good, at the latest version.
when "1.1"
# We need to update our directory structure
upgrade_home_path_v1_1
# Delete the version file so we put our latest version in
version_file.delete
else
raise Errors::HomeDirectoryUnknownVersion,
path: @home_path.to_s,
version: version
end
end
if !version_file.file?
@logger.debug(
"Creating home directory version file: #{CURRENT_SETUP_VERSION}")
version_file.open("w") do |f|
f.write(CURRENT_SETUP_VERSION)
end
end
# Create the rgloader/loader file so we can use encoded files.
loader_file = @home_path.join("rgloader", "loader.rb")
if !loader_file.file?
source_loader = Vagrant.source_root.join("templates/rgloader.rb")
FileUtils.cp(source_loader.to_s, loader_file.to_s)
end
end
# This creates the local data directory and show an error if it
# couldn't properly be created.
def setup_local_data_path(force=false)
if @local_data_path.nil?
@logger.warn("No local data path is set. Local data cannot be stored.")
return
end
@logger.info("Local data path: #{@local_data_path}")
# If the local data path is a file, then we are probably seeing an
# old (V1) "dotfile." In this case, we upgrade it. The upgrade process
# will remove the old data file if it is successful.
if @local_data_path.file?
upgrade_v1_dotfile(@local_data_path)
end
# If we don't have a root path, we don't setup anything
return if !force && root_path.nil?
begin
@logger.debug("Creating: #{@local_data_path}")
FileUtils.mkdir_p(@local_data_path)
# Create the rgloader/loader file so we can use encoded files.
loader_file = @local_data_path.join("rgloader", "loader.rb")
if !loader_file.file?
source_loader = Vagrant.source_root.join("templates/rgloader.rb")
FileUtils.mkdir_p(@local_data_path.join("rgloader").to_s)
FileUtils.cp(source_loader.to_s, loader_file.to_s)
end
rescue Errno::EACCES
raise Errors::LocalDataDirectoryNotAccessible,
local_data_path: @local_data_path.to_s
end
end
protected
# Check for any local plugins defined within the Vagrantfile. If
# found, validate they are available. If they are not available,
# request to install them, or raise an exception
#
# @return [Hash] plugin list for loading
def process_configured_plugins
return if !Vagrant.plugins_enabled?
errors = vagrantfile.config.vagrant.validate(nil)
if !errors["vagrant"].empty?
raise Errors::ConfigInvalid,
errors: Util::TemplateRenderer.render(
"config/validation_failed",
errors: errors)
end
# Check if defined plugins are installed
installed = Plugin::Manager.instance.installed_plugins
needs_install = []
config_plugins = vagrantfile.config.vagrant.plugins
config_plugins.each do |name, info|
if !installed[name]
needs_install << name
end
end
if !needs_install.empty?
ui.warn(I18n.t("vagrant.plugins.local.uninstalled_plugins",
plugins: needs_install.sort.join(", ")))
if !Vagrant.auto_install_local_plugins?
answer = nil
until ["y", "n"].include?(answer)
answer = ui.ask(I18n.t("vagrant.plugins.local.request_plugin_install") + " [N]: ")
answer = answer.strip.downcase
answer = "n" if answer.to_s.empty?
end
if answer == "n"
raise Errors::PluginMissingLocalError,
plugins: needs_install.sort.join(", ")
end
end
needs_install.each do |name|
pconfig = Util::HashWithIndifferentAccess.new(config_plugins[name])
ui.info(I18n.t("vagrant.commands.plugin.installing", name: name))
options = {sources: Vagrant::Bundler::DEFAULT_GEM_SOURCES.dup, env_local: true}
options[:sources] = pconfig[:sources] if pconfig[:sources]
options[:require] = pconfig[:entry_point] if pconfig[:entry_point]
options[:version] = pconfig[:version] if pconfig[:version]
spec = Plugin::Manager.instance.install_plugin(name, options)
ui.info(I18n.t("vagrant.commands.plugin.installed",
name: spec.name, version: spec.version.to_s))
end
ui.info("\n")
# Force halt after installation and require command to be run again. This
# will properly load any new locally installed plugins which are now available.
ui.warn(I18n.t("vagrant.plugins.local.install_rerun_command"))
exit(-1)
end
Vagrant::Plugin::Manager.instance.local_file.installed_plugins
end
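# Illustrative Vagrantfile snippet (assumed syntax; the plugin name and
# version are examples only) showing the local plugin declarations that
# process_configured_plugins validates and installs:
#
#   Vagrant.configure("2") do |config|
#     config.vagrant.plugins = {
#       "vagrant-disksize" => {"version" => "0.1.3"}
#     }
#   end
#
# Each per-plugin hash may also carry "sources" and "entry_point", which
# map onto the install options assembled above.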
# This method copies the private key into the home directory if it
# doesn't already exist.
#
# This must be done because `ssh` requires that the key is chmod
# 0600, but if Vagrant is installed as a separate user, then the
# effective uid won't be able to read the key. So the key is copied
# to the home directory and chmod 0600.
def copy_insecure_private_key
if !@default_private_key_path.exist?
@logger.info("Copying private key to home directory")
source = File.expand_path("keys/vagrant", Vagrant.source_root)
destination = @default_private_key_path
begin
FileUtils.cp(source, destination)
rescue Errno::EACCES
raise Errors::CopyPrivateKeyFailed,
source: source,
destination: destination
end
end
if !Util::Platform.windows?
# On Windows, permissions don't matter as much, so don't worry
# about doing chmod.
if Util::FileMode.from_octal(@default_private_key_path.stat.mode) != "600"
@logger.info("Changing permissions on private key to 0600")
@default_private_key_path.chmod(0600)
end
end
end
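# Minimal sketch (illustrative; `key_path` is a hypothetical name) of the
# permission normalization performed above, using the same helpers:
#
#   mode = Util::FileMode.from_octal(key_path.stat.mode)  # e.g. "644"
#   key_path.chmod(0600) if mode != "600"
#
# `ssh` rejects private keys readable by other users, hence the 0600 mask.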
# Finds the Vagrantfile in the given directory.
#
# @param [Pathname] search_path Path to search in.
# @return [Pathname]
def find_vagrantfile(search_path, filenames=nil)
filenames ||= ["Vagrantfile", "vagrantfile"]
filenames.each do |vagrantfile|
current_path = search_path.join(vagrantfile)
return current_path if current_path.file?
end
nil
end
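# Example usage (illustrative):
#
#   find_vagrantfile(Pathname.new("/projects/app"))
#   #=> #<Pathname:/projects/app/Vagrantfile>, or nil when neither
#   #   "Vagrantfile" nor "vagrantfile" exists in that directory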
# Returns the key used for the host capability for provider installs
# of the given name.
def provider_install_key(name)
"provider_install_#{name}".to_sym
end
# This upgrades a home directory that was in the v1.1 format to the
# v1.5 format. It will raise exceptions if anything fails.
def upgrade_home_path_v1_1
if !ENV["VAGRANT_UPGRADE_SILENT_1_5"]
@ui.ask(I18n.t("vagrant.upgrading_home_path_v1_5"))
end
collection = BoxCollection.new(
@home_path.join("boxes"), temp_dir_root: tmp_path)
collection.upgrade_v1_1_v1_5
end
# This upgrades a Vagrant 1.0.x "dotfile" to the new V2 format.
#
# This is a destructive process. Once the upgrade is complete, the
# old dotfile is removed, and the environment becomes incompatible with
# Vagrant 1.0 environments.
#
# @param [Pathname] path The path to the dotfile
def upgrade_v1_dotfile(path)
@logger.info("Upgrading V1 dotfile to V2 directory structure...")
# First, verify the file isn't empty. If it is an empty file, we
# just delete it and go on with life.
contents = path.read.strip
if contents == ""
@logger.info("V1 dotfile was empty. Removing and moving on.")
path.delete
return
end
# Otherwise, verify there is valid JSON in here since a Vagrant
# environment would always ensure valid JSON. This is a sanity check
# to make sure we don't nuke a dotfile that is not ours...
@logger.debug("Attempting to parse JSON of V1 file")
json_data = nil
begin
json_data = JSON.parse(contents)
@logger.debug("JSON parsed successfully. Things are okay.")
rescue JSON::ParserError
# The file could've been tampered with since Vagrant 1.0.x is
# supposed to ensure that the contents are valid JSON. Show an error.
raise Errors::DotfileUpgradeJSONError,
state_file: path.to_s
end
# Alright, let's upgrade this guy to the new structure. Start by
# backing up the old dotfile.
backup_file = path.dirname.join(".vagrant.v1.#{Time.now.to_i}")
@logger.info("Renaming old dotfile to: #{backup_file}")
path.rename(backup_file)
# Now, we create the actual local data directory. This should succeed
# this time since we renamed the old conflicting V1 dotfile.
setup_local_data_path(true)
if json_data["active"]
@logger.debug("Upgrading to V2 style for each active VM")
json_data["active"].each do |name, id|
@logger.info("Upgrading dotfile: #{name} (#{id})")
# Create the machine configuration directory
directory = @local_data_path.join("machines/#{name}/virtualbox")
FileUtils.mkdir_p(directory)
# Write the ID file
directory.join("id").open("w+") do |f|
f.write(id)
end
end
end
# Upgrade complete! Let the user know
@ui.info(I18n.t("vagrant.general.upgraded_v1_dotfile",
backup_path: backup_file.to_s))
end
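# Illustrative before/after of the upgrade above (machine name and ID
# value are assumed):
#
#   # V1 ".vagrant" dotfile (JSON):
#   #   {"active": {"default": "02f1aabb"}}
#   #
#   # V2 layout written by the loop above:
#   #   .vagrant/machines/default/virtualbox/id  # file containing "02f1aabb"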
end
|
GeorgeKaraszi/ActiveRecordExtended | lib/active_record_extended/utilities.rb | ActiveRecordExtended.Utilities.to_arel_sql | ruby | def to_arel_sql(value)
case value
when Arel::Node, Arel::Nodes::SqlLiteral, nil
value
when ActiveRecord::Relation
Arel.sql(value.spawn.to_sql)
else
Arel.sql(value.respond_to?(:to_sql) ? value.to_sql : value.to_s)
end
end | Converts a potential subquery into a compatible Arel SQL node.
Note:
We convert relations to SQL to maintain compatibility with Rails 5.[0/1].
Only Rails 5.2+ maintains bound attributes in Arel, so it's better to be safe than sorry.
When we drop support for Rails 5.[0/1], we can then drop the '.to_sql' conversion | train | https://github.com/GeorgeKaraszi/ActiveRecordExtended/blob/aca74eebb64b9957a2c8765bef6e43c7d5736fd8/lib/active_record_extended/utilities.rb#L140-L149 | module Utilities
A_TO_Z_KEYS = ("a".."z").to_a.freeze
# We need to ensure we can flatten nested ActiveRecord::Relations
# that might have been nested due to the (splat)*args parameters
#
# Note: calling `Array.flatten[!]/1` will actually remove all AR relations from the array.
#
def flatten_to_sql(*values)
flatten_safely(values) do |value|
value = yield value if block_given?
to_arel_sql(value)
end
end
alias to_sql_array flatten_to_sql
def flatten_safely(values, &block)
unless values.is_a?(Array)
values = yield values if block_given?
return [values]
end
values.map { |value| flatten_safely(value, &block) }.reduce(:+)
end
# Applies aliases to the given query
# Ex: `SELECT * FROM users` => `(SELECT * FROM users) AS "members"`
def nested_alias_escape(query, alias_name)
sql_query = Arel::Nodes::Grouping.new(to_arel_sql(query))
Arel::Nodes::As.new(sql_query, to_arel_sql(double_quote(alias_name)))
end
# Wraps subquery into an Aliased ARRAY
# Ex: `SELECT * FROM users` => (ARRAY(SELECT * FROM users)) AS "members"
def wrap_with_array(arel_or_rel_query, alias_name)
query = Arel::Nodes::Array.new(to_sql_array(arel_or_rel_query))
nested_alias_escape(query, alias_name)
end
# Wraps query into an aggregated array
# EX: `(ARRAY_AGG((SELECT * FROM users)) AS "members"`
# `(ARRAY_AGG(DISTINCT (SELECT * FROM users)) AS "members"`
# `SELECT ARRAY_AGG((id)) AS "ids" FROM users`
# `SELECT ARRAY_AGG(DISTINCT (id)) AS "ids" FROM users`
def wrap_with_agg_array(arel_or_rel_query, alias_name, casting_option = :agg, distinct = false)
query = group_when_needed(arel_or_rel_query)
query = Arel::Nodes::ArrayAgg.new(to_sql_array(query))
query.distinct = distinct || [:agg_distinct, :array_agg_distinct].include?(casting_option)
nested_alias_escape(query, alias_name)
end
# Will attempt to digest and resolve the from clause
#
# If the from clause is a String, it will check to see if a table reference key has been assigned.
# - If one cannot be detected, one will be appended.
# - Rails does not allow assigning table references using the `.from/2` method, when it's a String / Symbol type.
#
# If the from clause is an AR relation; it will duplicate the object.
# - Ensures any memoizers are reset (ex: `.to_sql` sets a memoizer on the instance)
# - Key's can be assigned using the `.from/2` method.
#
def from_clause_constructor(from, reference_key)
case from
when /\s.?#{reference_key}.?$/ # The from clause is a string and has the tbl reference key
@scope.unscoped.from(from)
when String, Symbol
@scope.unscoped.from("#{from} #{reference_key}")
else
replicate_klass = from.respond_to?(:unscoped) ? from.unscoped : @scope.unscoped
replicate_klass.from(from.dup, reference_key)
end
end
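# Ex (illustrative, assuming reference_key "person"):
#   from_clause_constructor("users person", "person") # used as-is
#   from_clause_constructor("users", "person")        # FROM "users person"
#   from_clause_constructor(User.all, "person")       # unscoped relation + key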
# Will carry defined CTE tables from the nested sub-query and gradually push them up to the parent's query stack
# I.E: It pushes `WITH [:cte_name:] AS(...), ..` to the top of the query structure tree
#
# SPECIAL GOTCHA NOTE: (if duplicate keys are found) This will favor the parent's query `with`s over nested ones!
def pipe_cte_with!(subquery)
return self unless subquery.try(:with_values?)
cte_ary = flatten_safely(subquery.with_values)
subquery.with_values = nil # Remove nested queries with values
# Add the subquery's CTEs to the parent's query stack. (READ THE SPECIAL NOTE ABOVE!)
if @scope.with_values?
# combine top-level and lower level queries `.with` values into 1 structure
with_hash = cte_ary.each_with_object(@scope.with_values.first) do |from_cte, hash|
hash.reverse_merge!(from_cte)
end
@scope.with_values = [with_hash]
else
# Top level has no with values
@scope.with!(*cte_ary)
end
self
end
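# Ex (illustrative sketch of the precedence rule above; model names assumed):
#   parent = User.with(a: Admin.all)                # parent-level CTE
#   child  = User.with(a: Author.all, b: Book.all)  # nested CTEs
# After pipe_cte_with!(child), the parent's WITH clause carries
# a (Admin, the parent wins) and b (Book, carried up).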
# Ensures the given value is properly double quoted.
# This also ensures we don't have conflicts with reserved keywords.
#
# IE: `user` is a reserved keyword in PG. But `"user"` is allowed and works the same
# when used as an column/tbl alias.
def double_quote(value)
return if value.nil?
case value.to_s
# Ignore keys that contain double quotes or a Arel.star (*)[all columns]
# or if a table has already been explicitly declared (ex: users.id)
when "*", /((^".+"$)|(^[[:alpha:]]+\.[[:alnum:]]+))/
value
else
PG::Connection.quote_ident(value.to_s)
end
end
# Ensures the key is properly single quoted and treated as an actual PG key reference.
def literal_key(key)
case key
when TrueClass then "'t'"
when FalseClass then "'f'"
when Numeric then key
else
key = key.to_s
key.start_with?("'") && key.end_with?("'") ? key : "'#{key}'"
end
end
# Converts a potential subquery into a compatible Arel SQL node.
#
# Note:
# We convert relations to SQL to maintain compatibility with Rails 5.[0/1].
# Only Rails 5.2+ maintains bound attributes in Arel, so it's better to be safe than sorry.
# When we drop support for Rails 5.[0/1], we can then drop the '.to_sql' conversion
def group_when_needed(arel_or_rel_query)
return arel_or_rel_query unless needs_to_be_grouped?(arel_or_rel_query)
Arel::Nodes::Grouping.new(to_arel_sql(arel_or_rel_query))
end
def needs_to_be_grouped?(query)
query.respond_to?(:to_sql) || (query.is_a?(String) && /^SELECT.+/i.match?(query))
end
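# Ex (illustrative) of the grouping rules above:
#   group_when_needed(User.all)   # => (SELECT "users".* FROM "users")
#   group_when_needed("SELECT 1") # => (SELECT 1)
#   group_when_needed("users.id") # => users.id (left untouched)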
def key_generator
A_TO_Z_KEYS.sample
end
end
|
nestor-custodio/automodel-sqlserver | lib/automodel/schema_inspector.rb | Automodel.SchemaInspector.foreign_keys | ruby | def foreign_keys(table_name)
table_name = table_name.to_s
@foreign_keys ||= {}
@foreign_keys[table_name] ||= begin
if @registration[:foreign_keys].present?
@registration[:foreign_keys].call(@connection, table_name)
else
begin
@connection.foreign_keys(table_name)
rescue ::NoMethodError, ::NotImplementedError
## Not all ActiveRecord adapters support `#foreign_keys`. When this happens, we'll make
## a best-effort attempt to intuit relationships from the table and column names.
##
columns(table_name).map do |column|
id_pattern = %r{(?:_id|Id)$}
next unless column.name =~ id_pattern
target_table = column.name.sub(id_pattern, '')
next unless target_table.in? tables
target_column = primary_key(qualified_name(target_table, context: table_name))
next unless target_column.in? ['id', 'Id', 'ID', column.name]
ActiveRecord::ConnectionAdapters::ForeignKeyDefinition.new(
table_name.split('.').last,
target_table,
name: "FK_#{SecureRandom.uuid.delete('-')}",
column: column.name,
primary_key: target_column,
on_update: nil,
on_delete: nil
)
end.compact
end
end
end
end | Returns a list of foreign keys for the given table.
If a matching Automodel::SchemaInspector registration is found for the connection's adapter,
and that registration specified a `:foreign_keys` Proc, the Proc is called. Otherwise, the
standard connection `#foreign_keys` is attempted. If that call to `#foreign_keys` raises a
::NoMethodError or ::NotImplementedError, a best-effort attempt is made to build a list of
foreign keys based on table and column names.
@param table_name [String]
The table whose foreign keys should be fetched.
@return [Array<ActiveRecord::ConnectionAdapters::ForeignKeyDefinition>] | train | https://github.com/nestor-custodio/automodel-sqlserver/blob/7269224752274f59113ccf8267fc49316062ae22/lib/automodel/schema_inspector.rb#L151-L188 | class SchemaInspector
## rubocop:disable all
## Class-Instance variable: `known_adapters` is a Hash of adapters registered via
## {Automodel::SchemaInspector.register_adapter}.
##
@known_adapters = {}
def self.known_adapters; @known_adapters; end
def known_adapters; self.class.known_adapters; end
## rubocop:enable all
## "Registers" an adapter with the Automodel::SchemaInspector. This allows for alternate
## mechanisms of procuring lists of tables, columns, primary keys, and/or foreign keys from an
## adapter that may not itself support `#tables`/`#columns`/`#primary_key`/`#foreign_keys`.
##
##
## @param adapter [String, Symbol]
## The "adapter" value used to match that given in the connection spec. It is with this value
## that the adapter being registered is matched to an existing database pool/connection.
##
## @param tables [Proc]
## The Proc to `#call` to request a list of table names. The Proc will be called with one
## parameter: a database connection.
##
## @param columns [Proc]
## The Proc to `#call` to request a list of columns for a specific table. The Proc will be
## called with two parameters: a database connection and a table name.
##
## @param primary_key [Proc]
## The Proc to `#call` to request the primary key for a specific table. The Proc will be
## called with two parameters: a database connection and a table name.
##
## @param foreign_keys [Proc]
## The Proc to `#call` to request a list of foreign keys for a specific table. The Proc will
## be called with two parameters: a database connection and a table name.
##
##
## @raise [Automodel::AdapterAlreadyRegisteredError]
##
def self.register_adapter(adapter:, tables:, columns:, primary_key:, foreign_keys: nil)
adapter = adapter.to_sym.downcase
raise Automodel::AdapterAlreadyRegisteredError, adapter if known_adapters.key? adapter
known_adapters[adapter] = { tables: tables,
columns: columns,
primary_key: primary_key,
foreign_keys: foreign_keys }
end
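## Illustrative registration (the adapter name and SQL are assumptions,
## not part of this gem):
##
##   Automodel::SchemaInspector.register_adapter(
##     adapter: :sqlserver,
##     tables: ->(connection) { connection.select_values('SELECT name FROM sys.tables') },
##     columns: ->(connection, table) { connection.columns(table) },
##     primary_key: ->(connection, table) { connection.primary_key(table) }
##   )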
## @param connection_handler [ActiveRecord::ConnectionHandling]
## The connection pool/handler (an object that implements ActiveRecord::ConnectionHandling) to
## inspect and map out.
##
def initialize(connection_handler)
@connection = connection_handler.connection
adapter = connection_handler.connection_pool.spec.config[:adapter]
@registration = known_adapters[adapter.to_sym] || {}
end
## Returns a list of table names in the target database.
##
## If a matching Automodel::SchemaInspector registration is found for the connection's adapter,
## and that registration specified a `:tables` Proc, the Proc is called. Otherwise, the standard
## connection `#tables` is returned.
##
##
## @return [Array<String>]
##
def tables
@tables ||= if @registration[:tables].present?
@registration[:tables].call(@connection)
else
@connection.tables
end
end
## Returns a list of columns for the given table.
##
## If a matching Automodel::SchemaInspector registration is found for the connection's adapter,
## and that registration specified a `:columns` Proc, the Proc is called. Otherwise, the
## standard connection `#columns` is returned.
##
##
## @param table_name [String]
## The table whose columns should be fetched.
##
##
## @return [Array<ActiveRecord::ConnectionAdapters::Column>]
##
def columns(table_name)
table_name = table_name.to_s
@columns ||= {}
@columns[table_name] ||= if @registration[:columns].present?
@registration[:columns].call(@connection, table_name)
else
@connection.columns(table_name)
end
end
## Returns the primary key for the given table.
##
## If a matching Automodel::SchemaInspector registration is found for the connection's adapter,
## and that registration specified a `:primary_key` Proc, the Proc is called. Otherwise, the
## standard connection `#primary_key` is returned.
##
##
## @param table_name [String]
## The table whose primary key should be fetched.
##
##
## @return [String, Array<String>]
##
def primary_key(table_name)
table_name = table_name.to_s
@primary_keys ||= {}
@primary_keys[table_name] ||= if @registration[:primary_key].present?
@registration[:primary_key].call(@connection, table_name)
else
@connection.primary_key(table_name)
end
end
## Returns a list of foreign keys for the given table.
##
## If a matching Automodel::SchemaInspector registration is found for the connection's adapter,
## and that registration specified a `:foreign_keys` Proc, the Proc is called. Otherwise, the
## standard connection `#foreign_keys` is attempted. If that call to `#foreign_keys` raises a
## ::NoMethodError or ::NotImplementedError, a best-effort attempt is made to build a list of
## foreign keys based on table and column names.
##
##
## @param table_name [String]
## The table whose foreign keys should be fetched.
##
##
## @return [Array<ActiveRecord::ConnectionAdapters::ForeignKeyDefinition>]
##
private
## Returns a qualified table name.
##
##
## @param table_name [String]
## The name to qualify.
##
## @param context [String]
## The name of an existing table from whose namespace we want to be able to reach the first
## table.
##
##
## @return [String]
##
def qualified_name(table_name, context:)
return table_name if table_name['.'].present?
return table_name if context['.'].blank?
"#{context.sub(%r{[^.]*$}, '')}#{table_name}"
end
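## Ex (illustrative; table and schema names assumed):
##
##   qualified_name("orders", context: "sales.customers")  #=> "sales.orders"
##   qualified_name("orders", context: "customers")        #=> "orders"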
## Returns an unqualified table name.
##
def unqualified(table_name)
table_name.split('.').last
end
end
|
HewlettPackard/hpe3par_ruby_sdk | lib/Hpe3parSdk/client.rb | Hpe3parSdk.Client.create_volume_set | ruby | def create_volume_set(name, domain = nil, comment = nil, setmembers = nil)
begin
@volume_set.create_volume_set(name, domain, comment, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end | Creates a new volume set
==== Attributes
* name - the volume set to create
type name: String
* domain: the domain where the set lives
type domain: String
* comment: the comment for the vv set
type comment: String
* setmembers: the vv(s) to add to the set, the existence of the vv(s) will not be checked
type name: Array of String
==== Raises
* Hpe3parSdk::HTTPBadRequest
- INV_INPUT Invalid URI Syntax.
* Hpe3parSdk::HTTPBadRequest
- NON_EXISTENT_DOMAIN - Domain doesn't exist.
* Hpe3parSdk::HTTPBadRequest
- NO_SPACE - Not Enough space is available.
* Hpe3parSdk::HTTPBadRequest
- BAD_CPG_PATTERN A Pattern in a CPG specifies illegal values.
* Hpe3parSdk::HTTPForbidden
- PERM_DENIED - Permission denied
* Hpe3parSdk::HTTPConflict
- EXISTENT_CPG - CPG Exists already | train | https://github.com/HewlettPackard/hpe3par_ruby_sdk/blob/f8cfc6e597741be593cf7fe013accadf982ee68b/lib/Hpe3parSdk/client.rb#L1850-L1857 | class Client
def initialize(api_url, debug: false, secure: false, timeout: nil, suppress_ssl_warnings: false, app_type: 'ruby_SDK_3par', log_file_path: nil)
unless api_url.is_a?(String)
raise Hpe3parSdk::HPE3PARException.new(nil,
"'api_url' parameter is mandatory and should be of type String")
end
@api_url = api_url
@debug = debug
@secure = secure
@timeout = timeout
@suppress_ssl_warnings = suppress_ssl_warnings
@log_level = Logger::INFO
@log_file_path = log_file_path
init_log
@http = HTTPJSONRestClient.new(
@api_url, @secure, @debug,
@suppress_ssl_warnings, @timeout
)
check_WSAPI_version
@vlun_query_supported = false
@cpg = CPGManager.new(@http)
@qos = QOSManager.new(@http)
@flash_cache = FlashCacheManager.new(@http)
@port = PortManager.new(@http)
@task = TaskManager.new(@http)
@host_and_vv_set_filter_supported = false
@ssh = nil
@vlun = VlunManager.new(@http, @vlun_query_supported)
@host = HostManager.new(@http, @vlun_query_supported)
@volume_set = VolumeSetManager.new(@http, @host_and_vv_set_filter_supported)
@host_set = HostSetManager.new(@http, @host_and_vv_set_filter_supported)
@app_type = app_type
end
private def init_log
unless @log_file_path.nil?
client_logger = Logger.new(@log_file_path, 'daily', formatter: CustomFormatter.new)
else
client_logger = Logger.new(STDOUT)
end
if @debug
@log_level = Logger::DEBUG
end
Hpe3parSdk.logger = MultiLog.new(:level => @log_level, :loggers => client_logger)
end
private def check_WSAPI_version
begin
@api_version = get_ws_api_version
rescue HPE3PARException => ex
ex_message = ex.message
if ex_message && ex_message.include?('SSL Certificate Verification Failed')
raise Hpe3parSdk::SSLCertFailed
else
msg = "Error: #{ex_message} - Error communicating with 3PAR WSAPI. '
'Check proxy settings. If error persists, either the '
'3PAR WSAPI is not running OR the version of the WSAPI is '
'not supported."
raise Hpe3parSdk::HPE3PARException(message: msg)
end
end
compare_version(@api_version)
end
private def set_ssh_options(username, password, port=22, conn_timeout=nil)
@ssh = Hpe3parSdk::SSH.new(@api_url.split("//")[1].split(":")[0], username, password)
end
private def compare_version(api_version)
@min_version = WSAPIVersion
.parse(WSAPIVersionSupport::WSAPI_MIN_SUPPORTED_VERSION)
@min_version_with_compression = WSAPIVersion
.parse(WSAPIVersionSupport::WSAPI_MIN_VERSION_COMPRESSION_SUPPORT)
@current_version = WSAPIVersion.new(api_version['major'], api_version['minor'],
api_version['revision'])
if @current_version < @min_version
err_msg = "Unsupported 3PAR WS API version #{@current_version}, min supported version is, #{WSAPIVersionSupport::WSAPI_MIN_SUPPORTED_VERSION}"
raise Hpe3parSdk::UnsupportedVersion.new(nil, err_msg)
end
# Check for VLUN query support.
min_vlun_query_support_version = WSAPIVersion
.parse(WSAPIVersionSupport::WSAPI_MIN_VERSION_VLUN_QUERY_SUPPORT)
if @current_version >= min_vlun_query_support_version
@vlun_query_supported = true
end
# Check for Host and VV Set query support
if @current_version >= @min_version_with_compression
@host_and_vv_set_filter_supported = true
end
end
# Get the 3PAR WS API version.
#
# ==== Returns
#
# WSAPI version hash
def get_ws_api_version
# remove everything down to host:port
host_url = @api_url.split('/api')
@http.set_url(host_url[0])
begin
# get the api version
response = @http.get('/api')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
ensure
# reset the url
@http.set_url(@api_url)
end
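# Illustrative return shape (field values assumed); compare_version reads
# the 'major'/'minor'/'revision' keys from this hash:
#
#   client.get_ws_api_version
#   #=> {'major' => 1, 'minor' => 6, 'revision' => 0}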
# Gets the WSAPI Configuration.
#
# ==== Returns
#
# WSAPI configuration hash
def get_ws_api_configuration_info
begin
response = @http.get('/wsapiconfiguration')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new FlashCache
#
# ==== Attributes
#
# * size_in_gib - Specifies the node pair size of the Flash Cache on the system
# type size_in_gib: Integer
# * mode - Supported values: Simulator: 1, Real: 2 (default)
# type mode: Integer
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Not enough space is available for the operation.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - A JSON input object contains a name-value pair with a numeric value that exceeds the expected range. Flash Cache exceeds the expected range. The HTTP ref member contains the name.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_FLASH_CACHE - The Flash Cache already exists.
# * Hpe3parSdk::HTTPForbidden
# - FLASH_CACHE_NOT_SUPPORTED - Flash Cache is not supported.
# * Hpe3parSdk::HTTPBadRequest
# - INV_FLASH_CACHE_SIZE - Invalid Flash Cache size. The size must be a multiple of 16 G.
def create_flash_cache(size_in_gib, mode = nil)
begin
@flash_cache.create_flash_cache(size_in_gib, mode)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Get Flash Cache information
#
# ==== Returns
#
# FlashCache - Details of the specified flash cache
def get_flash_cache
begin
@flash_cache.get_flash_cache
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes an existing Flash Cache
#
# ==== Raises
#
# * Hpe3parSdk::HTTPForbidden
# - FLASH_CACHE_IS_BEING_REMOVED - Unable to delete the Flash Cache, the Flash Cache is being removed.
# * Hpe3parSdk::HTTPForbidden
# - FLASH_CACHE_NOT_SUPPORTED - Flash Cache is not supported on this system.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_FLASH_CACHE - The Flash Cache does not exist.
def delete_flash_cache
begin
@flash_cache.delete_flash_cache
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the Storage System Information
#
# ==== Returns
#
# Hash of Storage System Info
def get_storage_system_info
begin
response = @http.get('/system')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the overall system capacity for the 3PAR server.
#
# ==== Returns
#
# Hash of system capacity information
#
#
# capacity = {
# "allCapacity"=> { # Overall system capacity
# # includes FC, NL, SSD
# # device types
# "totalMiB"=>20054016, # Total system capacity
# # in MiB
# "allocated"=>{ # Allocated space info
# "totalAllocatedMiB"=>12535808, # Total allocated
# # capacity
# "volumes"=> { # Volume capacity info
# "totalVolumesMiB"=>10919936, # Total capacity
# # allocated to volumes
# "nonCPGsMiB"=> 0, # Total non-CPG capacity
# "nonCPGUserMiB"=> 0, # The capacity allocated
# # to non-CPG user space
# "nonCPGSnapshotMiB"=>0, # The capacity allocated
# # to non-CPG snapshot
# # volumes
# "nonCPGAdminMiB"=> 0, # The capacity allocated
# # to non-CPG
# # administrative volumes
# "CPGsMiB"=>10919936, # Total capacity
# # allocated to CPGs
# "CPGUserMiB"=>7205538, # User CPG space
# "CPGUserUsedMiB"=>7092550, # The CPG allocated to
# # user space that is
# # in use
# "CPGUserUnusedMiB"=>112988, # The CPG allocated to
# # user space that is not
# # in use
# "CPGSnapshotMiB"=>2411870, # Snapshot CPG space
# "CPGSnapshotUsedMiB"=>210256, # CPG allocated to
# # snapshot that is in use
# "CPGSnapshotUnusedMiB"=>2201614, # CPG allocated to
# # snapshot space that is
# # not in use
# "CPGAdminMiB"=>1302528, # Administrative volume
# # CPG space
# "CPGAdminUsedMiB"=> 115200, # The CPG allocated to
# # administrative space
# # that is in use
# "CPGAdminUnusedMiB"=>1187328, # The CPG allocated to
# # administrative space
# # that is not in use
# "unmappedMiB"=>0 # Allocated volume space
# # that is unmapped
# },
# "system"=> { # System capacity info
# "totalSystemMiB"=> 1615872, # System space capacity
# "internalMiB"=>780288, # The system capacity
# # allocated to internal
# # resources
# "spareMiB"=> 835584, # Total spare capacity
# "spareUsedMiB"=> 0, # The system capacity
# # allocated to spare resources
# # in use
# "spareUnusedMiB"=> 835584 # The system capacity
# # allocated to spare resources
# # that are unused
# }
# },
# "freeMiB"=> 7518208, # Free capacity
# "freeInitializedMiB"=> 7518208, # Free initialized capacity
# "freeUninitializedMiB"=> 0, # Free uninitialized capacity
# "unavailableCapacityMiB"=> 0, # Unavailable capacity in MiB
# "failedCapacityMiB"=> 0 # Failed capacity in MiB
# },
# "FCCapacity"=> { # System capacity from FC devices only
# ... # Same structure as above
# },
# "NLCapacity"=> { # System capacity from NL devices only
# ... # Same structure as above
# },
# "SSDCapacity"=> { # System capacity from SSD devices only
# ... # Same structure as above
# }
# }
def get_overall_system_capacity
begin
response = @http.get('/capacity')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# This authenticates against the 3PAR WSAPI server and creates a session.
# ==== Attributes
#
# * username - The username
# type username: String
# * password - The Password
# type password: String
def login(username, password, optional = nil)
set_ssh_options(username, password)
@volume = VolumeManager.new(@http, @ssh, @app_type)
@http.authenticate(username, password, optional)
end
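# Example (illustrative; URL and credentials are assumed):
#
#   client = Hpe3parSdk::Client.new('https://10.0.0.1:8080/api/v1')
#   client.login('3paradm', 'password')
#   # ... issue WSAPI calls, e.g. client.get_storage_system_info ...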
# Get the list of all 3PAR Tasks
#
# ==== Returns
#
# Array of Task
def get_all_tasks
begin
@task.get_all_tasks
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Get the status of a 3PAR Task
#
# ==== Attributes
#
# * task_id - the task id
# type task_id: Integer
#
# ==== Returns
#
# Task
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BELOW_RANGE - Bad Request Task ID must be a positive value.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - Bad Request Task ID is too large.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_TASK - Task with the specified Task ID does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WRONG_TYPE - Task ID is not an integer.
def get_task(task_id)
begin
@task.get_task(task_id)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def vlun_exists?(volname,lunid,host=nil,port=nil)
begin
@vlun.vlun_exists?(volname,lunid,host,port)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new VLUN.
#
# When creating a VLUN, the volumeName is required. The lun member is
# not required if auto is set to True.
# Either hostname or portPos (or both in the case of matched sets) is
# also required. The noVcn and overrideLowerPriority members are
# optional.
# * volume_name: Name of the volume to be exported
# type volume_name: String
# * lun: LUN id
# type lun: Integer
# * host_name: Name of the host to which the volume is to be exported.
# type host_name: String
# * port_pos: System port of VLUN exported to. It includes node number, slot number, and card port number
# type port_pos: Hash
# port_pos = {'node'=> 1, # System node (0-7)
# 'slot'=> 2, # PCI bus slot in the node (0-5)
# 'port'=> 1} # Port number on the FC card (0-4)
# * no_vcn: A VLUN change notification (VCN) not be issued after export (-novcn).
# type no_vcn: Boolean
# * override_lower_priority: Existing lower priority VLUNs will be overridden (-ovrd). Use only if hostname member exists.
# type override_lower_priority: Boolean
#
# ==== Returns
#
# VLUN id
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MISSING_REQUIRED - Missing volume, hostname, or lunid.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - Specified volume does not exist.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Specified hostname not found.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_PORT - Specified port does not exist.
def create_vlun(volume_name, lun = nil, host_name = nil, port_pos = nil, no_vcn = false, override_lower_priority = false, auto = false)
begin
@vlun.create_vlun(volume_name, host_name, lun, port_pos, no_vcn, override_lower_priority, auto)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
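# Example (illustrative values):
#
#   # Export volume 'test_vol' to host 'test_host' as LUN 1
#   client.create_vlun('test_vol', 1, 'test_host')
#
#   # Port-type export with an auto-assigned LUN
#   client.create_vlun('test_vol', nil, nil, {'node' => 1, 'slot' => 2, 'port' => 1}, false, false, true)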
# Gets VLUNs.
#
# ==== Returns
#
# Array of VLUN objects
def get_vluns
begin
@vlun.get_vluns
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a VLUN.
#
# ==== Attributes
#
# * volume_name: The volume name of the VLUN to find
# type volume_name: String
#
# ==== Returns
#
# VLUN object
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VLUN - VLUN doesn't exist
def get_vlun(volume_name)
begin
@vlun.get_vlun(volume_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a VLUN.
#
# ==== Attributes
#
# * volume_name: Volume name of the VLUN
# type volume_name: String
# * lun_id: LUN ID
# type lun_id: Integer
# * host_name: Name of the host to which the volume is exported. For a VLUN of port type, the value is empty
# type host_name: String
# * port: Specifies the system port of the VLUN export. It includes the system node number, PCI bus slot number, and card port number on the FC card in the format <node>:<slot>:<cardPort>
# type port: Hash
#
# port = {'node'=> 1, # System node (0-7)
# 'slot'=> 2, # PCI bus slot in the node (0-5)
# 'port'=>1} # Port number on the FC card (0-4)
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MISSING_REQUIRED - Incomplete VLUN info. Missing
# volumeName or lun, or both hostname and port.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PORT_SELECTION - Specified port is invalid.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - The LUN specified exceeds expected
# range.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The host does not exist
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VLUN - The VLUN does not exist
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_PORT - The port does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
def delete_vlun(volume_name, lun_id, host_name = nil, port = nil)
begin
@vlun.delete_vlun(volume_name, lun_id, host_name, port)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets QoS Rules.
#
# ==== Returns
#
# Array of QoSRule objects
#
def query_qos_rules
begin
@qos.query_qos_rules
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Queries a QoS rule
#
# ==== Attributes
#
# * target_name : Name of the target. When targetType is sys, target name must be sys:all_others.
# type target_name: String
# * target_type : Target type is vvset or sys
# type target_type: String
# ==== Returns
#
# QoSRule object
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_QOS_RULE - QoS rule does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Illegal character in the input.
def query_qos_rule(target_name, target_type = 'vvset')
begin
@qos.query_qos_rule(target_name, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def qos_rule_exists?(target_name, target_type = 'vvset')
begin
@qos.qos_rule_exists?(target_name, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates QOS rules
# The QoS rule can be applied to VV sets. By using sys:all_others,
# you can apply the rule to all volumes in the system for which no
# QoS rule has been defined.
# ioMinGoal and ioMaxLimit must be used together to set I/O limits.
# Similarly, bwMinGoalKB and bwMaxLimitKB must be used together.
# If ioMaxLimitOP is set to 2 (no limit), ioMinGoalOP must also be
# set to 2 (none), and vice versa. They cannot be set to
# 'none' individually. Similarly, if bwMaxLimitOP is set to 2 (no
# limit), then bwMinGoalOP must also be set to 2.
# If ioMaxLimitOP is set to 1 (zero), ioMinGoalOP must also be
# set to 1 (zero) and vice versa. Similarly, if bwMaxLimitOP is
# set to 1 (zero), then bwMinGoalOP must also be set to 1.
# The ioMinGoalOP and ioMaxLimitOP fields take precedence over
# the ioMinGoal and ioMaxLimit fields.
# The bwMinGoalOP and bwMaxLimitOP fields take precedence over
# the bwMinGoalKB and bwMaxLimitKB fields
#
# ==== Attributes
#
# * target_type: Type of QoS target, either enum TARGET_TYPE_VVS or TARGET_TYPE_SYS.
# type target_type: VVSET or SYS. Refer QoStargetType::VVSET for complete enumeration
# * target_name: Name of the target object on which the QoS rule will be created.
# type target_name: String
# * qos_rules: QoS options
# type qos_rules: Hash
# qos_rules = {
# 'priority'=> 2, # Refer Hpe3parSdk::QoSpriorityEnumeration for complete enumeration
# 'bwMinGoalKB'=> 1024, # bandwidth rate minimum goal in
# # kilobytes per second
# 'bwMaxLimitKB'=> 1024, # bandwidth rate maximum limit in
# # kilobytes per second
# 'ioMinGoal'=> 10000, # I/O-per-second minimum goal
# 'ioMaxLimit'=> 2000000, # I/O-per-second maximum limit
# 'enable'=> false, # QoS rule for target enabled?
# 'bwMinGoalOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth minimum goal is 0
# # when set to 2, the bandwidth minimum
# # goal is none (NoLimit)
# 'bwMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth maximum limit is 0
# # when set to 2, the bandwidth maximum
# # limit is none (NoLimit)
# 'ioMinGoalOP'=>1, # zero none operation enum, when set to
# # 1, I/O minimum goal is 0
# # when set to 2, the I/O minimum goal is
# # none (NoLimit)
# 'ioMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, I/O maximum limit is 0
# # when set to 2, the I/O maximum limit
# # is none (NoLimit)
# 'latencyGoal'=>5000, # Latency goal in milliseconds
# 'defaultLatency'=> false# Use latencyGoal or defaultLatency?
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - Invalid input: number exceeds expected range.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_QOS_RULE - QoS rule does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Illegal character in the input.
# * Hpe3parSdk::HTTPBadRequest
# - EXISTENT_QOS_RULE - QoS rule already exists.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MIN_GOAL_GRT_MAX_LIMIT - I/O-per-second maximum limit should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BW_MIN_GOAL_GRT_MAX_LIMIT - Bandwidth maximum limit should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BELOW_RANGE - I/O-per-second limit is below range.Bandwidth limit is below range.
# * Hpe3parSdk::HTTPBadRequest
# - UNLICENSED_FEATURE - The system is not licensed for QoS.
def create_qos_rules(target_name, qos_rules, target_type = QoStargetType::VVSET)
if @current_version < @min_version && !qos_rules.nil?
qos_rules.delete_if { |key, _value| key == :latencyGoaluSecs }
end
begin
@qos.create_qos_rules(target_name, qos_rules, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
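# Example (illustrative values, mirroring the qos_rules hash documented above):
#
#   client.create_qos_rules(
#     'my_vv_set',
#     {'priority' => 2, 'ioMinGoal' => 300, 'ioMaxLimit' => 1000},
#     Hpe3parSdk::QoStargetType::VVSET
#   )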
# Modifies an existing QoS rule
#
# The QoS rule can be applied to VV sets. By using sys:all_others,
# you can apply the rule to all volumes in the system for which no
# QoS rule has been defined.
# ioMinGoal and ioMaxLimit must be used together to set I/O limits.
# Similarly, bwMinGoalKB and bwMaxLimitKB must be used together.
# If ioMaxLimitOP is set to 2 (no limit), ioMinGoalOP must also be
# set to 2 (none), and vice versa. They cannot be set to
# 'none' individually. Similarly, if bwMaxLimitOP is set to 2 (no
# limit), then bwMinGoalOP must also be set to 2.
# If ioMaxLimitOP is set to 1 (zero), ioMinGoalOP must also be
# set to 1 (zero) and vice versa. Similarly, if bwMaxLimitOP is
# set to 1 (zero), then bwMinGoalOP must also be set to 1.
# The ioMinGoalOP and ioMaxLimitOP fields take precedence over
# the ioMinGoal and ioMaxLimit fields.
# The bwMinGoalOP and bwMaxLimitOP fields take precedence over
# the bwMinGoalKB and bwMaxLimitKB fields
#
# ==== Attributes
#
# * target_name: Name of the target object on which the QoS rule will be created.
# type target_name: String
# * target_type: Type of QoS target, either vvset or sys.Refer Hpe3parSdk::QoStargetTypeConstants for complete enumeration
# type target_type: String
# * qos_rules: QoS options
# type qos_rules: Hash
# qos_rules = {
# 'priority'=> 2, # Refer Hpe3parSdk::QoSpriorityEnumeration for complete enumeration
# 'bwMinGoalKB'=> 1024, # bandwidth rate minimum goal in
# # kilobytes per second
# 'bwMaxLimitKB'=> 1024, # bandwidth rate maximum limit in
# # kilobytes per second
# 'ioMinGoal'=> 10000, # I/O-per-second minimum goal.
# 'ioMaxLimit'=> 2000000, # I/O-per-second maximum limit
# 'enable'=> True, # QoS rule for target enabled?
# 'bwMinGoalOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth minimum goal is 0
# # when set to 2, the bandwidth minimum
# # goal is none (NoLimit)
# 'bwMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth maximum limit is 0
# # when set to 2, the bandwidth maximum
# # limit is none (NoLimit)
# 'ioMinGoalOP'=> 1, # zero none operation enum, when set to
# # 1, I/O minimum goal minimum goal is 0
# # when set to 2, the I/O minimum goal is
# # none (NoLimit)
# 'ioMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, I/O maximum limit is 0
# # when set to 2, the I/O maximum limit
# # is none (NoLimit)
# 'latencyGoal'=> 5000, # Latency goal in milliseconds
# 'defaultLatency'=> false# Use latencyGoal or defaultLatency?
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_EXCEEDS_RANGE - Invalid input: number exceeds expected
# range.
# * Hpe3parSdk::HTTPNotFound
# NON_EXISTENT_QOS_RULE - QoS rule does not exist.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_ILLEGAL_CHAR - Illegal character in the input.
# * Hpe3parSdk::HTTPBadRequest
# EXISTENT_QOS_RULE - QoS rule already exists.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_IO_MIN_GOAL_GRT_MAX_LIMIT - I/O-per-second maximum limit
# should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_BW_MIN_GOAL_GRT_MAX_LIMIT - Bandwidth maximum limit
# should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_BELOW_RANGE - I/O-per-second limit is below
# range. Bandwidth limit is below range.
# * Hpe3parSdk::HTTPBadRequest
# UNLICENSED_FEATURE - The system is not licensed for QoS.
def modify_qos_rules(target_name, qos_rules, target_type = QoStargetTypeConstants::VVSET)
if @current_version < @min_version && !qos_rules.nil?
qos_rules.delete_if { |key, _value| key == :latencyGoaluSecs }
end
begin
@qos.modify_qos_rules(target_name, qos_rules, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes QoS rules.
#
# ==== Attributes
#
# * target_name: Name of the target. When target_type is sys, target_name must be sys:all_others.
# type target_name: String
# * target_type: target type is vvset or sys
# type target_type: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# NON_EXISTENT_QOS_RULE - QoS rule does not exist.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_ILLEGAL_CHAR - Illegal character in the input
def delete_qos_rules(target_name, target_type = QoStargetTypeConstants::VVSET)
begin
@qos.delete_qos_rules(target_name, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all hosts.
#
# ==== Returns
#
# Array of Host.
def get_hosts
begin
@host.get_hosts
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets host information by name.
#
# ==== Attributes
#
# * name - The name of the host to find.
# type name: String
#
# ==== Returns
#
# Host.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Host not found.
# * Hpe3parSdk::HTTPInternalServerError
# - INT_SERV_ERR - Internal server error.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Host name contains invalid character.
def get_host(name)
begin
@host.get_host(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new Host.
#
# ==== Attributes
#
# * name - The name of the host.
# type name: String
# * iscsi_names - Array of iSCSI iqns.
# type iscsi_names: Array
# * fcwwns - Array of Fibre Channel World Wide Names.
# type fcwwns: Array
# * optional - The optional stuff.
# type optional: Hash
# optional = {
# 'persona'=> 1, # Refer Hpe3parSdk::HostPersona for complete enumeration.
# # 3.1.3 default: Generic-ALUA
# # 3.1.2 default: General
# 'domain'=> 'myDomain', # Create the host in the
# # specified domain, or default
# # domain if unspecified.
# 'forceTearDown'=> false, # If True, force to tear down
# # low-priority VLUN exports.
# 'descriptors'=>
# {'location'=> 'earth', # The host's location
# 'IPAddr'=> '10.10.10.10', # The host's IP address
# 'os'=> 'linux', # The operating system running on the host.
# 'model'=> 'ex', # The host's model
# 'contact'=> 'Smith', # The host's owner and contact
# 'comment'=> "Joe's box"} # Additional host information
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MISSING_REQUIRED - Name not specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - FCWWNs and iSCSINames are both specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Host name, domain name, or iSCSI name is too long.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EMPTY_STR - Input string (for domain name, iSCSI name, etc.) is empty.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Any error from host-name or domain-name parsing.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TOO_MANY_WWN_OR_iSCSI - More than 1024 WWNs or iSCSI names are specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WRONG_TYPE - The length of WWN is not 16. WWN specification contains non-hexadecimal digit.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_PATH - host WWN/iSCSI name already used by another host.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_HOST - host name is already used.
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - No space to create host.
def create_host(name, iscsi_names = nil, fcwwns = nil, optional = nil)
begin
@host.create_host(name, iscsi_names, fcwwns, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
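# Example (illustrative values; the WWN is a made-up 16-digit hex string):
#
#   client.create_host(
#     'test_host',
#     nil,
#     ['50002ACE00001234'],
#     {'persona' => 2, 'domain' => 'myDomain'}
#   )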
# Modifies an existing Host.
#
# ==== Attributes
#
# * name - Name of the host.
# type name: String
# * mod_request - Objects for host modification request.
# type mod_request: Hash
# mod_request = {
# 'newName'=> 'myNewName', # New name of the host
# 'pathOperation'=> 1, # Refer Hpe3parSdk::HostEditOperation for complete enumeration
# 'FCWWNs'=> [], # One or more WWN to set for the host.
# 'iSCSINames'=> [], # One or more iSCSI names to set for the host.
# 'forcePathRemoval'=> false, # If True, remove SSN(s) or
# # iSCSI(s) even if there are
# # VLUNs exported to host
# 'persona'=> 1, # Refer Hpe3parSdk::HostPersona for complete enumeration.
# 'descriptors'=>
# {'location'=> 'earth', # The host's location
# 'IPAddr'=> '10.10.10.10', # The host's IP address
# 'os'=> 'linux', # The operating system running on the host.
# 'model'=> 'ex', # The host's model
# 'contact'=> 'Smith', # The host's owner and contact
# 'comment'=> 'Joes box'} # Additional host information
# 'chapOperation'=> 1, # Refer Hpe3parSdk::HostEditOperation for complete enumeration
# 'chapOperationMode'=> TARGET, # Refer Hpe3parSdk::ChapOperationMode for complete enumeration
# 'chapName'=> 'MyChapName', # The chap name
# 'chapSecret'=> 'xyz', # The chap secret for the host or the target
# 'chapSecretHex'=> false, # If True, the chapSecret is treated as Hex.
# 'chapRemoveTargetOnly'=> true # If True, then remove target chap only
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Missing host name.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - Both iSCSINames & FCWWNs are specified. (lot of other possibilities).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ONE_REQUIRED - iSCSINames or FCWwns missing.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ONE_REQUIRED - No path operation specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BAD_ENUM_VALUE - Invalid enum value.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MISSING_REQUIRED - Required fields missing.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Host descriptor argument length, new host name, or iSCSI name is too long.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Error parsing host or iSCSI name.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_HOST - New host name is already used.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Host to be modified does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TOO_MANY_WWN_OR_iSCSI - More than 1024 WWNs or iSCSI names are specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WRONG_TYPE - Input value is of the wrong type.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_PATH - WWN or iSCSI name is already claimed by other host.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BAD_LENGTH - CHAP hex secret length is not 16 bytes, or chap ASCII secret length is not 12 to 16 characters.
# * Hpe3parSdk::HTTPNotFound
# - NO_INITIATOR_CHAP - Setting target CHAP without initiator CHAP.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_CHAP - Remove non-existing CHAP.
# * Hpe3parSdk::HTTPConflict
# - NON_UNIQUE_CHAP_SECRET - CHAP secret is not unique.
# * Hpe3parSdk::HTTPConflict
# - EXPORTED_VLUN - Setting persona with active export; remove a host path on an active export.
# * Hpe3parSdk::HTTPBadRequest
# - NON_EXISTENT_PATH - Remove a non-existing path.
# * Hpe3parSdk::HTTPConflict
# - LUN_HOSTPERSONA_CONFLICT - LUN number and persona capability conflict.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_PATH - Duplicate path specified.
def modify_host(name, mod_request)
begin
@host.modify_host(name, mod_request)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a host.
#
# ==== Attributes
#
# * name - The name of host to be deleted.
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Host not found
# * Hpe3parSdk::HTTPConflict
# - HOST_IN_SET - Host is a member of a set
def delete_host(name)
begin
@host.delete_host(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Finds the host with the specified FC WWN path.
#
# ==== Attributes
#
# * wwn - Lookup based on WWN.
# type wwn: String
#
# ==== Returns
#
# Host with specified FC WWN.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - HOST Not Found
# * Hpe3parSdk::HTTPInternalServerError
# - INTERNAL_SERVER_ERR - Internal server error.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Host name contains invalid character.
def query_host_by_fc_path(wwn = nil)
begin
@host.query_host_by_fc_path(wwn)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Finds the host with the specified iSCSI initiator.
#
# ==== Attributes
#
# * iqn - Lookup based on iSCSI initiator.
# type iqn: String
#
# ==== Returns
#
# Host with specified IQN.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The specified host not found.
# * Hpe3parSdk::HTTPInternalServerError
# - INTERNAL_SERVER_ERR - Internal server error.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - The host name contains invalid character.
def query_host_by_iscsi_path(iqn = nil)
begin
@host.query_host_by_iscsi_path(iqn)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all host sets.
#
# ==== Returns
#
# Array of HostSet.
def get_host_sets
begin
@host_set.get_host_sets
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new HostSet.
#
# ==== Attributes
#
# * name - Name of the host set to be created.
# type name: String
# * domain - The domain in which the host set will be created.
# type domain: String
# * comment - Comment for the host set.
# type comment: String
# * setmembers - The hosts to be added to the set. The existence of the host will not be checked.
# type setmembers: Array of String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - EXISTENT_SET - The set already exists.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_DOMAIN - The domain does not exist.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_DOMAINSET - The host is in a domain set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_SET - The object is already part of the set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_NOT_IN_SAME_DOMAIN - Objects must be in the same domain to perform this operation.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The host does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
def create_host_set(name, domain = nil, comment = nil, setmembers = nil)
begin
@host_set.create_host_set(name, domain, comment, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a HostSet.
#
# ==== Attributes
#
# * name - The hostset to delete.
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - EXPORTED_VLUN - The host set has exported VLUNs.
def delete_host_set(name)
begin
@host_set.delete_host_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a HostSet.
#
# ==== Attributes
#
# * name - Hostset name
# type name: String
# * action - Add or Remove host(s) from the set
# type action: Refer values of Hpe3parSdk::SetCustomAction::MEM_ADD and Hpe3parSdk::SetCustomAction::MEM_REMOVE
# * setmembers - Host(s) to add to the set, the existence of the host(s) will not be checked
# type setmembers: Array of String
# * new_name - New name of set
# type new_name: String
# * comment - New comment for the set
# type comment: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - EXISTENT_SET - The set already exists.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_DOMAINSET - The host is in a domain set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_SET - The object is already part of the set.
# * Hpe3parSdk::HTTPNotFound
# - MEMBER_NOT_IN_SET - The object is not part of the set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_NOT_IN_SAME_DOMAIN - Objects must be in the same domain to perform this operation.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - Invalid input (parameters cannot be present at the same time).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Invalid input contains one or more illegal characters.
def modify_host_set(name, action = nil, setmembers = nil, new_name = nil, comment = nil)
begin
@host_set.modify_host_set(name, action, setmembers, new_name, comment)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Adds host(s) to a host set.
#
# ==== Attributes
#
# * set_name - Hostset name.
# type set_name: String
# * setmembers - Array of host names to add to the set.
# type setmembers: Array of String
def add_hosts_to_host_set(set_name, setmembers)
begin
@host_set.add_hosts_to_host_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Removes host(s) from a host set.
#
# ==== Attributes
#
# * set_name - The host set name.
# type set_name: String
# * setmembers - Array of host names to remove from the set.
# type setmembers: Array of String
def remove_hosts_from_host_set(set_name, setmembers)
begin
@host_set.remove_hosts_from_host_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Returns an array of every Hostset the given host is a part of. The array can contain zero, one, or multiple items.
#
# ==== Attributes
#
# * host_name - The host name of whose hostset is to be found.
# type host_name: String
#
# ==== Returns
#
# Array of HostSet.
def find_host_sets(host_name)
begin
@host_set.find_host_sets(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets hostset information by name.
#
# ==== Attributes
#
# * name - The name of the hostset to find.
# type name: String
#
# ==== Returns
#
# HostSet.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
def get_host_set(name)
begin
@host_set.get_host_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all of the VLUNs on a specific host.
#
# ==== Attributes
#
# * host_name - Name of the host.
# type host_name: String
#
# ==== Returns
#
# Array of VLUN.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The specified host was not found.
def get_host_vluns(host_name)
begin
@host.get_host_vluns(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all Volumes in the array
#
# ==== Returns
#
# Array of VirtualVolume
def get_volumes
begin
@volume.get_volumes(VolumeCopyType::BASE_VOLUME)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the list of snapshots in the array
#
# ==== Returns
#
# Array of VirtualVolume
def get_snapshots
begin
@volume.get_volumes(VolumeCopyType::VIRTUAL_COPY)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a volume by name
#
# ==== Attributes
#
# * name - The name of the volume to find
# type name: String
#
# ==== Returns
#
# VirtualVolume
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 23 message: volume does not exist
def get_volume(name)
begin
@volume.get_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a volume by wwn
#
# ==== Attributes
#
# * wwn - The wwn of the volume to find
# type wwn: String
#
# ==== Returns
#
# * VirtualVolume
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 23 message: volume does not exist
def get_volume_by_wwn(wwn)
begin
@volume.get_volume_by_wwn(wwn)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new volume.
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
# * cpg_name - the name of the destination CPG
# type cpg_name: String
# * size_MiB - size in MiB for the volume
# type size_MiB: Integer
# * optional - hash of other optional items
# type optional: hash
#
# optional = {
# 'id' => 12, # Volume ID. If not specified, next
# # available is chosen
# 'comment' => 'some comment', # Additional information up to 511
# # characters
# 'policies' => { # Specifies VV policies
# 'staleSS' => false, # True allows stale snapshots.
# 'oneHost' => true, # True constrains volume export to
# # single host or host cluster
# 'zeroDetect' => true, # True requests Storage System to
# # scan for zeros in incoming write
# # data
# 'system' => false, # True special volume used by system
# # False is normal user volume
# 'caching' => true}, # Read-only. True indicates write &
# # read caching & read ahead enabled
# 'snapCPG' => 'CPG name', # CPG Used for snapshots
# 'ssSpcAllocWarningPct' => 12, # Snapshot space allocation warning
# 'ssSpcAllocLimitPct' => 22, # Snapshot space allocation limit
# 'tpvv' => true, # True: Create TPVV
# # False (default) Create FPVV
# 'usrSpcAllocWarningPct' => 22, # Enable user space allocation
# # warning
# 'usrSpcAllocLimitPct' => 22, # User space allocation limit
# 'expirationHours' => 256, # Relative time from now to expire
# # volume (max 43,800 hours)
# 'retentionHours' => 256 # Relative time from now to retain
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid Parameter
# * Hpe3parSdk::HTTPBadRequest
# - TOO_LARGE - Volume size above limit
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Not enough space is available
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_SV - Volume already exists
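#
# ==== Example
#
# A minimal sketch assuming an authenticated client +cl+ and an
# existing CPG named 'FC_r6' (both names are illustrative):
#
#   optional = { 'comment' => 'demo volume', 'tpvv' => true }
#   cl.create_volume('demo_vol', 'FC_r6', 2048, optional)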
def create_volume(name, cpg_name, size_MiB, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :compression }
end
begin
@volume.create_volume(name, cpg_name, size_MiB, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a volume
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPForbidden
# - RETAINED - Volume retention time has not expired
# * Hpe3parSdk::HTTPForbidden
# - HAS_RO_CHILD - Volume has read-only child
# * Hpe3parSdk::HTTPConflict
# - HAS_CHILD - The volume has a child volume
# * Hpe3parSdk::HTTPConflict
# - IN_USE - The volume is in use by VV set, VLUN, etc
def delete_volume(name)
begin
@volume.delete_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a volume
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
# * volumeMods - Hash of volume attributes to change
# type volumeMods: Hash
# volumeMods = {
# 'newName' => 'newName', # New volume name
# 'comment' => 'some comment', # New volume comment
# 'snapCPG' => 'CPG name', # Snapshot CPG name
# 'policies' => { # Specifies VV policies
# 'staleSS' => false, # True allows stale snapshots.
# 'oneHost' => true, # True constrains volume export to
# # single host or host cluster
# 'zeroDetect' => true, # True requests Storage System to
# # scan for zeros in incoming write
# # data
# 'system' => false, # True special volume used by system
# # False is normal user volume
# 'caching' => true}, # Read-only. True indicates write &
# # read caching & read ahead enabled
# 'ssSpcAllocWarningPct' => 12, # Snapshot space allocation warning
# 'ssSpcAllocLimitPct' => 22, # Snapshot space allocation limit
# 'tpvv' => true, # True: Create TPVV
# # False: (default) Create FPVV
# 'usrSpcAllocWarningPct' => 22, # Enable user space allocation
# # warning
# 'usrSpcAllocLimitPct' => 22, # User space allocation limit
# 'userCPG' => 'User CPG name', # User CPG name
# 'expirationHours' => 256, # Relative time from now to expire
# # volume (max 43,800 hours)
# 'retentionHours' => 256, # Relative time from now to retain
# # volume (max 43,800 hours)
# 'rmSsSpcAllocWarning' => false, # True removes snapshot space
# # allocation warning.
# # False sets it when value > 0
# 'rmUsrSpcAllocWarning' => false, # True removes user space
# # allocation warning.
# # False sets it when value > 0
# 'rmExpTime' => false, # True resets expiration time to 0.
# # False sets it when value > 0
# 'rmSsSpcAllocLimit' => false, # True removes snapshot space
# # allocation limit.
# # False sets it when value > 0
# 'rmUsrSpcAllocLimit' => false # True removes user space
# # allocation limit.
# # False sets it when value > 0
# }
#
# ==== Raises:
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WARN_GT_LIMIT - Allocation warning level is higher than
# the limit.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_USR_ALRT_NON_TPVV - User space allocation alerts are
# valid only with a TPVV.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_RETAIN_GT_EXPIRE - Retention time is greater than
# expiration time.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_VV_POLICY - Invalid policy specification (for example,
# caching or system is set to true).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Invalid input: string length exceeds
# limit.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TIME - Invalid time specified.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_MODIFY_USR_CPG_TPVV - usr_cpg cannot be modified
# on a TPVV.
# * Hpe3parSdk::HTTPBadRequest
# - UNLICENSED_FEATURE - Retention time cannot be modified on a
# system without the Virtual Lock license.
# * Hpe3parSdk::HTTPForbidden
# - CPG_NOT_IN_SAME_DOMAIN - Snap CPG is not in the same domain as
# the user CPG.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_PEER_VOLUME - Cannot modify a peer volume.
# * Hpe3parSdk::HTTPInternalServerError
# - INT_SERV_ERR - Metadata of the VV is corrupted.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SYS_VOLUME - Cannot modify retention time on a
# system volume.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - Cannot modify an internal
# volume
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_NOT_DEFINED_ALL_NODES - Cannot modify a
# volume until the volume is defined on all volumes.
# * Hpe3parSdk::HTTPConflict
# - INVALID_OPERATION_VV_ONLINE_COPY_IN_PROGRESS - Cannot modify a
# volume when an online copy for that volume is in progress.
# * Hpe3parSdk::HTTPConflict
# - INVALID_OPERATION_VV_VOLUME_CONV_IN_PROGRESS - Cannot modify a
# volume in the middle of a conversion operation.
# * Hpe3parSdk::HTTPConflict
# - INVALID_OPERATION_VV_SNAPSPACE_NOT_MOVED_TO_CPG - Snapshot space
# of a volume needs to be moved to a CPG before the user space.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_ACCOUNTING_IN_PROGRESS - The volume
# cannot be renamed until snapshot accounting has finished.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_ZERO_DETECT_TPVV - The zero_detect policy can be
# used only on TPVVs.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_CPG_ON_SNAPSHOT - CPG cannot be assigned to a
# snapshot.
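#
# ==== Example
#
# An illustrative rename plus comment change, assuming an
# authenticated client +cl+ and an existing volume 'demo_vol':
#
#   volume_mods = { 'newName' => 'demo_vol_renamed',
#                   'comment' => 'renamed by script' }
#   cl.modify_volume('demo_vol', volume_mods)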
def modify_volume(name, volume_mods)
begin
@volume.modify_volume(name, volume_mods)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Grows an existing volume by 'amount' Mebibytes.
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
# * amount - the additional size in MiB to add, rounded up to the next chunklet size (e.g., 256 MiB or 1000 MiB)
# type amount: Integer
#
# ==== Raises:
#
# * Hpe3parSdk::HTTPForbidden
# - VV_NOT_IN_SAME_DOMAIN - The volume is not in the same domain.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_UNSUPPORTED_VV_TYPE - Invalid operation: Cannot
# grow this type of volume.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_TUNE_IN_PROGRESS - Invalid operation: Volume
# tuning is in progress.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Invalid input: String length exceeds
# limit.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_VV_GROW_SIZE - Invalid grow size.
# * Hpe3parSdk::HTTPForbidden
# - VV_NEW_SIZE_EXCEEDS_CPG_LIMIT - New volume size exceeds CPG limit
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - This operation is not allowed
# on an internal volume.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_CONV_IN_PROGRESS - Invalid operation: VV
# conversion is in progress.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_COPY_IN_PROGRESS - Invalid operation:
# online copy is in progress.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_CLEANUP_IN_PROGRESS - Internal volume cleanup is
# in progress.
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed.
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - The volume has an internal consistency
# error.
# * Hpe3parSdk::HTTPForbidden
# - VV_SIZE_CANNOT_REDUCE - New volume size is smaller than the
# current size.
# * Hpe3parSdk::HTTPForbidden
# - VV_NEW_SIZE_EXCEEDS_LIMITS - New volume size exceeds the limit.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_SA_SD_SPACE_REMOVED - Invalid operation: Volume
# SA/SD space is being removed.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_IS_BUSY - Invalid operation: Volume is currently
# busy.
# * Hpe3parSdk::HTTPForbidden
# - VV_NOT_STARTED - Volume is not started.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_IS_PCOPY - Invalid operation: Volume is a
# physical copy.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_NOT_IN_NORMAL_STATE - Volume state is not normal
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_PROMOTE_IN_PROGRESS - Invalid operation: Volume
# promotion is in progress.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_PARENT_OF_PCOPY - Invalid operation: Volume is
# the parent of physical copy.
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Insufficient space for requested operation.
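#
# ==== Example
#
# Growing an assumed volume 'demo_vol' by 1 GiB (1024 MiB), with an
# authenticated client +cl+:
#
#   cl.grow_volume('demo_vol', 1024)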
def grow_volume(name, amount)
begin
@volume.grow_volume(name, amount)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a physical copy of a VirtualVolume
#
# ==== Attributes
#
# * src_name - the source volume name
# type src_name: String
# * dest_name - the destination volume name
# type dest_name: String
# * dest_cpg - the destination CPG
# type dest_cpg: String
# * optional - Hash of optional parameters
# type optional: Hash
#
# optional = {
# 'online' => false, # should physical copy be
# # performed online?
# 'tpvv' => false, # use thin provisioned space
# # for destination
# # (online copy only)
# 'snapCPG' => 'OpenStack_SnapCPG', # snapshot CPG for the
# # destination
# # (online copy only)
# 'saveSnapshot' => false, # save the snapshot of the
# # source volume
# 'priority' => 1 # taskPriorityEnum (does not
# # apply to online copy - Hpe3parSdk::TaskPriority)
# }
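#
# ==== Example
#
# An illustrative online copy, assuming an authenticated client +cl+,
# a source volume 'demo_vol', and a destination CPG 'FC_r6':
#
#   optional = { 'online' => true, 'tpvv' => true }
#   cl.create_physical_copy('demo_vol', 'demo_vol_copy', 'FC_r6', optional)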
def create_physical_copy(src_name, dest_name, dest_cpg, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
[:compression, :allowRemoteCopyParent, :skipZero].each { |key| optional.delete key }
end
begin
@volume.create_physical_copy(src_name, dest_name, dest_cpg, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a physical copy
#
# ==== Attributes
#
# * name - the name of the clone volume
# type name: String
#
# ==== Raises:
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPForbidden
# - RETAINED - Volume retention time has not expired
# * Hpe3parSdk::HTTPForbidden
# - HAS_RO_CHILD - Volume has read-only child
# * Hpe3parSdk::HTTPConflict
# - HAS_CHILD - The volume has a child volume
# * Hpe3parSdk::HTTPConflict
# - IN_USE - The volume is in use by VV set, VLUN, etc
def delete_physical_copy(name)
begin
@volume.delete_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Tunes a volume
#
# ==== Attributes
#
# * name - the volume name
# type name: String
# * tune_operation - Enum of tune operation - 1: Change User CPG, 2: Change snap CPG
# type tune_operation: Integer
# * optional - hash of optional parameters
# type optional: hash
#
# optional = {
# 'userCPG' => 'user_cpg', # Specifies the new user
# # CPG to which the volume
# # will be tuned.
# 'snapCPG' => 'snap_cpg', # Specifies the snap CPG to
# # which the volume will be
# # tuned.
# 'conversionOperation' => 1, # conversion operation enum. Refer Hpe3parSdk::VolumeConversionOperation
# 'keepVV' => 'new_volume', # Name of the new volume
# # where the original logical disks are saved.
# 'compression' => true # Enables (true) or disables (false) compression.
# # You cannot compress a fully provisioned volume.
# }
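#
# ==== Example
#
# An illustrative user-CPG tune (operation value 1, per the list
# above), assuming an authenticated client +cl+ and a CPG 'FC_r1':
#
#   task = cl.tune_volume('demo_vol', 1, { 'userCPG' => 'FC_r1' })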
def tune_volume(name, tune_operation, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :compression }
end
begin
object_hash = @volume.tune_volume(name, tune_operation, optional)
get_task(object_hash['taskid'])
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Returns an array of every VolumeSet the given volume is a part of.
# The array can contain zero, one, or multiple items.
#
# ==== Attributes
#
# * name - the volume name
# type name: String
#
# ==== Returns
#
# Array of VolumeSet
#
# ==== Raises
#
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - Internal inconsistency error in vol
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOLUME - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SYS_VOLUME - Illegal op on system vol
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - Illegal op on internal vol
def find_all_volume_sets(name)
begin
@volume_set.find_all_volume_sets(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the Volume Sets
#
# ==== Returns
#
# Array of VolumeSet
def get_volume_sets
begin
@volume_set.get_volume_sets
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the information about a Volume Set.
#
# ==== Attributes
#
# * name - The name of the volume set to find
# type name: String
#
# ==== Returns
#
# VolumeSet
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 102 message: Set does not exist
def get_volume_set(name)
begin
@volume_set.get_volume_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new volume set
#
# ==== Attributes
#
# * name - the volume set to create
# type name: String
# * domain - the domain where the set lives
# type domain: String
# * comment - the comment for the vv set
# type comment: String
# * setmembers - the vv(s) to add to the set; the existence of the vv(s) will not be checked
# type setmembers: Array of String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPBadRequest
# - NON_EXISTENT_DOMAIN - Domain doesn't exist.
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Not enough space is available.
# * Hpe3parSdk::HTTPBadRequest
# - BAD_CPG_PATTERN - A pattern in a CPG specifies illegal values.
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_CPG - CPG already exists
# Deletes the volume set. You must clear all QOS rules before a volume set can be deleted.
#
# ==== Attributes
#
# * name - The name of the VolumeSet
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - EXPORTED_VLUN - The VV set was exported (it has exported VLUNs).
# * Hpe3parSdk::HTTPConflict
# - VVSET_QOS_TARGET - The object is already part of the set.
def delete_volume_set(name)
begin
@volume_set.delete_volume_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a volume set by adding or removing a volume from the volume
# set. Its action is based on the enums MEM_ADD and MEM_REMOVE.
#
# ==== Attributes
#
# * action: add or remove volume from the set
# type action: Hpe3parSdk::SetCustomAction
# * name: the volume set name
# type name: String
# * newName: new name of set
# type newName: String
# * comment: the comment on the vv set
# type comment: String
# * flash_cache_policy: the flash-cache policy for the vv set
# type flash_cache_policy: enum
# * setmembers: the vv to add to the set; the existence of the vv will not be checked
# type setmembers: Array of String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - EXISTENT_SET - The set already exists.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_DOMAINSET - The host is in a domain set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_SET - The object is already part of the set.
# * Hpe3parSdk::HTTPNotFound
# - MEMBER_NOT_IN_SET - The object is not part of the set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_NOT_IN_SAME_DOMAIN - Objects must be in the same domain to
# perform this operation.
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - The volume has an internal
# inconsistency error.
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOLUME - The volume does not exist.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SYS_VOLUME - The operation is not allowed on a
# system volume.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - The operation is not allowed
# on an internal volume.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - Invalid input (parameters cannot be
# present at the same time).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Invalid input: contains one or more
# illegal characters.
def modify_volume_set(name, action = nil, newName = nil, comment = nil, flash_cache_policy = nil, setmembers = nil)
begin
@volume_set.modify_volume_set(name, action, newName, comment, flash_cache_policy, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Adds volume(s) to a volume set.
#
# ==== Attributes
#
# * set_name - the volume set name
# type set_name: String
# * setmembers - the volume(s) name to add
# type setmembers: Array of String
def add_volumes_to_volume_set(set_name, setmembers)
begin
@volume_set.add_volumes_to_volume_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Removes a volume from a volume set
#
# ==== Attributes
#
# * set_name - the volume set name
# type set_name: String
# * name - the volume name to remove
# type name: String
def remove_volumes_from_volume_set(set_name, setmembers)
begin
@volume_set.remove_volumes_from_volume_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a snapshot of an existing VolumeSet
#
# ==== Attributes
#
# * name: Name of the Snapshot. The vvname pattern is described in "VV Name Patterns" in the HPE 3PAR Command Line Interface Reference, which is available at the following website: http://www.hp.com/go/storage/docs
# type name: String
# * copy_of_name: the name of the parent volume
# type copy_of_name: String
# * comment: the comment on the vv set
# type comment: String
# * optional: Hash of optional params
# type optional: Hash
# optional = {
# 'id' => 12, # Specifies the ID of the volume
# # set; next available by default
# 'comment' => "some comment",
# 'readOnly' => true, # Read Only
# 'expirationHours' => 36, # time from now to expire
# 'retentionHours' => 12 # time from now to expire
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INVALID_INPUT_VV_PATTERN - Invalid volume pattern specified
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPNotFound
# - EMPTY_SET - The set is empty
# * Hpe3parSdk::HTTPServiceUnavailable
# - VV_LIMIT_REACHED - Maximum number of volumes reached
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The storage volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_READONLY_TO_READONLY_SNAP - Creating a read-only copy from a read-only volume is not permitted
# * Hpe3parSdk::HTTPConflict
# - NO_SNAP_CPG - No snapshot CPG has been configured for the volume
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SNAP_PARENT_SAME_BASE - Two parent snapshots share the same base volume
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_ONLINE_COPY_IN_PROGRESS - Invalid operation. Online copy is in progress
# * Hpe3parSdk::HTTPServiceUnavailable
# - VV_ID_LIMIT_REACHED - Max number of volumeIDs has been reached
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOLUME - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_STALE_STATE - The volume is in a stale state.
# * Hpe3parSdk::HTTPForbidden
# - VV_NOT_STARTED - Volume is not started
# * Hpe3parSdk::HTTPForbidden
# - VV_UNAVAILABLE - The volume is not accessible
# * Hpe3parSdk::HTTPServiceUnavailable
# - SNAPSHOT_LIMIT_REACHED - Max number of snapshots has been reached
# * Hpe3parSdk::HTTPServiceUnavailable
# - CPG_ALLOCATION_WARNING_REACHED - The CPG has reached the allocation warning
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_CONV_IN_PROGRESS - Invalid operation: VV conversion is in progress.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_CLEANUP_IN_PROGRESS - Internal volume cleanup is in progress.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_PEER_VOLUME - Cannot modify a peer volume.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_ONLINE_COPY_IN_PROGRESS - The volume is the target of an online copy.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - Illegal op on internal vol
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_ID - An ID exists
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_NOT_IN_NORMAL_STATE - Volume state is not normal
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - Internal inconsistency error in vol
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_RETAIN_GT_EXPIRE - Retention time is greater than expiration time.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TIME - Invalid time specified.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_SNAPSHOT_NOT_SAME_TYPE - Some snapshots in the volume set are read-only, some are read-write
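#
# ==== Example
#
# An illustrative read-only snapshot of an assumed set 'demo_vvset';
# the @vvname@ token expands per member volume as described in the
# CLI reference cited above:
#
#   optional = { 'readOnly' => true, 'expirationHours' => 36 }
#   cl.create_snapshot_of_volume_set('snap-@vvname@', 'demo_vvset', optional)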
def create_snapshot_of_volume_set(name, copy_of_name, optional = nil)
begin
@volume_set.create_snapshot_of_volume_set(name, copy_of_name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a snapshot of an existing Volume.
#
# ==== Attributes
#
# * name - the name of the Snapshot
# type name: String
# * copy_of_name - the name of the parent volume
# type copy_of_name: String
# * optional - Hash of other optional items
# type optional: Hash
#
# optional = {
# 'id' => 12, # Specifies the ID of the volume,
# # next by default
# 'comment' => "some comment",
# 'readOnly' => true, # Read Only
# 'expirationHours' => 36, # time from now to expire
# 'retentionHours' => 12 # time from now to expire
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
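#
# ==== Example
#
# A minimal sketch with an authenticated client +cl+ and an assumed
# parent volume 'demo_vol':
#
#   optional = { 'readOnly' => true, 'comment' => 'nightly snapshot' }
#   cl.create_snapshot('demo_vol_snap', 'demo_vol', optional)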
def create_snapshot(name, copy_of_name, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :allowRemoteCopyParent }
end
begin
@volume.create_snapshot(name, copy_of_name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Restores from a snapshot to a volume
#
# ==== Attributes
#
# * name - the name of the Snapshot
# type name: String
# * optional - hash of other optional items
# type optional: Hash
#
# optional = {
# 'online' => false, # Enables (true) or disables
# # (false) executing the promote
# # operation on an online volume.
# # The default setting is false.
#
# 'priority' => 2, # Does not apply to online promote
# # operation or to stop promote
# # operation.
#
# 'allowRemoteCopyParent' => false # Allows the promote operation to
# # proceed even if the RW parent
# # volume is currently in a Remote
# # Copy volume group, if that group
# # has not been started. If the
# # Remote Copy group has been
# # started, this command fails.
# # (WSAPI 1.6 and later.)
# }
#
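# ==== Example
#
# Promoting an assumed snapshot 'demo_vol_snap' back to its parent
# volume, with an authenticated client +cl+ (names illustrative):
#
#   cl.restore_snapshot('demo_vol_snap', { 'online' => true })
#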
def restore_snapshot(name, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :allowRemoteCopyParent }
end
begin
@volume.restore_snapshot(name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a snapshot
#
# ==== Attributes
#
# * name - the name of the snapshot volume
# type name: String
#
# ==== Raises:
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPForbidden
# - RETAINED - Volume retention time has not expired
# * Hpe3parSdk::HTTPForbidden
# - HAS_RO_CHILD - Volume has read-only child
# * Hpe3parSdk::HTTPConflict
# - HAS_CHILD - The volume has a child volume
# * Hpe3parSdk::HTTPConflict
# - IN_USE - The volume is in use by VV set, VLUN, etc
def delete_snapshot(name)
begin
@volume.delete_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the snapshots of a particular volume
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
#
# ==== Returns
#
# Array of VirtualVolume
def get_volume_snapshots(name)
begin
@volume.get_volume_snapshots(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of all ports on the 3PAR.
#
# ==== Returns
#
# Array of Port.
def get_ports
begin
@port.get_ports
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of Fibre Channel Ports.
#
# ==== Attributes
#
# * state - Port link state.
# type state: Integer. Refer Hpe3parSdk::PortLinkState for complete enumeration.
#
# ==== Returns
#
# Array of Fibre Channel Port.
def get_fc_ports(state = nil)
begin
@port.get_fc_ports(state)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of iSCSI Ports.
#
# ==== Attributes
#
# * state - Port link state.
# type state: Integer. Refer Hpe3parSdk::PortLinkState for complete enumeration.
#
# ==== Returns
#
# Array of iSCSI Port.
def get_iscsi_ports(state = nil)
begin
@port.get_iscsi_ports(state)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of IP Ports.
#
# ==== Attributes
#
# * state - Port link state.
# type state: Integer. Refer Hpe3parSdk::PortLinkState for complete enumeration.
#
# ==== Returns
#
# Array of IP Port.
def get_ip_ports(state = nil)
begin
@port.get_ip_ports(state)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets entire list of CPGs.
#
# ==== Returns
#
# CPG array
def get_cpgs
begin
@cpg.get_cpgs
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a Cpg.
#
# ==== Attributes
#
# * name - The name of the cpg to find
# type name: String
#
# ==== Returns
#
# CPG
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 15 message: cpg does not exist
def get_cpg(name)
begin
@cpg.get_cpg(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new CPG.
#
# ==== Attributes
#
# * name - Name of the cpg
# type name: String
# * optional - Hash of other optional items
# type optional: Hash
#
# optional = {
# 'growthIncrementMiB' => 100, # Growth increment in MiB for
# # each auto-grown operation
# 'growthLimitMiB' => 1024, # Auto-grow operation is limited
# # to specified storage amount
# 'usedLDWarningAlertMiB' => 200, # Threshold to trigger warning
# # of used logical disk space
# 'domain' => 'MyDomain', # Name of the domain object
# 'LDLayout' => {
# 'RAIDType' => 1, # Disk Raid Type
# 'setSize' => 100, # Size in number of chunklets
# 'HA' => 0, # Layout supports failure of
# # one port pair (1),
# # one cage (2),
# # or one magazine (3)
# 'chunkletPosPref' => 2, # Chunklet location preference
# # characteristics.
# # Lowest Number/Fastest transfer
# # = 1
# # Higher Number/Slower transfer
# # = 2
# 'diskPatterns' => []} # Patterns for candidate disks
# }
#
# ==== Raises
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT Invalid URI Syntax.
# * Hpe3parSdk::HTTPBadRequest
# - NON_EXISTENT_DOMAIN - Domain doesn't exist.
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Not Enough space is available.
# * Hpe3parSdk::HTTPBadRequest
# - BAD_CPG_PATTERN A Pattern in a CPG specifies illegal values.
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_CPG - CPG already exists
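#
# ==== Example
#
# An illustrative CPG using values from the hash shown above, with an
# authenticated client +cl+ (the domain name is an assumption):
#
#   optional = { 'domain' => 'MyDomain',
#                'LDLayout' => { 'RAIDType' => 1, 'HA' => 2 } }
#   cl.create_cpg('demo_cpg', optional)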
def create_cpg(name, optional = nil)
begin
@cpg.create_cpg(name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a CPG.
#
# ==== Attributes
#
# * name - Name of the CPG
# type name: String
# * optional - hash of other optional items
# type optional: Hash
#
# optional = {
# 'newName' => 'newCPG', # Specifies the name of the
# # CPG to update.
# 'disableAutoGrow'=>false, # Enables (false) or
# # disables (true) CPG auto
# # grow. Defaults to false.
# 'rmGrowthLimit'=> false, # Enables (false) or
# # disables (true) auto grow
# # limit enforcement. Defaults
# # to false.
# 'rmWarningAlert'=> false, # Enables (false) or
# # disables (true) warning
# # limit enforcement. Defaults
# # to false.
# }
#
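# ==== Example
#
# An illustrative rename, assuming an authenticated client +cl+ and
# an existing CPG 'demo_cpg':
#
#   cl.modify_cpg('demo_cpg', { 'newName' => 'demo_cpg_renamed' })
#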
def modify_cpg(name, cpg_mods)
begin
@cpg.modify_cpg(name, cpg_mods)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets available space information about a cpg.
#
# ==== Attributes
#
# * name - The name of the cpg to find
# type name: String
#
# ==== Returns
#
# Available space details in form of LDLayoutCapacity object
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 15 message: cpg does not exist
def get_cpg_available_space(name)
begin
@cpg.get_cpg_available_space(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a CPG.
#
# ==== Attributes
#
# * name - The name of the CPG
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 15 message: CPG does not exist
# * Hpe3parSdk::HTTPForbidden
# - IN_USE - The CPG cannot be removed because it is in use.
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
def delete_cpg(name)
begin
@cpg.delete_cpg(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the status of an online physical copy
#
# ==== Attributes
#
# * name - The name of the volume
# type name: str
#
# ==== Returns
#
# Status of online copy (String)
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error: message: Volume not an online physical copy
def get_online_physical_copy_status(name)
begin
@volume.get_online_physical_copy_status(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Stops an offline physical copy operation
#
# ==== Attributes
#
# * name - The name of the volume
# type name: String
def stop_offline_physical_copy(name)
begin
@volume.stop_offline_physical_copy(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Stops an online physical copy operation
#
# ==== Attributes
#
# * name - The name of the volume
# type name: String
def stop_online_physical_copy(name)
begin
@volume.stop_online_physical_copy(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Resynchronizes a physical copy.
#
# ==== Attributes
#
# * name - The name of the volume
# type name: String
def resync_physical_copy(name)
begin
@volume.resync_physical_copy(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Waits for a 3PAR task to end.
#
# ==== Attributes
#
# * task_id - The Id of the task to be waited upon.
# type task_id: Integer
# * poll_rate_secs - The polling interval in seconds.
# type poll_rate_secs: Integer
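#
# ==== Example
#
# Waiting on the task id returned by an assumed long-running call,
# with an authenticated client +cl+ (the +task_id+ accessor on the
# returned Task object is assumed):
#
#   task = cl.tune_volume('demo_vol', 1, { 'userCPG' => 'FC_r1' })
#   cl.wait_for_task_to_end(task.task_id, 10)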
def wait_for_task_to_end(task_id, poll_rate_secs = 15)
begin
@task.wait_for_task_to_end(task_id, poll_rate_secs)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Cancel a 3PAR task
#
# ==== Attributes
#
# * task_id - The Id of the task to be cancelled.
# type task_id: Integer
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - NON_ACTIVE_TASK - The task is not active at this time.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_CANNOT_CANCEL_TASK - Invalid operation: Task cannot be cancelled.
def cancel_task(task_id)
begin
@task.cancel_task(task_id)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def flash_cache_exists?
begin
@flash_cache.flash_cache_exists?
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def volume_exists?(name)
begin
@volume.volume_exists?(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def volume_set_exists?(name)
begin
@volume_set.volume_set_exists?(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def host_exists?(host_name)
begin
@host.host_exists?(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def host_set_exists?(host_name)
begin
@host_set.host_set_exists?(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def cpg_exists?(name)
begin
@cpg.cpg_exists?(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def online_physical_copy_exists?(src_name, phy_copy_name)
begin
@volume.online_physical_copy_exists?(src_name, phy_copy_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def offline_physical_copy_exists?(src_name, phy_copy_name)
begin
@volume.offline_physical_copy_exists?(src_name, phy_copy_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Logout from the 3PAR Array
def logout
unless @log_file_path.nil?
if Hpe3parSdk.logger != nil
Hpe3parSdk.logger.close
Hpe3parSdk.logger = nil
end
end
begin
@http.unauthenticate
rescue Hpe3parSdk::HPE3PARException => ex
#Do nothing
end
end
end
|
kmuto/review | lib/epubmaker/producer.rb | EPUBMaker.Producer.complement | ruby | def complement
@config['htmlext'] ||= 'html'
defaults = ReVIEW::Configure.new.merge(
'language' => 'ja',
'date' => Time.now.strftime('%Y-%m-%d'),
'modified' => Time.now.utc.strftime('%Y-%02m-%02dT%02H:%02M:%02SZ'),
'isbn' => nil,
'toclevel' => 2,
'stylesheet' => [],
'epubversion' => 3,
'htmlversion' => 5,
'secnolevel' => 2,
'pre_secnolevel' => 0,
'post_secnolevel' => 1,
'part_secnolevel' => 1,
'titlepage' => true,
'titlefile' => nil,
'originaltitlefile' => nil,
'profile' => nil,
'colophon' => nil,
'colophon_order' => %w[aut csl trl dsr ill edt pbl prt pht],
'direction' => 'ltr',
'epubmaker' => {
'flattoc' => nil,
'flattocindent' => true,
'ncx_indent' => [],
'zip_stage1' => 'zip -0Xq',
'zip_stage2' => 'zip -Xr9Dq',
'zip_addpath' => nil,
'hook_beforeprocess' => nil,
'hook_afterfrontmatter' => nil,
'hook_afterbody' => nil,
'hook_afterbackmatter' => nil,
'hook_aftercopyimage' => nil,
'hook_prepack' => nil,
'rename_for_legacy' => nil,
'verify_target_images' => nil,
'force_include_images' => [],
'cover_linear' => nil,
'back_footnote' => nil
},
'externallink' => true,
'contentdir' => '.',
'imagedir' => 'images',
'fontdir' => 'fonts',
'image_ext' => %w[png gif jpg jpeg svg ttf woff otf],
'image_maxpixels' => 4_000_000,
'font_ext' => %w[ttf woff otf]
)
@config = defaults.deep_merge(@config)
@config['title'] = @config['booktitle'] unless @config['title']
deprecated_parameters = {
'ncxindent' => 'epubmaker:ncxindent',
'flattoc' => 'epubmaker:flattoc',
'flattocindent' => 'epubmaker:flattocindent',
'hook_beforeprocess' => 'epubmaker:hook_beforeprocess',
'hook_afterfrontmatter' => 'epubmaker:hook_afterfrontmatter',
'hook_afterbody' => 'epubmaker:hook_afterbody',
'hook_afterbackmatter' => 'epubmaker:hook_afterbackmatter',
'hook_aftercopyimage' => 'epubmaker:hook_aftercopyimage',
'hook_prepack' => 'epubmaker:hook_prepack',
'rename_for_legacy' => 'epubmaker:rename_for_legacy',
'zip_stage1' => 'epubmaker:zip_stage1',
'zip_stage2' => 'epubmaker:zip_stage2',
'zip_addpath' => 'epubmaker:zip_addpath',
'verify_target_images' => 'epubmaker:verify_target_images',
'force_include_images' => 'epubmaker:force_include_images',
'cover_linear' => 'epubmaker:cover_linear'
}
deprecated_parameters.each_pair do |k, v|
next if @config[k].nil?
sa = v.split(':', 2)
warn "Parameter #{k} is deprecated. Use:\n#{sa[0]}:\n #{sa[1]}: ...\n\n"
@config[sa[0]][sa[1]] = @config[k]
@config.delete(k)
end
if @config['epubversion'] >= 3
@config['htmlversion'] = 5
end
@config.maker = 'epubmaker'
@config['cover'] = "#{@config['bookname']}.#{@config['htmlext']}" unless @config['cover']
%w[bookname title].each do |k|
raise "Key #{k} must have a value. Abort." unless @config[k]
end
# array
%w[subject aut
a-adp a-ann a-arr a-art a-asn a-aqt a-aft a-aui a-ant a-bkp a-clb a-cmm a-dsr a-edt
a-ill a-lyr a-mdc a-mus a-nrt a-oth a-pht a-prt a-red a-rev a-spn a-ths a-trc a-trl
adp ann arr art asn aut aqt aft aui ant bkp clb cmm dsr edt
ill lyr mdc mus nrt oth pht pbl prt red rev spn ths trc trl
stylesheet rights].each do |item|
next unless @config[item]
if @config[item].is_a?(String)
@config[item] = [@config[item]]
end
end
# optional
# type, format, identifier, source, relation, coverpage, aut
end | Complement parameters. | train | https://github.com/kmuto/review/blob/77d1273e671663f05db2992281fd891b776badf0/lib/epubmaker/producer.rb#L234-L338 | class Producer
# Array of content objects.
attr_accessor :contents
# Parameter hash.
attr_accessor :config
# Message resource object.
attr_reader :res
# Take YAML +file+ and return parameter hash.
def self.load(file)
if file.nil? || !File.exist?(file)
raise "Can't open #{file}."
end
loader = ReVIEW::YAMLLoader.new
loader.load_file(file)
end
# Take YAML +file+ and update parameter hash.
def load(file)
if file.nil? || !File.exist?(file)
raise "Can't open #{file}."
end
loader = ReVIEW::YAMLLoader.new
merge_config(@config.deep_merge(loader.load_file(file)))
end
def warn(msg)
@logger.warn(msg)
end
# Construct producer object.
# +config+ takes the initial parameter hash. These parameters can be overridden by EPUBMaker#load or EPUBMaker#merge_config.
# +version+ takes EPUB version (default is 2).
def initialize(config = nil, version = nil)
@contents = []
@config = ReVIEW::Configure.new
@epub = nil
@config['epubversion'] = version unless version.nil?
@res = ReVIEW::I18n
@logger = ReVIEW.logger
merge_config(config) if config
end
def coverimage
return nil unless config['coverimage']
@contents.each do |item|
if item.media.start_with?('image') && item.file =~ /#{config['coverimage']}\Z/
return item.file
end
end
nil
end
# Update parameters by merging from new parameter hash +config+.
def merge_config(config)
@config.deep_merge!(config)
complement
unless @config['epubversion'].nil?
case @config['epubversion'].to_i
when 2
@epub = EPUBMaker::EPUBv2.new(self)
when 3
@epub = EPUBMaker::EPUBv3.new(self)
else
raise "Invalid EPUB version (#{@config['epubversion']}.)"
end
end
if config['language']
ReVIEW::I18n.locale = config['language']
end
support_legacy_maker
end
# Write mimetype file to IO object +wobj+.
def mimetype(wobj)
s = @epub.mimetype
if !s.nil? && !wobj.nil?
wobj.print s
end
end
# Write opf file to IO object +wobj+.
def opf(wobj)
s = @epub.opf
if !s.nil? && !wobj.nil?
wobj.puts s
end
end
# Write ncx file to IO object +wobj+. +indentarray+ defines prefix
# string for each level.
def ncx(wobj, indentarray = [])
s = @epub.ncx(indentarray)
if !s.nil? && !wobj.nil?
wobj.puts s
end
end
# Write container file to IO object +wobj+.
def container(wobj)
s = @epub.container
if !s.nil? && !wobj.nil?
wobj.puts s
end
end
# Write cover file to IO object +wobj+.
# If Producer#config["coverimage"] is defined, it will be used for
# the cover image.
def cover(wobj)
type = @config['epubversion'] >= 3 ? 'cover' : nil
s = @epub.cover(type)
if !s.nil? && !wobj.nil?
wobj.puts s
end
end
# Write title file (copying) to IO object +wobj+.
def titlepage(wobj)
s = @epub.titlepage
if !s.nil? && !wobj.nil?
wobj.puts s
end
end
# Write colophon file to IO object +wobj+.
def colophon(wobj)
s = @epub.colophon
if !s.nil? && !wobj.nil?
wobj.puts s
end
end
# Write own toc file to IO object +wobj+.
def mytoc(wobj)
s = @epub.mytoc
if !s.nil? && !wobj.nil?
wobj.puts s
end
end
# Add informations of figure files in +path+ to contents array.
# +base+ defines a string to remove from path name.
def import_imageinfo(path, base = nil, allow_exts = nil)
return nil unless File.exist?(path)
allow_exts ||= @config['image_ext']
Dir.foreach(path) do |f|
next if f.start_with?('.')
if f =~ /\.(#{allow_exts.join('|')})\Z/i
path.chop! if path =~ %r{/\Z}
if base.nil?
@contents.push(EPUBMaker::Content.new('file' => "#{path}/#{f}"))
else
@contents.push(EPUBMaker::Content.new('file' => "#{path.sub(base + '/', '')}/#{f}"))
end
end
if FileTest.directory?("#{path}/#{f}")
import_imageinfo("#{path}/#{f}", base)
end
end
end
alias_method :importImageInfo, :import_imageinfo
# Produce EPUB file +epubfile+.
# +basedir+ points to the directory that has the contents (default: current directory).
# +tmpdir+ defines temporary directory.
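# ==== Example
#
# A minimal sketch (file names are illustrative): build a producer from
# a YAML configuration and pack an EPUB from the current directory.
#
#   config = EPUBMaker::Producer.load('config.yml')
#   producer = EPUBMaker::Producer.new(config)
#   producer.produce('book.epub')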
def produce(epubfile, basedir = nil, tmpdir = nil)
current = Dir.pwd
basedir ||= current
new_tmpdir = tmpdir.nil? ? Dir.mktmpdir : tmpdir
if epubfile !~ %r{\A/}
epubfile = "#{current}/#{epubfile}"
end
# FIXME: error check
File.unlink(epubfile) if File.exist?(epubfile)
begin
@epub.produce(epubfile, basedir, new_tmpdir)
ensure
FileUtils.rm_r(new_tmpdir) if tmpdir.nil?
end
end
def call_hook(filename, *params)
return if !filename.present? || !File.exist?(filename) || !FileTest.executable?(filename)
if ENV['REVIEW_SAFE_MODE'].to_i & 1 > 0
warn 'hook is prohibited in safe mode. ignored.'
else
system(filename, *params)
end
end
def isbn_hyphen
str = @config['isbn'].to_s
if str =~ /\A\d{10}\Z/
return "#{str[0..0]}-#{str[1..5]}-#{str[6..8]}-#{str[9..9]}"
end
if str =~ /\A\d{13}\Z/
return "#{str[0..2]}-#{str[3..3]}-#{str[4..8]}-#{str[9..11]}-#{str[12..12]}"
end
nil
end
private
# Complement parameters.
def support_legacy_maker
# legacy review-epubmaker support
if @config['flag_legacy_coverfile'].nil? && !@config['coverfile'].nil? && File.exist?(@config['coverfile'])
@config['cover'] = "#{@config['bookname']}-cover.#{@config['htmlext']}"
@epub.legacy_cover_and_title_file(@config['coverfile'], @config['cover'])
@config['flag_legacy_coverfile'] = true
warn %Q(Parameter 'coverfile' is obsolete. Please use 'cover' and make complete html file with header and footer.)
end
if @config['flag_legacy_titlepagefile'].nil? && !@config['titlepagefile'].nil? && File.exist?(@config['titlepagefile'])
@config['titlefile'] = "#{@config['bookname']}-title.#{@config['htmlext']}"
@config['titlepage'] = true
@epub.legacy_cover_and_title_file(@config['titlepagefile'], @config['titlefile'])
@config['flag_legacy_titlepagefile'] = true
warn %Q(Parameter 'titlepagefile' is obsolete. Please use 'titlefile' and make complete html file with header and footer.)
end
if @config['flag_legacy_backcoverfile'].nil? && !@config['backcoverfile'].nil? && File.exist?(@config['backcoverfile'])
@config['backcover'] = "#{@config['bookname']}-backcover.#{@config['htmlext']}"
@epub.legacy_cover_and_title_file(@config['backcoverfile'], @config['backcover'])
@config['flag_legacy_backcoverfile'] = true
warn %Q(Parameter 'backcoverfile' is obsolete. Please use 'backcover' and make complete html file with header and footer.)
end
if @config['flag_legacy_pubhistory'].nil? && @config['pubhistory']
@config['history'] = [[]]
@config['pubhistory'].split("\n").each { |date| @config['history'][0].push(date.sub(/(\d+)年(\d+)月(\d+)日/, '\1-\2-\3')) }
@config['flag_legacy_pubhistory'] = true
warn %Q(Parameter 'pubhistory' is obsolete. Please use 'history' array.)
end
true
end
end
|
HewlettPackard/hpe3par_ruby_sdk | lib/Hpe3parSdk/client.rb | Hpe3parSdk.Client.delete_cpg | ruby | def delete_cpg(name)
begin
@cpg.delete_cpg(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end | Deletes a CPG.
==== Attributes
* name - The name of the CPG
type name: String
==== Raises
* Hpe3parSdk::HPE3PARException
Error with code: 15 message: CPG does not exist
* Hpe3parSdk::HTTPForbidden
- IN_USE - The CPG cannot be removed because it is in use.
* Hpe3parSdk::HTTPForbidden
- PERM_DENIED - Permission denied | train | https://github.com/HewlettPackard/hpe3par_ruby_sdk/blob/f8cfc6e597741be593cf7fe013accadf982ee68b/lib/Hpe3parSdk/client.rb#L2435-L2442 | class Client
def initialize(api_url, debug: false, secure: false, timeout: nil, suppress_ssl_warnings: false, app_type: 'ruby_SDK_3par', log_file_path: nil)
unless api_url.is_a?(String)
raise Hpe3parSdk::HPE3PARException.new(nil,
"'api_url' parameter is mandatory and should be of type String")
end
@api_url = api_url
@debug = debug
@secure = secure
@timeout = timeout
@suppress_ssl_warnings = suppress_ssl_warnings
@log_level = Logger::INFO
@log_file_path = log_file_path
init_log
@http = HTTPJSONRestClient.new(
@api_url, @secure, @debug,
@suppress_ssl_warnings, @timeout = nil
)
check_WSAPI_version
@vlun_query_supported = false
@cpg = CPGManager.new(@http)
@qos = QOSManager.new(@http)
@flash_cache = FlashCacheManager.new(@http)
@port = PortManager.new(@http)
@task = TaskManager.new(@http)
@host_and_vv_set_filter_supported = false
@ssh = nil
@vlun = VlunManager.new(@http, @vlun_query_supported)
@host = HostManager.new(@http, @vlun_query_supported)
@volume_set = VolumeSetManager.new(@http, @host_and_vv_set_filter_supported)
@host_set = HostSetManager.new(@http, @host_and_vv_set_filter_supported)
@app_type = app_type
end
private def init_log
unless @log_file_path.nil?
client_logger = Logger.new(@log_file_path, 'daily', formatter: CustomFormatter.new)
else
client_logger = Logger.new(STDOUT)
end
if @debug
@log_level = Logger::DEBUG
end
Hpe3parSdk.logger = MultiLog.new(:level => @log_level, :loggers => client_logger)
end
private def check_WSAPI_version
begin
@api_version = get_ws_api_version
rescue HPE3PARException => ex
ex_message = ex.message
if ex_message && ex_message.include?('SSL Certificate Verification Failed')
raise Hpe3parSdk::SSLCertFailed
else
msg = "Error: #{ex_message} - Error communicating with 3PAR WSAPI. '
'Check proxy settings. If error persists, either the '
'3PAR WSAPI is not running OR the version of the WSAPI is '
'not supported."
raise Hpe3parSdk::HPE3PARException(message: msg)
end
end
compare_version(@api_version)
end
private def set_ssh_options(username, password, port=22, conn_timeout=nil)
@ssh=Hpe3parSdk::SSH.new(@api_url.split("//")[1].split(":")[0], username, password)
end
private def compare_version(api_version)
@min_version = WSAPIVersion
.parse(WSAPIVersionSupport::WSAPI_MIN_SUPPORTED_VERSION)
@min_version_with_compression = WSAPIVersion
.parse(WSAPIVersionSupport::WSAPI_MIN_VERSION_COMPRESSION_SUPPORT)
@current_version = WSAPIVersion.new(api_version['major'], api_version['minor'],
api_version['revision'])
if @current_version < @min_version
err_msg = "Unsupported 3PAR WS API version #{@current_version}, min supported version is, #{WSAPIVersionSupport::WSAPI_MIN_SUPPORTED_VERSION}"
raise Hpe3parSdk::UnsupportedVersion.new(nil, err_msg)
end
# Check for VLUN query support.
min_vlun_query_support_version = WSAPIVersion
.parse(WSAPIVersionSupport::WSAPI_MIN_VERSION_VLUN_QUERY_SUPPORT)
if @current_version >= min_vlun_query_support_version
@vlun_query_supported = true
end
# Check for Host and VV Set query support
if @current_version >= @min_version_with_compression
@host_and_vv_set_filter_supported = true
end
end
# Get the 3PAR WS API version.
#
# ==== Returns
#
# WSAPI version hash
def get_ws_api_version
# remove everything down to host:port
host_url = @api_url.split('/api')
@http.set_url(host_url[0])
begin
# get the api version
response = @http.get('/api')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
ensure
# reset the url
@http.set_url(@api_url)
end
# Gets the WSAPI Configuration.
#
# ==== Returns
#
# WSAPI configuration hash
def get_ws_api_configuration_info
begin
response = @http.get('/wsapiconfiguration')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new FlashCache
#
# ==== Attributes
#
# * size_in_gib - Specifies the node pair size of the Flash Cache on the system
# type size_in_gib: Integer
# * mode - Values supported Simulator: 1, Real: 2 (default)
# type mode: Integer
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Not enough space is available for the operation.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - A JSON input object contains a name-value pair with a numeric value that exceeds the expected range. Flash Cache exceeds the expected range. The HTTP ref member contains the name.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_FLASH_CACHE - The Flash Cache already exists.
# * Hpe3parSdk::HTTPForbidden
# - FLASH_CACHE_NOT_SUPPORTED - Flash Cache is not supported.
# * Hpe3parSdk::HTTPBadRequest
# - INV_FLASH_CACHE_SIZE - Invalid Flash Cache size. The size must be a multiple of 16 G.
def create_flash_cache(size_in_gib, mode = nil)
begin
@flash_cache.create_flash_cache(size_in_gib, mode)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Get Flash Cache information
#
# ==== Returns
#
# FlashCache - Details of the specified flash cache
def get_flash_cache
begin
@flash_cache.get_flash_cache
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes an existing Flash Cache
#
# ==== Raises
#
# * Hpe3parSdk::HTTPForbidden
# - FLASH_CACHE_IS_BEING_REMOVED - Unable to delete the Flash Cache, the Flash Cache is being removed.
# * Hpe3parSdk::HTTPForbidden
# - FLASH_CACHE_NOT_SUPPORTED - Flash Cache is not supported on this system.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_FLASH_CACHE - The Flash Cache does not exist.
def delete_flash_cache
begin
@flash_cache.delete_flash_cache
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the Storage System Information
#
# ==== Returns
#
# Hash of Storage System Info
def get_storage_system_info
begin
response = @http.get('/system')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the overall system capacity for the 3PAR server.
#
# ==== Returns
#
# Hash of system capacity information
#
#
# capacity = {
# "allCapacity"=> { # Overall system capacity
# # includes FC, NL, SSD
# # device types
# "totalMiB"=>20054016, # Total system capacity
# # in MiB
# "allocated"=>{ # Allocated space info
# "totalAllocatedMiB"=>12535808, # Total allocated
# # capacity
# "volumes"=> { # Volume capacity info
# "totalVolumesMiB"=>10919936, # Total capacity
# # allocated to volumes
# "nonCPGsMiB"=> 0, # Total non-CPG capacity
# "nonCPGUserMiB"=> 0, # The capacity allocated
# # to non-CPG user space
# "nonCPGSnapshotMiB"=>0, # The capacity allocated
# # to non-CPG snapshot
# # volumes
# "nonCPGAdminMiB"=> 0, # The capacity allocated
# # to non-CPG
# # administrative volumes
# "CPGsMiB"=>10919936, # Total capacity
# # allocated to CPGs
# "CPGUserMiB"=>7205538, # User CPG space
# "CPGUserUsedMiB"=>7092550, # The CPG allocated to
# # user space that is
# # in use
# "CPGUserUnusedMiB"=>112988, # The CPG allocated to
# # user space that is not
# # in use
# "CPGSnapshotMiB"=>2411870, # Snapshot CPG space
# "CPGSnapshotUsedMiB"=>210256, # CPG allocated to
# # snapshot that is in use
# "CPGSnapshotUnusedMiB"=>2201614, # CPG allocated to
# # snapshot space that is
# # not in use
# "CPGAdminMiB"=>1302528, # Administrative volume
# # CPG space
# "CPGAdminUsedMiB"=> 115200, # The CPG allocated to
# # administrative space
# # that is in use
# "CPGAdminUnusedMiB"=>1187328, # The CPG allocated to
# # administrative space
# # that is not in use
# "unmappedMiB"=>0 # Allocated volume space
# # that is unmapped
# },
# "system"=> { # System capacity info
# "totalSystemMiB"=> 1615872, # System space capacity
# "internalMiB"=>780288, # The system capacity
# # allocated to internal
# # resources
# "spareMiB"=> 835584, # Total spare capacity
# "spareUsedMiB"=> 0, # The system capacity
# # allocated to spare resources
# # in use
# "spareUnusedMiB"=> 835584 # The system capacity
# # allocated to spare resources
# # that are unused
# }
# },
# "freeMiB"=> 7518208, # Free capacity
# "freeInitializedMiB"=> 7518208, # Free initialized capacity
# "freeUninitializedMiB"=> 0, # Free uninitialized capacity
# "unavailableCapacityMiB"=> 0, # Unavailable capacity in MiB
# "failedCapacityMiB"=> 0 # Failed capacity in MiB
# },
# "FCCapacity"=> { # System capacity from FC devices only
# ... # Same structure as above
# },
# "NLCapacity"=> { # System capacity from NL devices only
# ... # Same structure as above
# },
# "SSDCapacity"=> { # System capacity from SSD devices only
# ... # Same structure as above
# }
# }
def get_overall_system_capacity
begin
response = @http.get('/capacity')
response[1]
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# This authenticates against the 3PAR WSAPI server and creates a session.
# ==== Attributes
#
# * username - The username
# type username: String
# * password - The Password
# type password: String
def login(username, password, optional = nil)
set_ssh_options(username, password, port=22, conn_timeout=nil)
@volume = VolumeManager.new(@http, @ssh, @app_type)
@http.authenticate(username, password, optional)
end
# Get the list of all 3PAR Tasks
#
# ==== Returns
#
# Array of Task
def get_all_tasks
begin
@task.get_all_tasks
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Get the status of a 3PAR Task
#
# ==== Attributes
#
# * task_id - the task id
# type task_id: Integer
#
# ==== Returns
#
# Task
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
#     - INV_INPUT_BELOW_RANGE - Bad request: Task ID must be a positive value.
#   * Hpe3parSdk::HTTPBadRequest
#     - INV_INPUT_EXCEEDS_RANGE - Bad request: Task ID is too large.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_TASK - Task with the specified Task ID does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WRONG_TYPE - Task ID is not an integer.
def get_task(task_id)
begin
@task.get_task(task_id)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def vlun_exists?(volname,lunid,host=nil,port=nil)
begin
@vlun.vlun_exists?(volname,lunid,host,port)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new VLUN.
#
# When creating a VLUN, the volumeName is required. The lun member is
# not required if auto is set to true.
# Either hostname or portPos (or both in the case of matched sets) is
# also required. The noVcn and overrideLowerPriority members are
# optional.
#
# ==== Attributes
#
# * volume_name: Name of the volume to be exported
# type volume_name: String
# * lun: LUN id
# type lun: Integer
# * host_name: Name of the host to which the volume is to be exported.
# type host_name: String
# * port_pos: System port of VLUN exported to. It includes node number, slot number, and card port number
# type port_pos: Hash
# port_pos = {'node'=> 1, # System node (0-7)
# 'slot'=> 2, # PCI bus slot in the node (0-5)
# 'port'=> 1} # Port number on the FC card (0-4)
# * no_vcn: A VLUN change notification (VCN) will not be issued after export (-novcn).
# type no_vcn: Boolean
# * override_lower_priority: Existing lower priority VLUNs will be overridden (-ovrd). Use only if hostname member exists.
# type override_lower_priority: Boolean
#
# ==== Returns
#
# VLUN id
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
#   - INV_INPUT_MISSING_REQUIRED - Missing volume or hostname or lunid.
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_VOL - Specified volume does not exist.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Specified hostname not found.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_PORT - Specified port does not exist.
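#
# ==== Example
#
# A minimal usage sketch (illustrative only; 'demo_vol' and 'demo_host'
# are hypothetical names):
#
#   # Export 'demo_vol' to 'demo_host' as LUN 1
#   cl.create_vlun('demo_vol', 1, 'demo_host')
#
#   # Or let the system choose the LUN id (auto = true)
#   cl.create_vlun('demo_vol', nil, 'demo_host', nil, false, false, true)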
def create_vlun(volume_name, lun = nil, host_name = nil, port_pos = nil, no_vcn = false, override_lower_priority = false, auto = false)
begin
@vlun.create_vlun(volume_name, host_name, lun, port_pos, no_vcn, override_lower_priority, auto)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets VLUNs.
#
# ==== Returns
#
# Array of VLUN objects
def get_vluns
begin
@vlun.get_vluns
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a VLUN.
#
# ==== Attributes
#
# * volume_name: The volume name of the VLUN to find
# type volume_name: String
#
# ==== Returns
#
# VLUN object
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VLUN - VLUN doesn't exist
def get_vlun(volume_name)
begin
@vlun.get_vlun(volume_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a VLUN.
#
# ==== Attributes
#
# * volume_name: Volume name of the VLUN
# type volume_name: String
# * lun_id: LUN ID
# type lun_id: Integer
# * host_name: Name of the host to which the volume is exported. For a VLUN of port type, the value is empty
# type host_name: String
# * port: Specifies the system port of the VLUN export. It includes the system node number, PCI bus slot number, and card port number on the FC card in the format <node>:<slot>:<cardPort>
# type port: Hash
#
# port = {'node'=> 1, # System node (0-7)
# 'slot'=> 2, # PCI bus slot in the node (0-5)
# 'port'=>1} # Port number on the FC card (0-4)
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MISSING_REQUIRED - Incomplete VLUN info. Missing
# volumeName or lun, or both hostname and port.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PORT_SELECTION - Specified port is invalid.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - The LUN specified exceeds expected
# range.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The host does not exist
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VLUN - The VLUN does not exist
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_PORT - The port does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
def delete_vlun(volume_name, lun_id, host_name = nil, port = nil)
begin
@vlun.delete_vlun(volume_name, lun_id, host_name, port)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets QoS Rules.
#
# ==== Returns
#
# Array of QoSRule objects
#
def query_qos_rules
begin
@qos.query_qos_rules
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Queries a QoS rule
#
# ==== Attributes
#
# * target_name : Name of the target. When targetType is sys, target name must be sys:all_others.
# type target_name: String
# * target_type : Target type is vvset or sys
# type target_type: String
# ==== Returns
#
# QoSRule object
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_QOS_RULE - QoS rule does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Illegal character in the input.
def query_qos_rule(target_name, target_type = 'vvset')
begin
@qos.query_qos_rule(target_name, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def qos_rule_exists?(target_name, target_type = 'vvset')
begin
@qos.qos_rule_exists?(target_name, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates QoS rules
# The QoS rule can be applied to VV sets. By using sys:all_others,
# you can apply the rule to all volumes in the system for which no
# QoS rule has been defined.
# ioMinGoal and ioMaxLimit must be used together to set I/O limits.
# Similarly, bwMinGoalKB and bwMaxLimitKB must be used together.
# If ioMaxLimitOP is set to 2 (no limit), ioMinGoalOP must also be
# set to 2 (no limit), and vice versa; they cannot be set to
# 'none' individually. Similarly, if bwMaxLimitOP is set to 2 (no
# limit), then bwMinGoalOP must also be set to 2.
# If ioMaxLimitOP is set to 1 (zero), ioMinGoalOP must also be
# set to 1 (zero), and vice versa. Similarly, if bwMaxLimitOP is
# set to 1 (zero), then bwMinGoalOP must also be set to 1.
# The ioMinGoalOP and ioMaxLimitOP fields take precedence over
# the ioMinGoal and ioMaxLimit fields.
# The bwMinGoalOP and bwMaxLimitOP fields take precedence over
# the bwMinGoalKB and bwMaxLimitKB fields.
#
# ==== Attributes
#
# * target_type: Type of QoS target, either enum TARGET_TYPE_VVS or TARGET_TYPE_SYS.
# type target_type: Refer Hpe3parSdk::QoStargetType for complete enumeration (VVSET or SYS)
# * target_name: Name of the target object on which the QoS rule will be created.
# type target_name: String
# * qos_rules: QoS options
# type qos_rules: Hash
# qos_rules = {
# 'priority'=> 2, # Refer Hpe3parSdk::QoSpriorityEnumeration for complete enumeration
# 'bwMinGoalKB'=> 1024, # bandwidth rate minimum goal in
# # kilobytes per second
# 'bwMaxLimitKB'=> 1024, # bandwidth rate maximum limit in
# # kilobytes per second
# 'ioMinGoal'=> 10000, # I/O-per-second minimum goal
#     'ioMaxLimit'=> 2000000,  # I/O-per-second maximum limit
# 'enable'=> false, # QoS rule for target enabled?
# 'bwMinGoalOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth minimum goal is 0
#                              # when set to 2, the bandwidth minimum
# # goal is none (NoLimit)
# 'bwMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth maximum limit is 0
# # when set to 2, the bandwidth maximum
# # limit is none (NoLimit)
# 'ioMinGoalOP'=>1, # zero none operation enum, when set to
# # 1, I/O minimum goal is 0
# # when set to 2, the I/O minimum goal is
# # none (NoLimit)
# 'ioMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, I/O maximum limit is 0
# # when set to 2, the I/O maximum limit
# # is none (NoLimit)
# 'latencyGoal'=>5000, # Latency goal in milliseconds
# 'defaultLatency'=> false# Use latencyGoal or defaultLatency?
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_RANGE - Invalid input: number exceeds expected range.
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_QOS_RULE - QoS rule does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Illegal character in the input.
# * Hpe3parSdk::HTTPBadRequest
# - EXISTENT_QOS_RULE - QoS rule already exists.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MIN_GOAL_GRT_MAX_LIMIT - I/O-per-second maximum limit should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
#   - INV_INPUT_BW_MIN_GOAL_GRT_MAX_LIMIT - Bandwidth maximum limit should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
#   - INV_INPUT_BELOW_RANGE - I/O-per-second limit is below range. Bandwidth limit is below range.
# * Hpe3parSdk::HTTPBadRequest
# - UNLICENSED_FEATURE - The system is not licensed for QoS.
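#
# ==== Example
#
# A minimal usage sketch (illustrative only; 'demo_vvset' is a
# hypothetical VV set and the numbers are placeholders):
#
#   qos_rules = {
#     'priority'   => 2,
#     'ioMinGoal'  => 300,
#     'ioMaxLimit' => 1000
#   }
#   cl.create_qos_rules('demo_vvset', qos_rules, Hpe3parSdk::QoStargetType::VVSET)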
def create_qos_rules(target_name, qos_rules, target_type = QoStargetType::VVSET)
if @current_version < @min_version && !qos_rules.nil?
qos_rules.delete_if { |key, _value| key == :latencyGoaluSecs }
end
begin
@qos.create_qos_rules(target_name, qos_rules, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies existing QoS rules
#
# The QoS rule can be applied to VV sets. By using sys:all_others,
# you can apply the rule to all volumes in the system for which no
# QoS rule has been defined.
# ioMinGoal and ioMaxLimit must be used together to set I/O limits.
# Similarly, bwMinGoalKB and bwMaxLimitKB must be used together.
# If ioMaxLimitOP is set to 2 (no limit), ioMinGoalOP must also be
# set to 2 (no limit), and vice versa; they cannot be set to
# 'none' individually. Similarly, if bwMaxLimitOP is set to 2 (no
# limit), then bwMinGoalOP must also be set to 2.
# If ioMaxLimitOP is set to 1 (zero), ioMinGoalOP must also be
# set to 1 (zero), and vice versa. Similarly, if bwMaxLimitOP is
# set to 1 (zero), then bwMinGoalOP must also be set to 1.
# The ioMinGoalOP and ioMaxLimitOP fields take precedence over
# the ioMinGoal and ioMaxLimit fields.
# The bwMinGoalOP and bwMaxLimitOP fields take precedence over
# the bwMinGoalKB and bwMaxLimitKB fields.
#
# ==== Attributes
#
# * target_name: Name of the target object on which the QoS rule will be created.
# type target_name: String
# * target_type: Type of QoS target, either vvset or sys. Refer Hpe3parSdk::QoStargetTypeConstants for complete enumeration
# type target_type: String
# * qos_rules: QoS options
# type qos_rules: Hash
# qos_rules = {
# 'priority'=> 2, # Refer Hpe3parSdk::QoSpriorityEnumeration for complete enumeration
# 'bwMinGoalKB'=> 1024, # bandwidth rate minimum goal in
# # kilobytes per second
# 'bwMaxLimitKB'=> 1024, # bandwidth rate maximum limit in
# # kilobytes per second
# 'ioMinGoal'=> 10000, # I/O-per-second minimum goal.
#     'ioMaxLimit'=> 2000000,  # I/O-per-second maximum limit
#     'enable'=> true,         # QoS rule for target enabled?
# 'bwMinGoalOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth minimum goal is 0
# # when set to 2, the bandwidth minimum
# # goal is none (NoLimit)
# 'bwMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, bandwidth maximum limit is 0
# # when set to 2, the bandwidth maximum
# # limit is none (NoLimit)
# 'ioMinGoalOP'=> 1, # zero none operation enum, when set to
#                              # 1, I/O minimum goal is 0
# # when set to 2, the I/O minimum goal is
# # none (NoLimit)
# 'ioMaxLimitOP'=> 1, # zero none operation enum, when set to
# # 1, I/O maximum limit is 0
# # when set to 2, the I/O maximum limit
# # is none (NoLimit)
# 'latencyGoal'=> 5000, # Latency goal in milliseconds
# 'defaultLatency'=> false# Use latencyGoal or defaultLatency?
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_EXCEEDS_RANGE - Invalid input: number exceeds expected
# range.
# * Hpe3parSdk::HTTPNotFound
#   NON_EXISTENT_QOS_RULE - QoS rule does not exist.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_ILLEGAL_CHAR - Illegal character in the input.
# * Hpe3parSdk::HTTPBadRequest
# EXISTENT_QOS_RULE - QoS rule already exists.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_IO_MIN_GOAL_GRT_MAX_LIMIT - I/O-per-second maximum limit
# should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_BW_MIN_GOAL_GRT_MAX_LIMIT - Bandwidth maximum limit
# should be greater than the minimum goal.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_BELOW_RANGE - I/O-per-second limit is below
# range. Bandwidth limit is below range.
# * Hpe3parSdk::HTTPBadRequest
# UNLICENSED_FEATURE - The system is not licensed for QoS.
def modify_qos_rules(target_name, qos_rules, target_type = QoStargetTypeConstants::VVSET)
if @current_version < @min_version && !qos_rules.nil?
qos_rules.delete_if { |key, _value| key == :latencyGoaluSecs }
end
begin
@qos.modify_qos_rules(target_name, qos_rules, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes QoS rules.
#
# ==== Attributes
#
# * target_name: Name of the target. When target_type is sys, target_name must be sys:all_others.
# type target_name: String
# * target_type: target type is vvset or sys
# type target_type: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# NON_EXISTENT_QOS_RULE - QoS rule does not exist.
# * Hpe3parSdk::HTTPBadRequest
# INV_INPUT_ILLEGAL_CHAR - Illegal character in the input
def delete_qos_rules(target_name, target_type = QoStargetTypeConstants::VVSET)
begin
@qos.delete_qos_rules(target_name, target_type)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all hosts.
#
# ==== Returns
#
# Array of Host.
def get_hosts
begin
@host.get_hosts
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets host information by name.
#
# ==== Attributes
#
# * name - The name of the host to find.
# type name: String
#
# ==== Returns
#
# Host.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Host not found.
# * Hpe3parSdk::HTTPInternalServerError
# - INT_SERV_ERR - Internal server error.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Host name contains invalid character.
def get_host(name)
begin
@host.get_host(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new Host.
#
# ==== Attributes
#
# * name - The name of the host.
# type name: String
# * iscsi_names - Array of iSCSI iqns.
# type iscsi_names: Array
# * fcwwns - Array of Fibre Channel World Wide Names.
# type fcwwns: Array
# * optional - The optional stuff.
# type optional: Hash
# optional = {
# 'persona'=> 1, # Refer Hpe3parSdk::HostPersona for complete enumeration.
# # 3.1.3 default: Generic-ALUA
# # 3.1.2 default: General
# 'domain'=> 'myDomain', # Create the host in the
# # specified domain, or default
# # domain if unspecified.
# 'forceTearDown'=> false, # If True, force to tear down
# # low-priority VLUN exports.
# 'descriptors'=>
# {'location'=> 'earth', # The host's location
# 'IPAddr'=> '10.10.10.10', # The host's IP address
# 'os'=> 'linux', # The operating system running on the host.
# 'model'=> 'ex', # The host's model
# 'contact'=> 'Smith', # The host's owner and contact
# 'comment'=> "Joe's box"} # Additional host information
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MISSING_REQUIRED - Name not specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - FCWWNs and iSCSINames are both specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Host name, domain name, or iSCSI name is too long.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EMPTY_STR - Input string (for domain name, iSCSI name, etc.) is empty.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Any error from host-name or domain-name parsing.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TOO_MANY_WWN_OR_iSCSI - More than 1024 WWNs or iSCSI names are specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WRONG_TYPE - The length of WWN is not 16. WWN specification contains non-hexadecimal digit.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_PATH - host WWN/iSCSI name already used by another host.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_HOST - host name is already used.
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - No space to create host.
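#
# ==== Example
#
# A minimal usage sketch (illustrative only; the host name, WWN, and
# persona value are hypothetical):
#
#   cl.create_host('demo_host', nil, ['1000D89D676F29AC'],
#                  { 'persona' => 2, 'domain' => 'demo_domain' })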
def create_host(name, iscsi_names = nil, fcwwns = nil, optional = nil)
begin
@host.create_host(name, iscsi_names, fcwwns, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies an existing Host.
#
# ==== Attributes
#
# * name - Name of the host.
# type name: String
# * mod_request - Objects for host modification request.
# type mod_request: Hash
# mod_request = {
# 'newName'=> 'myNewName', # New name of the host
# 'pathOperation'=> 1, # Refer Hpe3parSdk::HostEditOperation for complete enumeration
# 'FCWWNs'=> [], # One or more WWN to set for the host.
# 'iSCSINames'=> [], # One or more iSCSI names to set for the host.
# 'forcePathRemoval'=> false, # If True, remove SSN(s) or
# # iSCSI(s) even if there are
# # VLUNs exported to host
# 'persona'=> 1, # Refer Hpe3parSdk::HostPersona for complete enumeration.
# 'descriptors'=>
# {'location'=> 'earth', # The host's location
# 'IPAddr'=> '10.10.10.10', # The host's IP address
# 'os'=> 'linux', # The operating system running on the host.
# 'model'=> 'ex', # The host's model
# 'contact'=> 'Smith', # The host's owner and contact
# 'comment'=> 'Joes box'} # Additional host information
# 'chapOperation'=> 1, # Refer Hpe3parSdk::HostEditOperation for complete enumeration
# 'chapOperationMode'=> TARGET, # Refer Hpe3parSdk::ChapOperationMode for complete enumeration
# 'chapName'=> 'MyChapName', # The chap name
# 'chapSecret'=> 'xyz', # The chap secret for the host or the target
# 'chapSecretHex'=> false, # If True, the chapSecret is treated as Hex.
# 'chapRemoveTargetOnly'=> true # If True, then remove target chap only
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Missing host name.
# * Hpe3parSdk::HTTPBadRequest
#   - INV_INPUT_PARAM_CONFLICT - Both iSCSINames and FCWWNs are specified (among other possible conflicts).
# * Hpe3parSdk::HTTPBadRequest
#   - INV_INPUT_ONE_REQUIRED - iSCSINames or FCWWNs missing.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ONE_REQUIRED - No path operation specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BAD_ENUM_VALUE - Invalid enum value.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_MISSING_REQUIRED - Required fields missing.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Host descriptor argument length, new host name, or iSCSI name is too long.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Error parsing host or iSCSI name.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_HOST - New host name is already used.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Host to be modified does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TOO_MANY_WWN_OR_iSCSI - More than 1024 WWNs or iSCSI names are specified.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WRONG_TYPE - Input value is of the wrong type.
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_PATH - WWN or iSCSI name is already claimed by other host.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_BAD_LENGTH - CHAP hex secret length is not 16 bytes, or chap ASCII secret length is not 12 to 16 characters.
# * Hpe3parSdk::HTTPNotFound
# - NO_INITIATOR_CHAP - Setting target CHAP without initiator CHAP.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_CHAP - Remove non-existing CHAP.
# * Hpe3parSdk::HTTPConflict
# - NON_UNIQUE_CHAP_SECRET - CHAP secret is not unique.
# * Hpe3parSdk::HTTPConflict
# - EXPORTED_VLUN - Setting persona with active export; remove a host path on an active export.
# * Hpe3parSdk::HTTPBadRequest
# - NON_EXISTENT_PATH - Remove a non-existing path.
# * Hpe3parSdk::HTTPConflict
# - LUN_HOSTPERSONA_CONFLICT - LUN number and persona capability conflict.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_PATH - Duplicate path specified.
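#
# ==== Example
#
# A minimal usage sketch (illustrative only; renames a hypothetical host):
#
#   cl.modify_host('demo_host', { 'newName' => 'demo_host_renamed' })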
def modify_host(name, mod_request)
begin
@host.modify_host(name, mod_request)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a host.
#
# ==== Attributes
#
# * name - The name of host to be deleted.
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - Host not found
# * Hpe3parSdk::HTTPConflict
# - HOST_IN_SET - Host is a member of a set
def delete_host(name)
begin
@host.delete_host(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Finds the host with the specified FC WWN path.
#
# ==== Attributes
#
# * wwn - Lookup based on WWN.
# type wwn: String
#
# ==== Returns
#
# Host with specified FC WWN.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - HOST Not Found
# * Hpe3parSdk::HTTPInternalServerError
# - INTERNAL_SERVER_ERR - Internal server error.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - Host name contains invalid character.
def query_host_by_fc_path(wwn = nil)
begin
@host.query_host_by_fc_path(wwn)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Finds the host with the specified iSCSI initiator.
#
# ==== Attributes
#
# * iqn - Lookup based on iSCSI initiator.
# type iqn: String
#
# ==== Returns
#
# Host with specified IQN.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The specified host not found.
# * Hpe3parSdk::HTTPInternalServerError
# - INTERNAL_SERVER_ERR - Internal server error.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_ILLEGAL_CHAR - The host name contains invalid character.
def query_host_by_iscsi_path(iqn = nil)
begin
@host.query_host_by_iscsi_path(iqn)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all host sets.
#
# ==== Returns
#
# Array of HostSet.
def get_host_sets
begin
@host_set.get_host_sets
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new HostSet.
#
# ==== Attributes
#
# * name - Name of the host set to be created.
# type name: String
# * domain - The domain in which the host set will be created.
# type domain: String
# * comment - Comment for the host set.
# type comment: String
# * setmembers - The hosts to be added to the set. The existence of the host will not be checked.
# type setmembers: Array of String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
#   - EXISTENT_SET - The set already exists.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_DOMAIN - The domain does not exist.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_DOMAINSET - The host is in a domain set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_SET - The object is already part of the set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_NOT_IN_SAME_DOMAIN - Objects must be in the same domain to perform this operation.
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_HOST - The host does not exist.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
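#
# ==== Example
#
# A minimal usage sketch (illustrative only; all names are hypothetical):
#
#   cl.create_host_set('demo_hostset', nil, 'demo comment', ['demo_host'])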
def create_host_set(name, domain = nil, comment = nil, setmembers = nil)
begin
@host_set.create_host_set(name, domain, comment, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a HostSet.
#
# ==== Attributes
#
# * name - The hostset to delete.
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - EXPORTED_VLUN - The host set has exported VLUNs.
def delete_host_set(name)
begin
@host_set.delete_host_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a HostSet.
#
# ==== Attributes
#
# * name - Hostset name
# type name: String
# * action - Add or Remove host(s) from the set
# type action: Refer values of Hpe3parSdk::SetCustomAction::MEM_ADD and Hpe3parSdk::SetCustomAction::MEM_REMOVE
# * setmembers - Host(s) to add to the set, the existence of the host(s) will not be checked
# type setmembers: Array of String
# * new_name - New name of set
# type new_name: String
# * comment - New comment for the set
# type comment: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
#   - EXISTENT_SET - The set already exists.
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_DOMAINSET - The host is in a domain set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_SET - The object is already part of the set.
# * Hpe3parSdk::HTTPNotFound
# - MEMBER_NOT_IN_SET - The object is not part of the set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_NOT_IN_SAME_DOMAIN - Objects must be in the same domain to perform this operation.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - Invalid input (parameters cannot be present at the same time).
# * Hpe3parSdk::HTTPBadRequest
#   - INV_INPUT_ILLEGAL_CHAR - Invalid input contains one or more illegal characters.
def modify_host_set(name, action = nil, setmembers = nil, new_name = nil, comment = nil)
begin
@host_set.modify_host_set(name, action, setmembers, new_name, comment)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Adds host(s) to a host set.
#
# ==== Attributes
#
# * set_name - Hostset name.
# type set_name: String
# * setmembers - Array of host names to add to the set.
# type setmembers: Array of String
def add_hosts_to_host_set(set_name, setmembers)
begin
@host_set.add_hosts_to_host_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Removes host(s) from a host set.
#
# ==== Attributes
#
# * set_name - The host set name.
# type set_name: String
# * setmembers - Array of host names to remove from the set.
# type setmembers: Array of String
def remove_hosts_from_host_set(set_name, setmembers)
begin
@host_set.remove_hosts_from_host_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Returns an array of every Hostset the given host is a part of. The array can contain zero, one, or multiple items.
#
# ==== Attributes
#
# * host_name - The name of the host whose host sets are to be found.
# type host_name: String
#
# ==== Returns
#
# Array of HostSet.
def find_host_sets(host_name)
begin
@host_set.find_host_sets(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets hostset information by name.
#
# ==== Attributes
#
# * name - The name of the hostset to find.
# type name: String
#
# ==== Returns
#
# HostSet.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_SET - The set does not exist.
def get_host_set(name)
begin
@host_set.get_host_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all of the VLUNs on a specific host.
#
# ==== Attributes
#
# * host_name - Name of the host.
# type host_name: String
#
# ==== Returns
#
# Array of VLUN.
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_HOST - The specified host not found.
def get_host_vluns(host_name)
begin
@host.get_host_vluns(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets all Volumes in the array
#
# ==== Returns
#
# Array of VirtualVolume
def get_volumes
begin
@volume.get_volumes(VolumeCopyType::BASE_VOLUME)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the list of snapshots in the array
#
# ==== Returns
#
# Array of VirtualVolume
def get_snapshots
begin
@volume.get_volumes(VolumeCopyType::VIRTUAL_COPY)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a volume by name
#
# ==== Attributes
#
# * name - The name of the volume to find
# type name: String
#
# ==== Returns
#
# VirtualVolume
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 23 message: volume does not exist
def get_volume(name)
begin
@volume.get_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a volume by wwn
#
# ==== Attributes
#
# * wwn - The wwn of the volume to find
# type wwn: String
#
# ==== Returns
#
# * VirtualVolume
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 23 message: volume does not exist
def get_volume_by_wwn(wwn)
begin
@volume.get_volume_by_wwn(wwn)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new volume.
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
# * cpg_name - the name of the destination CPG
# type cpg_name: String
# * size_MiB - size in MiB for the volume
# type size_MiB: Integer
# * optional - hash of other optional items
# type optional: hash
#
# optional = {
# 'id' => 12, # Volume ID. If not specified, next
# # available is chosen
# 'comment' => 'some comment', # Additional information up to 511
# # characters
#    'policies' => {                  # Specifies VV policies
# 'staleSS' => false, # True allows stale snapshots.
# 'oneHost' => true, # True constrains volume export to
# # single host or host cluster
# 'zeroDetect' => true, # True requests Storage System to
# # scan for zeros in incoming write
# # data
# 'system' => false, # True special volume used by system
# # False is normal user volume
# 'caching' => true}, # Read-only. True indicates write &
# # read caching & read ahead enabled
# 'snapCPG' => 'CPG name', # CPG Used for snapshots
# 'ssSpcAllocWarningPct' => 12, # Snapshot space allocation warning
# 'ssSpcAllocLimitPct' => 22, # Snapshot space allocation limit
# 'tpvv' => true, # True: Create TPVV
# # False (default) Create FPVV
# 'usrSpcAllocWarningPct' => 22, # Enable user space allocation
# # warning
# 'usrSpcAllocLimitPct' => 22, # User space allocation limit
# 'expirationHours' => 256, # Relative time from now to expire
# # volume (max 43,800 hours)
#    'retentionHours' => 256          # Relative time from now to retain
#                                     # volume (max 43,800 hours)
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT - Invalid Parameter
# * Hpe3parSdk::HTTPBadRequest
# - TOO_LARGE - Volume size above limit
# * Hpe3parSdk::HTTPBadRequest
#   - NO_SPACE - Not enough space is available
# * Hpe3parSdk::HTTPForbidden
#   - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPConflict
#   - EXISTENT_SV - Volume already exists
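#
# ==== Example
#
# A minimal usage sketch (illustrative only; creates a hypothetical
# 10 GiB thin-provisioned volume in CPG 'demo_cpg'):
#
#   cl.create_volume('demo_vol', 'demo_cpg', 10 * 1024,
#                    { 'tpvv' => true, 'comment' => 'demo volume' })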
def create_volume(name, cpg_name, size_MiB, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :compression }
end
begin
@volume.create_volume(name, cpg_name, size_MiB, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a volume
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPForbidden
# - RETAINED - Volume retention time has not expired
# * Hpe3parSdk::HTTPForbidden
# - HAS_RO_CHILD - Volume has read-only child
# * Hpe3parSdk::HTTPConflict
# - HAS_CHILD - The volume has a child volume
# * Hpe3parSdk::HTTPConflict
# - IN_USE - The volume is in use by VV set, VLUN, etc
def delete_volume(name)
begin
@volume.delete_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a volume
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
# * volumeMods - Hash of volume attributes to change
# type volumeMods: Hash
# volumeMods = {
# 'newName' => 'newName', # New volume name
# 'comment' => 'some comment', # New volume comment
# 'snapCPG' => 'CPG name', # Snapshot CPG name
#    'policies' => {                  # Specifies VV policies
# 'staleSS' => false, # True allows stale snapshots.
# 'oneHost' => true, # True constrains volume export to
# # single host or host cluster
# 'zeroDetect' => true, # True requests Storage System to
# # scan for zeros in incoming write
# # data
# 'system' => false, # True special volume used by system
# # False is normal user volume
# 'caching' => true}, # Read-only. True indicates write &
# # read caching & read ahead enabled
# 'ssSpcAllocWarningPct' => 12, # Snapshot space allocation warning
# 'ssSpcAllocLimitPct' => 22, # Snapshot space allocation limit
# 'tpvv' => true, # True: Create TPVV
# # False: (default) Create FPVV
# 'usrSpcAllocWarningPct' => 22, # Enable user space allocation
# # warning
# 'usrSpcAllocLimitPct' => 22, # User space allocation limit
# 'userCPG' => 'User CPG name', # User CPG name
# 'expirationHours' => 256, # Relative time from now to expire
# # volume (max 43,800 hours)
# 'retentionHours' => 256, # Relative time from now to retain
# # volume (max 43,800 hours)
# 'rmSsSpcAllocWarning' => false, # True removes snapshot space
# # allocation warning.
# # False sets it when value > 0
#    'rmUsrSpcAllocWarning' => false, # True removes user space
# # allocation warning.
# # False sets it when value > 0
# 'rmExpTime' => false, # True resets expiration time to 0.
# # False sets it when value > 0
# 'rmSsSpcAllocLimit' => false, # True removes snapshot space
# # allocation limit.
# # False sets it when value > 0
# 'rmUsrSpcAllocLimit' => false # True removes user space
# # allocation limit.
# # False sets it when value > 0
# }
#
# ==== Raises:
#
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_WARN_GT_LIMIT - Allocation warning level is higher than
# the limit.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_USR_ALRT_NON_TPVV - User space allocation alerts are
# valid only with a TPVV.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_RETAIN_GT_EXPIRE - Retention time is greater than
# expiration time.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_VV_POLICY - Invalid policy specification (for example,
# caching or system is set to true).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Invalid input: string length exceeds
# limit.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TIME - Invalid time specified.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_MODIFY_USR_CPG_TPVV - usr_cpg cannot be modified
# on a TPVV.
# * Hpe3parSdk::HTTPBadRequest
# - UNLICENSED_FEATURE - Retention time cannot be modified on a
# system without the Virtual Lock license.
# * Hpe3parSdk::HTTPForbidden
# - CPG_NOT_IN_SAME_DOMAIN - Snap CPG is not in the same domain as
# the user CPG.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_PEER_VOLUME - Cannot modify a peer volume.
# * Hpe3parSdk::HTTPInternalServerError
# - INT_SERV_ERR - Metadata of the VV is corrupted.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SYS_VOLUME - Cannot modify retention time on a
# system volume.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - Cannot modify an internal
# volume
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_NOT_DEFINED_ALL_NODES - Cannot modify a
#   volume until the volume is defined on all nodes.
# * Hpe3parSdk::HTTPConflict
# - INVALID_OPERATION_VV_ONLINE_COPY_IN_PROGRESS - Cannot modify a
# volume when an online copy for that volume is in progress.
# * Hpe3parSdk::HTTPConflict
# - INVALID_OPERATION_VV_VOLUME_CONV_IN_PROGRESS - Cannot modify a
# volume in the middle of a conversion operation.
# * Hpe3parSdk::HTTPConflict
# - INVALID_OPERATION_VV_SNAPSPACE_NOT_MOVED_TO_CPG - Snapshot space
# of a volume needs to be moved to a CPG before the user space.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_ACCOUNTING_IN_PROGRESS - The volume
# cannot be renamed until snapshot accounting has finished.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_ZERO_DETECT_TPVV - The zero_detect policy can be
# used only on TPVVs.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_CPG_ON_SNAPSHOT - CPG cannot be assigned to a
# snapshot.
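#
# ==== Example
#
# A minimal usage sketch (illustrative only; renames a hypothetical volume):
#
#   cl.modify_volume('demo_vol', { 'newName' => 'demo_vol_renamed' })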
def modify_volume(name, volume_mods)
begin
@volume.modify_volume(name, volume_mods)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Grows an existing volume by 'amount' Mebibytes.
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
# * amount: the additional size in MiB to add, rounded up to the next chunklet size (e.g. 256 or 1000 MiB)
# type amount: Integer
#
# ==== Raises:
#
# * Hpe3parSdk::HTTPForbidden
# - VV_NOT_IN_SAME_DOMAIN - The volume is not in the same domain.
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_UNSUPPORTED_VV_TYPE - Invalid operation: Cannot
# grow this type of volume.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_TUNE_IN_PROGRESS - Invalid operation: Volume
# tuning is in progress.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_EXCEEDS_LENGTH - Invalid input: String length exceeds
# limit.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_VV_GROW_SIZE - Invalid grow size.
# * Hpe3parSdk::HTTPForbidden
# - VV_NEW_SIZE_EXCEEDS_CPG_LIMIT - New volume size exceeds CPG limit
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - This operation is not allowed
# on an internal volume.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_CONV_IN_PROGRESS - Invalid operation: VV
# conversion is in progress.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_COPY_IN_PROGRESS - Invalid operation:
# online copy is in progress.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_CLEANUP_IN_PROGRESS - Internal volume cleanup is
# in progress.
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed.
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - The volume has an internal consistency
# error.
# * Hpe3parSdk::HTTPForbidden
# - VV_SIZE_CANNOT_REDUCE - New volume size is smaller than the
# current size.
# * Hpe3parSdk::HTTPForbidden
# - VV_NEW_SIZE_EXCEEDS_LIMITS - New volume size exceeds the limit.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_SA_SD_SPACE_REMOVED - Invalid operation: Volume
# SA/SD space is being removed.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_IS_BUSY - Invalid operation: Volume is currently
# busy.
# * Hpe3parSdk::HTTPForbidden
# - VV_NOT_STARTED - Volume is not started.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_IS_PCOPY - Invalid operation: Volume is a
# physical copy.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_NOT_IN_NORMAL_STATE - Volume state is not normal
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_PROMOTE_IN_PROGRESS - Invalid operation: Volume
# promotion is in progress.
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_PARENT_OF_PCOPY - Invalid operation: Volume is
# the parent of physical copy.
# * Hpe3parSdk::HTTPBadRequest
#   - NO_SPACE - Insufficient space for requested operation.
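#
# ==== Example
#
# A minimal usage sketch (illustrative only; grows a hypothetical volume
# by 1 GiB):
#
#   cl.grow_volume('demo_vol', 1024)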
def grow_volume(name, amount)
begin
@volume.grow_volume(name, amount)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a physical copy of a VirtualVolume
#
# ==== Attributes
#
# * src_name - the source volume name
# type src_name: String
# * dest_name - the destination volume name
# type dest_name: String
# * dest_cpg - the destination CPG
# type dest_cpg: String
# * optional - Hash of optional parameters
# type optional: Hash
#
# optional = {
# 'online' => false, # should physical copy be
# # performed online?
# 'tpvv' => false, # use thin provisioned space
# # for destination
# # (online copy only)
# 'snapCPG' => 'OpenStack_SnapCPG', # snapshot CPG for the
# # destination
# # (online copy only)
# 'saveSnapshot' => false, # save the snapshot of the
# # source volume
# 'priority' => 1 # taskPriorityEnum (does not
# # apply to online copy - Hpe3parSdk::TaskPriority)
# }
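#
# ==== Example
#
# A minimal usage sketch (illustrative only; all names are hypothetical):
#
#   cl.create_physical_copy('demo_vol', 'demo_vol_copy', 'demo_cpg',
#                           { 'online' => true, 'tpvv' => true })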
def create_physical_copy(src_name, dest_name, dest_cpg, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
[:compression, :allowRemoteCopyParent, :skipZero].each { |key| optional.delete key }
end
begin
@volume.create_physical_copy(src_name, dest_name, dest_cpg, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a physical copy
#
# ==== Attributes
#
# * name - the name of the clone volume
# type name: String
#
# ==== Raises:
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPForbidden
# - RETAINED - Volume retention time has not expired
# * Hpe3parSdk::HTTPForbidden
# - HAS_RO_CHILD - Volume has read-only child
# * Hpe3parSdk::HTTPConflict
# - HAS_CHILD - The volume has a child volume
# * Hpe3parSdk::HTTPConflict
# - IN_USE - The volume is in use by VV set, VLUN, etc
def delete_physical_copy(name)
begin
@volume.delete_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Tunes a volume
#
# ==== Attributes
#
# * name - the volume name
# type name: String
# * tune_operation - Enum of tune operation - 1: Change User CPG, 2: Change snap CPG
# type tune_operation: Integer
# * optional - hash of optional parameters
# type optional: hash
#
# optional = {
# 'userCPG' => 'user_cpg', # Specifies the new user
# # CPG to which the volume
# # will be tuned.
# 'snapCPG' => 'snap_cpg', # Specifies the snap CPG to
# # which the volume will be
# # tuned.
# 'conversionOperation' => 1, # conversion operation enum. Refer Hpe3parSdk::VolumeConversionOperation
# 'keepVV' => 'new_volume', # Name of the new volume
# # where the original logical disks are saved.
# 'compression' => true # Enables (true) or disables (false) compression.
# # You cannot compress a fully provisioned volume.
# }
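#
# ==== Example
#
# A minimal usage sketch (illustrative only; moves a hypothetical volume
# to user CPG 'demo_cpg'; per the description above, 1 selects the
# change-user-CPG operation):
#
#   task = cl.tune_volume('demo_vol', 1, { 'userCPG' => 'demo_cpg' })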
def tune_volume(name, tune_operation, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :compression }
end
begin
object_hash = @volume.tune_volume(name, tune_operation, optional)
get_task(object_hash['taskid'])
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Returns an array of every VolumeSet the given volume is a part of.
# The array can contain zero, one, or multiple items.
#
# ==== Attributes
#
# * name - the volume name
# type name: String
#
# ==== Returns
#
# Array of VolumeSet
#
# ==== Raises
#
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - Internal inconsistency error in vol
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_VOLUME - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SYS_VOLUME - Illegal op on system vol
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - Illegal op on internal vol
def find_all_volume_sets(name)
begin
@volume_set.find_all_volume_sets(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the Volume Sets
#
# ==== Returns
#
# Array of VolumeSet
def get_volume_sets
begin
@volume_set.get_volume_sets
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the information about a Volume Set.
#
# ==== Attributes
#
# * name - The name of the volume set to find
# type name: String
#
# ==== Returns
#
# VolumeSet
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 102 message: Set does not exist
def get_volume_set(name)
begin
@volume_set.get_volume_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new volume set
#
# ==== Attributes
#
# * name - the volume set to create
# type name: String
# * domain: the domain where the set lives
# type domain: String
# * comment: the comment for the vv set
# type comment: String
# * setmembers: the vv(s) to add to the set, the existence of the vv(s) will not be checked
# type setmembers: Array of String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
#   - INV_INPUT - Invalid URI syntax.
# * Hpe3parSdk::HTTPBadRequest
#   - NON_EXISTENT_DOMAIN - Domain doesn't exist.
# * Hpe3parSdk::HTTPBadRequest
#   - NO_SPACE - Not enough space is available.
# * Hpe3parSdk::HTTPBadRequest
#   - BAD_CPG_PATTERN - A pattern in a CPG specifies illegal values.
# * Hpe3parSdk::HTTPForbidden
#   - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPConflict
#   - EXISTENT_CPG - CPG already exists
def create_volume_set(name, domain = nil, comment = nil, setmembers = nil)
begin
@volume_set.create_volume_set(name, domain, comment, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes the volume set. You must clear all QoS rules before a volume set can be deleted.
#
# ==== Attributes
#
# * name - The name of the VolumeSet
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
#   - EXPORTED_VLUN - The VV set was exported (it has exported VLUNs).
# * Hpe3parSdk::HTTPConflict
#   - VVSET_QOS_TARGET - The VV set is the target of a QoS rule.
def delete_volume_set(name)
begin
@volume_set.delete_volume_set(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a volume set by adding or removing a volume from the volume
# set. Its action is based on the enums MEM_ADD and MEM_REMOVE.
#
# ==== Attributes
#
# * action: add or remove volume from the set
# type action: Hpe3parSdk::SetCustomAction
# * name: the volume set name
# type name: String
# * newName: new name of set
# type newName: String
# * comment: the comment for on the vv set
# type comment: String
# * flash_cache_policy: the flash-cache policy for the vv set
# type flash_cache_policy: enum
# * setmembers: the vv to add to the set, the existence of the vv will not be checked
# type setmembers: Array of String
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
#   - EXISTENT_SET - The set already exists.
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_DOMAINSET - The host is in a domain set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_IN_SET - The object is already part of the set.
# * Hpe3parSdk::HTTPNotFound
# - MEMBER_NOT_IN_SET - The object is not part of the set.
# * Hpe3parSdk::HTTPConflict
# - MEMBER_NOT_IN_SAME_DOMAIN - Objects must be in the same domain to
# perform this operation.
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - The volume has an internal
# inconsistency error.
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed.
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_VOLUME - The volume does not exist.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SYS_VOLUME - The operation is not allowed on a
# system volume.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - The operation is not allowed
# on an internal volume.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_PARAM_CONFLICT - Invalid input (parameters cannot be
# present at the same time).
# * Hpe3parSdk::HTTPBadRequest
#   - INV_INPUT_ILLEGAL_CHAR - Invalid input contains one or more illegal
# characters.
def modify_volume_set(name, action = nil, newName = nil, comment = nil, flash_cache_policy = nil, setmembers = nil)
begin
@volume_set.modify_volume_set(name, action, newName, comment, flash_cache_policy, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Adds volume(s) to a volume set.
#
# ==== Attributes
#
# * set_name - the volume set name
# type set_name: String
# * setmembers - the volume(s) name to add
# type setmembers: Array of String
def add_volumes_to_volume_set(set_name, setmembers)
begin
@volume_set.add_volumes_to_volume_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Removes volume(s) from a volume set
#
# ==== Attributes
#
# * set_name - the volume set name
# type set_name: String
# * setmembers - the volume(s) to remove from the set
# type setmembers: Array of String
def remove_volumes_from_volume_set(set_name, setmembers)
begin
@volume_set.remove_volumes_from_volume_set(set_name, setmembers)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a snapshot of an existing VolumeSet
#
# ==== Attributes
#
# * name: Name of the Snapshot. The vvname pattern is described in "VV Name Patterns" in the HPE 3PAR Command Line Interface Reference, which is available at the following website: http://www.hp.com/go/storage/docs
# type name: String
# * copy_of_name: the name of the parent volume
# type copy_of_name: String
# * comment: the comment on the vv set
# type comment: String
# * optional: Hash of optional params
# type optional: Hash
# optional = {
#    'id' => 12,                      # Specifies ID of the volume set,
#                                     # next available by default
# 'comment' => "some comment",
# 'readOnly' => true, # Read Only
# 'expirationHours' => 36, # time from now to expire
#    'retentionHours' => 12           # time from now to retain
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - INVALID_INPUT_VV_PATTERN - Invalid volume pattern specified
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_SET - The set does not exist.
# * Hpe3parSdk::HTTPNotFound
# - EMPTY_SET - The set is empty
# * Hpe3parSdk::HTTPServiceUnavailable
# - VV_LIMIT_REACHED - Maximum number of volumes reached
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The storage volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - VV_IS_BEING_REMOVED - The volume is being removed
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_READONLY_TO_READONLY_SNAP - Creating a read-only copy from a read-only volume is not permitted
# * Hpe3parSdk::HTTPConflict
# - NO_SNAP_CPG - No snapshot CPG has been configured for the volume
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_DUP_NAME - Invalid input (duplicate name).
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_SNAP_PARENT_SAME_BASE - Two parent snapshots share the same base volume
# * Hpe3parSdk::HTTPConflict
#   - INV_OPERATION_VV_ONLINE_COPY_IN_PROGRESS - Invalid operation. Online copy is in progress
# * Hpe3parSdk::HTTPServiceUnavailable
# - VV_ID_LIMIT_REACHED - Max number of volumeIDs has been reached
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_VOLUME - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_STALE_STATE - The volume is in a stale state.
# * Hpe3parSdk::HTTPForbidden
# - VV_NOT_STARTED - Volume is not started
# * Hpe3parSdk::HTTPForbidden
# - VV_UNAVAILABLE - The volume is not accessible
# * Hpe3parSdk::HTTPServiceUnavailable
# - SNAPSHOT_LIMIT_REACHED - Max number of snapshots has been reached
# * Hpe3parSdk::HTTPServiceUnavailable
# - CPG_ALLOCATION_WARNING_REACHED - The CPG has reached the allocation warning
# * Hpe3parSdk::HTTPConflict
# - INV_OPERATION_VV_VOLUME_CONV_IN_PROGRESS - Invalid operation: VV conversion is in progress.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_CLEANUP_IN_PROGRESS - Internal volume cleanup is in progress.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_PEER_VOLUME - Cannot modify a peer volume.
# * Hpe3parSdk::HTTPConflict
#   - INV_OPERATION_VV_ONLINE_COPY_IN_PROGRESS - The volume is the target of an online copy.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_INTERNAL_VOLUME - Illegal op on internal vol
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_ID - An ID exists
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_VV_NOT_IN_NORMAL_STATE - Volume state is not normal
# * Hpe3parSdk::HTTPForbidden
# - VV_IN_INCONSISTENT_STATE - Internal inconsistency error in vol
# * Hpe3parSdk::HTTPBadRequest
#   - INV_INPUT_RETAIN_GT_EXPIRE - Retention time is greater than expiration time.
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT_TIME - Invalid time specified.
# * Hpe3parSdk::HTTPForbidden
# - INV_OPERATION_SNAPSHOT_NOT_SAME_TYPE - Some snapshots in the volume set are read-only, some are read-write
def create_snapshot_of_volume_set(name, copy_of_name, optional = nil)
begin
@volume_set.create_snapshot_of_volume_set(name, copy_of_name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a snapshot of an existing Volume.
#
# ==== Attributes
#
# * name - the name of the Snapshot
# type name: String
# * copy_of_name - the name of the parent volume
# type copy_of_name: String
# * optional - Hash of other optional items
# type optional: Hash
#
# optional = {
# 'id' => 12, # Specifies the ID of the volume,
# # next by default
# 'comment' => "some comment",
# 'readOnly' => true, # Read Only
# 'expirationHours' => 36, # time from now to expire
#    'retentionHours' => 12           # time from now to retain
# }
#
# ==== Raises
#
# * Hpe3parSdk::HTTPNotFound
#   - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
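#
# ==== Example
#
# A minimal usage sketch (illustrative only; snapshot and volume names
# are hypothetical):
#
#   cl.create_snapshot('demo_snap', 'demo_vol',
#                      { 'readOnly' => true, 'expirationHours' => 24 })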
def create_snapshot(name, copy_of_name, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :allowRemoteCopyParent }
end
begin
@volume.create_snapshot(name, copy_of_name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Restores from a snapshot to a volume
#
# ==== Attributes
#
# * name - the name of the Snapshot
# type name: String
# * optional - hash of other optional items
# type optional: Hash
#
# optional = {
#    'online' => false,                # Enables (true) or disables
#                                      # (false) executing the promote
#                                      # operation on an online volume.
#                                      # The default setting is false.
#
#    'priority' => 2,                  # Does not apply to online promote
#                                      # operation or to stop promote
#                                      # operation.
#
#    'allowRemoteCopyParent' => false  # Allows the promote operation to
#                                      # proceed even if the RW parent
#                                      # volume is currently in a Remote
#                                      # Copy volume group, if that group
#                                      # has not been started. If the
#                                      # Remote Copy group has been
#                                      # started, this command fails.
#                                      # (WSAPI 1.6 and later.)
# }
#
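# ==== Example
#
# A minimal usage sketch (illustrative only; promotes the hypothetical
# snapshot 'demo_snap' back to its parent volume):
#
#   cl.restore_snapshot('demo_snap')
#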
def restore_snapshot(name, optional = nil)
if @current_version < @min_version_with_compression && !optional.nil?
optional.delete_if { |key, _value| key == :allowRemoteCopyParent }
end
begin
@volume.restore_snapshot(name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a snapshot
#
# ==== Attributes
#
# * name - the name of the snapshot volume
# type name: String
#
# ==== Raises:
#
# * Hpe3parSdk::HTTPNotFound
# - NON_EXISTENT_VOL - The volume does not exist
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPForbidden
# - RETAINED - Volume retention time has not expired
# * Hpe3parSdk::HTTPForbidden
# - HAS_RO_CHILD - Volume has read-only child
# * Hpe3parSdk::HTTPConflict
# - HAS_CHILD - The volume has a child volume
# * Hpe3parSdk::HTTPConflict
# - IN_USE - The volume is in use by VV set, VLUN, etc
def delete_snapshot(name)
begin
@volume.delete_volume(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets the snapshots of a particular volume
#
# ==== Attributes
#
# * name - the name of the volume
# type name: String
#
# ==== Returns
#
# Array of VirtualVolume
def get_volume_snapshots(name)
begin
@volume.get_volume_snapshots(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of all ports on the 3PAR.
#
# ==== Returns
#
# Array of Port.
def get_ports
begin
@port.get_ports
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of Fibre Channel Ports.
#
# ==== Attributes
#
# * state - Port link state.
# type state: Integer. Refer Hpe3parSdk::PortLinkState for complete enumeration.
#
# ==== Returns
#
# Array of Fibre Channel Port.
def get_fc_ports(state = nil)
begin
@port.get_fc_ports(state)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of iSCSI Ports.
#
# ==== Attributes
#
# * state - Port link state.
# type state: Integer. Refer Hpe3parSdk::PortLinkState for complete enumeration.
#
# ==== Returns
#
# Array of iSCSI Port.
def get_iscsi_ports(state = nil)
begin
@port.get_iscsi_ports(state)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets an array of IP Ports.
#
# ==== Attributes
#
# * state - Port link state.
# type name: Integer. Refer Hpe3parSdk::PortLinkState for complete enumeration.
#
# ==== Returns
#
# Array of IP Port.
def get_ip_ports(state = nil)
begin
@port.get_ip_ports(state)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets entire list of CPGs.
#
# ==== Returns
#
# CPG array
def get_cpgs
begin
@cpg.get_cpgs
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets information about a Cpg.
#
# ==== Attributes
#
# * name - The name of the cpg to find
# type name: String
#
# ==== Returns
#
# CPG
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 15 message: cpg does not exist
def get_cpg(name)
begin
@cpg.get_cpg(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Creates a new CPG.
#
# ==== Attributes
#
# * name - Name of the cpg
# type name: String
# * optional - Hash of other optional items
# type optional: Hash
#
# optional = {
#        'growthIncrementMiB': 100,    # Growth increment in MiB for
# # each auto-grown operation
# 'growthLimitMiB': 1024, # Auto-grow operation is limited
# # to specified storage amount
# 'usedLDWarningAlertMiB': 200, # Threshold to trigger warning
# # of used logical disk space
# 'domain': 'MyDomain', # Name of the domain object
# 'LDLayout': {
# 'RAIDType': 1, # Disk Raid Type
# 'setSize': 100, # Size in number of chunklets
# 'HA': 0, # Layout supports failure of
# # one port pair (1),
# # one cage (2),
# # or one magazine (3)
#            'chunkletPosPref': 2,     # Chunklet location preference
# # characteristics.
# # Lowest Number/Fastest transfer
# # = 1
# # Higher Number/Slower transfer
# # = 2
# 'diskPatterns': []} # Patterns for candidate disks
# }
#
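# ==== Example
#
# Illustrative usage ('cl' is a client instance; CPG and domain names are
# placeholders):
#
#   cl.create_cpg('MyCPG', { 'domain' => 'MyDomain' })
#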
# ==== Raises
# * Hpe3parSdk::HTTPBadRequest
# - INV_INPUT Invalid URI Syntax.
# * Hpe3parSdk::HTTPBadRequest
# - NON_EXISTENT_DOMAIN - Domain doesn't exist.
# * Hpe3parSdk::HTTPBadRequest
# - NO_SPACE - Not Enough space is available.
# * Hpe3parSdk::HTTPBadRequest
# - BAD_CPG_PATTERN A Pattern in a CPG specifies illegal values.
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
# * Hpe3parSdk::HTTPConflict
# - EXISTENT_CPG - Cpg Exists already
def create_cpg(name, optional = nil)
begin
@cpg.create_cpg(name, optional)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Modifies a CPG.
#
# ==== Attributes
#
# * name - Name of the CPG
# type name: String
# * cpg_mods - Hash of the CPG attributes to change
#   type cpg_mods: Hash
#
#   cpg_mods = {
#        'newName' => "newCPG",      # Specifies the name of the
# # CPG to update.
# 'disableAutoGrow'=>false, # Enables (false) or
# # disables (true) CPG auto
# # grow. Defaults to false.
# 'rmGrowthLimit'=> false, # Enables (false) or
# # disables (true) auto grow
# # limit enforcement. Defaults
# # to false.
# 'rmWarningAlert'=> false, # Enables (false) or
# # disables (true) warning
# # limit enforcement. Defaults
# # to false.
# }
#
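# ==== Example
#
# Illustrative usage ('cl' is a client instance):
#
#   cl.modify_cpg('MyCPG', { 'newName' => 'MyNewCPG' })
#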
def modify_cpg(name, cpg_mods)
begin
@cpg.modify_cpg(name, cpg_mods)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Gets available space information about a cpg.
#
# ==== Attributes
#
# * name - The name of the cpg to find
# type name: String
#
# ==== Returns
#
# Available space details in form of LDLayoutCapacity object
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 15 message: cpg does not exist
def get_cpg_available_space(name)
begin
@cpg.get_cpg_available_space(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Deletes a CPG.
#
# ==== Attributes
#
# * name - The name of the CPG
# type name: String
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error with code: 15 message: CPG does not exist
# * Hpe3parSdk::HTTPForbidden
# - IN_USE - The CPG Cannot be removed because it's in use.
# * Hpe3parSdk::HTTPForbidden
# - PERM_DENIED - Permission denied
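# NOTE: the method definition was missing beneath this doc comment; the
# body below is assumed to follow the same delegation pattern as the
# other CPG helpers.
def delete_cpg(name)
  begin
    @cpg.delete_cpg(name)
  rescue => ex
    Util.log_exception(ex, caller_locations(1, 1)[0].label)
    raise ex
  end
end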
# Gets the status of an online physical copy
#
# ==== Attributes
#
# * name - The name of the volume
# type name: str
#
# ==== Returns
#
# Status of online copy (String)
#
# ==== Raises
#
# * Hpe3parSdk::HPE3PARException
# Error: message: Volume not an online physical copy
def get_online_physical_copy_status(name)
begin
@volume.get_online_physical_copy_status(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Stops an offline physical copy operation
#
# ==== Attributes
#
# * name - The name of the volume
# type name: String
def stop_offline_physical_copy(name)
begin
@volume.stop_offline_physical_copy(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Stops an online physical copy operation
#
# ==== Attributes
#
# * name - The name of the volume
# type name: String
def stop_online_physical_copy(name)
begin
@volume.stop_online_physical_copy(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Resynchronizes a physical copy.
#
# ==== Attributes
#
# * name - The name of the volume
# type name: String
def resync_physical_copy(name)
begin
@volume.resync_physical_copy(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Waits for a 3PAR task to end.
#
# ==== Attributes
#
# * task_id - The Id of the task to be waited upon.
# type task_id: Integer
# * poll_rate_secs - The polling interval in seconds.
# type poll_rate_secs: Integer
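#
# ==== Example
#
# Illustrative usage with a task id returned by an asynchronous operation:
#
#   cl.wait_for_task_to_end(task_id, 10)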
def wait_for_task_to_end(task_id, poll_rate_secs = 15)
begin
@task.wait_for_task_to_end(task_id, poll_rate_secs)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Cancel a 3PAR task
#
# ==== Attributes
#
# * task_id - The Id of the task to be cancelled.
# type task_id: Integer
# ==== Raises
#
# * Hpe3parSdk::HTTPBadRequest
# - NON_ACTIVE_TASK - The task is not active at this time.
# * Hpe3parSdk::HTTPConflict
#   - INV_OPERATION_CANNOT_CANCEL_TASK - Invalid operation: Task cannot be cancelled.
def cancel_task(task_id)
begin
@task.cancel_task(task_id)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def flash_cache_exists?
begin
@flash_cache.flash_cache_exists?
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def volume_exists?(name)
begin
@volume.volume_exists?(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def volume_set_exists?(name)
begin
@volume_set.volume_set_exists?(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def host_exists?(host_name)
begin
@host.host_exists?(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def host_set_exists?(host_name)
begin
@host_set.host_set_exists?(host_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def cpg_exists?(name)
begin
@cpg.cpg_exists?(name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def online_physical_copy_exists?(src_name, phy_copy_name)
begin
@volume.online_physical_copy_exists?(src_name, phy_copy_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
def offline_physical_copy_exists?(src_name, phy_copy_name)
begin
@volume.offline_physical_copy_exists?(src_name, phy_copy_name)
rescue => ex
Util.log_exception(ex, caller_locations(1, 1)[0].label)
raise ex
end
end
# Logout from the 3PAR Array
def logout
unless @log_file_path.nil?
unless Hpe3parSdk.logger.nil?
Hpe3parSdk.logger.close
Hpe3parSdk.logger = nil
end
end
begin
@http.unauthenticate
rescue Hpe3parSdk::HPE3PARException
  # Do nothing; logout should not fail if the session is already invalid
end
end
end
|
mongodb/mongo-ruby-driver | lib/mongo/collection.rb | Mongo.Collection.drop | ruby | def drop(opts = {})
client.send(:with_session, opts) do |session|
Operation::Drop.new({
selector: { :drop => name },
db_name: database.name,
write_concern: write_concern,
session: session
}).execute(next_primary)
end
rescue Error::OperationFailure => ex
raise ex unless ex.message =~ /ns not found/
false
end | Drop the collection. Will also drop all indexes associated with the
collection.
@note An error returned if the collection doesn't exist is suppressed.
@example Drop the collection.
collection.drop
@param [ Hash ] opts The options for the drop operation.
@option options [ Session ] :session The session to use for the operation.
@return [ Result ] The result of the command.
@since 2.0.0 | train | https://github.com/mongodb/mongo-ruby-driver/blob/dca26d0870cb3386fad9ccc1d17228097c1fe1c8/lib/mongo/collection.rb#L216-L228 | class Collection
extend Forwardable
include Retryable
# The capped option.
#
# @since 2.1.0
CAPPED = 'capped'.freeze
# The ns field constant.
#
# @since 2.1.0
NS = 'ns'.freeze
# @return [ Mongo::Database ] The database the collection resides in.
attr_reader :database
# @return [ String ] The name of the collection.
attr_reader :name
# @return [ Hash ] The collection options.
attr_reader :options
# Get client, cluster, read preference, and write concern from client.
def_delegators :database, :client, :cluster
# Delegate to the cluster for the next primary.
def_delegators :cluster, :next_primary
# Options that can be updated on a new Collection instance via the #with method.
#
# @since 2.1.0
CHANGEABLE_OPTIONS = [ :read, :read_concern, :write ].freeze
# Check if a collection is equal to another object. Will check the name and
# the database for equality.
#
# @example Check collection equality.
# collection == other
#
# @param [ Object ] other The object to check.
#
# @return [ true, false ] If the objects are equal.
#
# @since 2.0.0
def ==(other)
return false unless other.is_a?(Collection)
name == other.name && database == other.database && options == other.options
end
# Instantiate a new collection.
#
# @example Instantiate a new collection.
# Mongo::Collection.new(database, 'test')
#
# @param [ Mongo::Database ] database The collection's database.
# @param [ String, Symbol ] name The collection name.
# @param [ Hash ] options The collection options.
#
# @since 2.0.0
def initialize(database, name, options = {})
raise Error::InvalidCollectionName.new unless name
@database = database
@name = name.to_s.freeze
@options = options.freeze
end
# Get the read concern for this collection instance.
#
# @example Get the read concern.
# collection.read_concern
#
# @return [ Hash ] The read concern.
#
# @since 2.2.0
def read_concern
options[:read_concern] || database.read_concern
end
# Get the server selector on this collection.
#
# @example Get the server selector.
# collection.server_selector
#
# @return [ Mongo::ServerSelector ] The server selector.
#
# @since 2.0.0
def server_selector
@server_selector ||= ServerSelector.get(read_preference || database.server_selector)
end
# Get the read preference on this collection.
#
# @example Get the read preference.
# collection.read_preference
#
# @return [ Hash ] The read preference.
#
# @since 2.0.0
def read_preference
@read_preference ||= options[:read] || database.read_preference
end
# Get the write concern on this collection.
#
# @example Get the write concern.
# collection.write_concern
#
# @return [ Mongo::WriteConcern ] The write concern.
#
# @since 2.0.0
def write_concern
@write_concern ||= WriteConcern.get(options[:write] || database.write_concern)
end
# Provides a new collection with either a new read preference or new write concern
# merged over the existing read preference / write concern.
#
# @example Get a collection with changed read preference.
# collection.with(:read => { :mode => :primary_preferred })
#
# @example Get a collection with changed write concern.
# collection.with(:write => { w: 3 })
# @param [ Hash ] new_options The new options to use.
#
# @return [ Mongo::Collection ] A new collection instance.
#
# @since 2.1.0
def with(new_options)
new_options.keys.each do |k|
raise Error::UnchangeableCollectionOption.new(k) unless CHANGEABLE_OPTIONS.include?(k)
end
Collection.new(database, name, options.merge(new_options))
end
# Is the collection capped?
#
# @example Is the collection capped?
# collection.capped?
#
# @return [ true, false ] If the collection is capped.
#
# @since 2.0.0
def capped?
database.command(:collstats => name).documents[0][CAPPED]
end
# Force the collection to be created in the database.
#
# @example Force the collection to be created.
# collection.create
#
# @param [ Hash ] opts The options for the create operation.
#
# @option options [ Session ] :session The session to use for the operation.
#
# @return [ Result ] The result of the command.
#
# @since 2.0.0
def create(opts = {})
operation = { :create => name }.merge(options)
operation.delete(:write)
server = next_primary
if (options[:collation] || options[Operation::COLLATION]) && !server.features.collation_enabled?
raise Error::UnsupportedCollation.new
end
client.send(:with_session, opts) do |session|
Operation::Create.new({
selector: operation,
db_name: database.name,
write_concern: write_concern,
session: session
}).execute(server)
end
end
# Drop the collection. Will also drop all indexes associated with the
# collection.
#
# @note An error returned if the collection doesn't exist is suppressed.
#
# @example Drop the collection.
# collection.drop
#
# @param [ Hash ] opts The options for the drop operation.
#
# @option options [ Session ] :session The session to use for the operation.
#
# @return [ Result ] The result of the command.
#
# @since 2.0.0
# Find documents in the collection.
#
# @example Find documents in the collection by a selector.
# collection.find(name: 1)
#
# @example Get all documents in a collection.
# collection.find
#
# @param [ Hash ] filter The filter to use in the find.
# @param [ Hash ] options The options for the find.
#
# @option options [ true, false ] :allow_partial_results Allows the query to get partial
# results if some shards are down.
# @option options [ Integer ] :batch_size The number of documents returned in each batch
# of results from MongoDB.
# @option options [ String ] :comment Associate a comment with the query.
# @option options [ :tailable, :tailable_await ] :cursor_type The type of cursor to use.
# @option options [ Integer ] :limit The max number of docs to return from the query.
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the query
# to run in milliseconds.
# @option options [ Hash ] :modifiers A document containing meta-operators modifying the
# output or behavior of a query.
# @option options [ true, false ] :no_cursor_timeout The server normally times out idle
# cursors after an inactivity period (10 minutes) to prevent excess memory use.
# Set this option to prevent that.
# @option options [ true, false ] :oplog_replay Internal replication use only - driver
# should not set.
# @option options [ Hash ] :projection The fields to include or exclude from each doc
# in the result set.
# @option options [ Integer ] :skip The number of docs to skip before returning results.
# @option options [ Hash ] :sort The key and direction pairs by which the result set
# will be sorted.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ CollectionView ] The collection view.
#
# @since 2.0.0
def find(filter = nil, options = {})
View.new(self, filter || {}, options)
end
# Perform an aggregation on the collection.
#
# @example Perform an aggregation.
# collection.aggregate([ { "$group" => { "_id" => "$city", "tpop" => { "$sum" => "$pop" }}} ])
#
# @param [ Array<Hash> ] pipeline The aggregation pipeline.
# @param [ Hash ] options The aggregation options.
#
# @option options [ true, false ] :allow_disk_use Set to true if disk usage is allowed during
# the aggregation.
# @option options [ Integer ] :batch_size The number of documents to return per batch.
# @option options [ Integer ] :max_time_ms The maximum amount of time in milliseconds to allow the
# aggregation to run.
# @option options [ true, false ] :use_cursor Indicates whether the command will request that the server
# provide results using a cursor. Note that as of server version 3.6, aggregations always provide results
# using a cursor and this option is therefore not valid.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :collation The collation to use.
# @option options [ String ] :comment Associate a comment with the aggregation.
# @option options [ Session ] :session The session to use.
#
# @return [ Aggregation ] The aggregation object.
#
# @since 2.1.0
def aggregate(pipeline, options = {})
View.new(self, {}, options).aggregate(pipeline, options)
end
# As of version 3.6 of the MongoDB server, a ``$changeStream`` pipeline
# stage is supported in the aggregation framework. This stage allows users
# to request that notifications are sent for all changes to a particular
# collection.
#
# @example Get change notifications for a given collection.
# collection.watch([{ '$match' => { operationType: { '$in' => ['insert', 'replace'] } } }])
#
# @param [ Array<Hash> ] pipeline Optional additional filter operators.
# @param [ Hash ] options The change stream options.
#
# @option options [ String ] :full_document Allowed values: ‘default’,
# ‘updateLookup’. Defaults to ‘default’. When set to ‘updateLookup’,
# the change notification for partial updates will include both a delta
# describing the changes to the document, as well as a copy of the entire
# document that was changed from some time after the change occurred.
# @option options [ BSON::Document, Hash ] :resume_after Specifies the
# logical starting point for the new change stream.
# @option options [ Integer ] :max_await_time_ms The maximum amount of time
# for the server to wait on new documents to satisfy a change stream query.
# @option options [ Integer ] :batch_size The number of documents to return
# per batch.
# @option options [ BSON::Document, Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
# @option options [ BSON::Timestamp ] :start_at_operation_time Only return
# changes that occurred at or after the specified timestamp. Any command run
# against the server will return a cluster time that can be used here.
# Only recognized by server versions 4.0+.
#
# @note A change stream only allows 'majority' read concern.
# @note This helper method is preferable to running a raw aggregation with
# a $changeStream stage, for the purpose of supporting resumability.
#
# @return [ ChangeStream ] The change stream object.
#
# @since 2.5.0
def watch(pipeline = [], options = {})
View::ChangeStream.new(View.new(self, {}, options), pipeline, nil, options)
end
# Gets the number of matching documents in the collection.
#
# @example Get the count.
# collection.count(name: 1)
#
# @param [ Hash ] filter A filter for matching documents.
# @param [ Hash ] options The count options.
#
# @option options [ Hash ] :hint The index to use.
# @option options [ Integer ] :limit The maximum number of documents to count.
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command to run.
# @option options [ Integer ] :skip The number of documents to skip before counting.
# @option options [ Hash ] :read The read preference options.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Integer ] The document count.
#
# @since 2.1.0
#
# @deprecated Use #count_documents or estimated_document_count instead. However, note that the
# following operators will need to be substituted when switching to #count_documents:
# * $where should be replaced with $expr (only works on 3.6+)
# * $near should be replaced with $geoWithin with $center
# * $nearSphere should be replaced with $geoWithin with $centerSphere
def count(filter = nil, options = {})
View.new(self, filter || {}, options).count(options)
end
# Gets the number of of matching documents in the collection. Unlike the deprecated #count
# method, this will return the exact number of documents matching the filter rather than the estimate.
#
# @example Get the number of documents in the collection.
# collection_view.count_documents
#
# @param [ Hash ] filter A filter for matching documents.
# @param [ Hash ] options Options for the operation.
#
# @option opts :skip [ Integer ] The number of documents to skip.
# @option opts :hint [ Hash ] Override default index selection and force
# MongoDB to use a specific index for the query. Requires server version 3.6+.
# @option opts :limit [ Integer ] Max number of docs to count.
# @option opts :max_time_ms [ Integer ] The maximum amount of time to allow the
# command to run.
# @option opts [ Hash ] :read The read preference options.
# @option opts [ Hash ] :collation The collation to use.
#
# @return [ Integer ] The document count.
#
# @since 2.6.0
def count_documents(filter, options = {})
View.new(self, filter, options).count_documents(options)
end
# Gets an estimate of the count of documents in a collection using collection metadata.
#
# @example Get the number of documents in the collection.
# collection_view.estimated_document_count
#
# @param [ Hash ] options Options for the operation.
#
# @option opts :max_time_ms [ Integer ] The maximum amount of time to allow the command to
# run.
# @option opts [ Hash ] :read The read preference options.
#
# @return [ Integer ] The document count.
#
# @since 2.6.0
def estimated_document_count(options = {})
View.new(self, {}, options).estimated_document_count(options)
end
# Get a list of distinct values for a specific field.
#
# @example Get the distinct values.
# collection.distinct('name')
#
# @param [ Symbol, String ] field_name The name of the field.
# @param [ Hash ] filter The documents from which to retrieve the distinct values.
# @param [ Hash ] options The distinct command options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command to run.
# @option options [ Hash ] :read The read preference options.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Array<Object> ] The list of distinct values.
#
# @since 2.1.0
def distinct(field_name, filter = nil, options = {})
View.new(self, filter || {}, options).distinct(field_name, options)
end
# Get a view of all indexes for this collection. Can be iterated or has
# more operations.
#
# @example Get the index view.
# collection.indexes
#
# @param [ Hash ] options Options for getting a list of all indexes.
#
# @option options [ Session ] :session The session to use.
#
# @return [ View::Index ] The index view.
#
# @since 2.0.0
def indexes(options = {})
Index::View.new(self, options)
end
# Get a pretty printed string inspection for the collection.
#
# @example Inspect the collection.
# collection.inspect
#
# @return [ String ] The collection inspection.
#
# @since 2.0.0
def inspect
"#<Mongo::Collection:0x#{object_id} namespace=#{namespace}>"
end
# Insert a single document into the collection.
#
# @example Insert a document into the collection.
# collection.insert_one({ name: 'test' })
#
# @param [ Hash ] document The document to insert.
# @param [ Hash ] opts The insert options.
#
# @option opts [ Session ] :session The session to use for the operation.
#
# @return [ Result ] The database response wrapper.
#
# @since 2.0.0
def insert_one(document, opts = {})
client.send(:with_session, opts) do |session|
write_with_retry(session, write_concern) do |server, txn_num|
Operation::Insert.new(
:documents => [ document ],
:db_name => database.name,
:coll_name => name,
:write_concern => write_concern,
:bypass_document_validation => !!opts[:bypass_document_validation],
:options => opts,
:id_generator => client.options[:id_generator],
:session => session,
:txn_num => txn_num
).execute(server)
end
end
end
# Insert the provided documents into the collection.
#
# @example Insert documents into the collection.
# collection.insert_many([{ name: 'test' }])
#
# @param [ Array<Hash> ] documents The documents to insert.
# @param [ Hash ] options The insert options.
#
# @option options [ Session ] :session The session to use for the operation.
#
# @return [ Result ] The database response wrapper.
#
# @since 2.0.0
def insert_many(documents, options = {})
inserts = documents.map{ |doc| { :insert_one => doc }}
bulk_write(inserts, options)
end
# Execute a batch of bulk write operations.
#
# @example Execute a bulk write.
#   collection.bulk_write([{ insert_one: { name: 'test' } }], ordered: true)
#
# @param [ Array<Hash> ] requests The bulk write requests.
# @param [ Hash ] options The options.
#
# @option options [ true, false ] :ordered Whether the operations
# should be executed in order.
# @option options [ Hash ] :write_concern The write concern options.
# Can be :w => Integer, :fsync => Boolean, :j => Boolean.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Session ] :session The session to use for the set of operations.
#
# @return [ BulkWrite::Result ] The result of the operation.
#
# @since 2.0.0
def bulk_write(requests, options = {})
BulkWrite.new(self, requests, options).execute
end
# Remove a document from the collection.
#
# @example Remove a single document from the collection.
# collection.delete_one
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] options The options.
#
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
def delete_one(filter = nil, options = {})
find(filter, options).delete_one(options)
end
# Remove documents from the collection.
#
# @example Remove multiple documents from the collection.
# collection.delete_many
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] options The options.
#
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
def delete_many(filter = nil, options = {})
find(filter, options).delete_many(options)
end
# Execute a parallel scan on the collection view.
#
# Returns a list of up to cursor_count cursors that can be iterated concurrently.
# As long as the collection is not modified during scanning, each document appears once
# in one of the cursors' result sets.
#
# @example Execute a parallel collection scan.
# collection.parallel_scan(2)
#
# @param [ Integer ] cursor_count The max number of cursors to return.
# @param [ Hash ] options The parallel scan command options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command
# to run in milliseconds.
# @option options [ Session ] :session The session to use.
#
# @return [ Array<Cursor> ] An array of cursors.
#
# @since 2.1
def parallel_scan(cursor_count, options = {})
find({}, options).send(:parallel_scan, cursor_count, options)
end
# Replaces a single document in the collection with the new document.
#
# @example Replace a single document.
# collection.replace_one({ name: 'test' }, { name: 'test1' })
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] replacement The replacement document..
# @param [ Hash ] options The options.
#
# @option options [ true, false ] :upsert Whether to upsert if the
# document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
def replace_one(filter, replacement, options = {})
find(filter, options).replace_one(replacement, options)
end
# Update documents in the collection.
#
# @example Update multiple documents in the collection.
# collection.update_many({ name: 'test'}, '$set' => { name: 'test1' })
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] update The update statement.
# @param [ Hash ] options The options.
#
# @option options [ true, false ] :upsert Whether to upsert if the
# document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Array ] :array_filters A set of filters specifying to which array elements
# an update should apply.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
def update_many(filter, update, options = {})
find(filter, options).update_many(update, options)
end
# Update a single document in the collection.
#
# @example Update a single document in the collection.
# collection.update_one({ name: 'test'}, '$set' => { name: 'test1'})
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] update The update statement.
# @param [ Hash ] options The options.
#
# @option options [ true, false ] :upsert Whether to upsert if the
# document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Array ] :array_filters A set of filters specifying to which array elements
# an update should apply.
# @option options [ Session ] :session The session to use.
#
# @return [ Result ] The response from the database.
#
# @since 2.1.0
def update_one(filter, update, options = {})
find(filter, options).update_one(update, options)
end
# Finds a single document in the database via findAndModify and deletes
# it, returning the original document.
#
# @example Find one document and delete it.
# collection.find_one_and_delete(name: 'test')
#
# @param [ Hash ] filter The filter to use.
# @param [ Hash ] options The options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command
# to run in milliseconds.
# @option options [ Hash ] :projection The fields to include or exclude in the returned doc.
# @option options [ Hash ] :sort The key and direction pairs by which the result set
# will be sorted.
# @option options [ Hash ] :write_concern The write concern options.
# Defaults to the collection's write concern.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ BSON::Document, nil ] The document, if found.
#
# @since 2.1.0
def find_one_and_delete(filter, options = {})
find(filter, options).find_one_and_delete(options)
end
# Finds a single document via findAndModify and updates it, returning the original doc unless
# otherwise specified.
#
# @example Find a document and update it, returning the original.
# collection.find_one_and_update({ name: 'test' }, { "$set" => { name: 'test1' }})
#
# @example Find a document and update it, returning the updated document.
# collection.find_one_and_update({ name: 'test' }, { "$set" => { name: 'test1' }}, :return_document => :after)
#
# @param [ Hash ] filter The filter to use.
# @param [ BSON::Document ] update The update statement.
# @param [ Hash ] options The options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command
# to run in milliseconds.
# @option options [ Hash ] :projection The fields to include or exclude in the returned doc.
# @option options [ Hash ] :sort The key and direction pairs by which the result set
# will be sorted.
# @option options [ Symbol ] :return_document Either :before or :after.
# @option options [ true, false ] :upsert Whether to upsert if the document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :write_concern The write concern options.
# Defaults to the collection's write concern.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Array ] :array_filters A set of filters specifying to which array elements
# an update should apply.
# @option options [ Session ] :session The session to use.
#
# @return [ BSON::Document ] The document.
#
# @since 2.1.0
def find_one_and_update(filter, update, options = {})
find(filter, options).find_one_and_update(update, options)
end
# Finds a single document and replaces it, returning the original doc unless
# otherwise specified.
#
# @example Find a document and replace it, returning the original.
# collection.find_one_and_replace({ name: 'test' }, { name: 'test1' })
#
# @example Find a document and replace it, returning the new document.
# collection.find_one_and_replace({ name: 'test' }, { name: 'test1' }, :return_document => :after)
#
# @param [ Hash ] filter The filter to use.
# @param [ BSON::Document ] replacement The replacement document.
# @param [ Hash ] options The options.
#
# @option options [ Integer ] :max_time_ms The maximum amount of time to allow the command
# to run in milliseconds.
# @option options [ Hash ] :projection The fields to include or exclude in the returned doc.
# @option options [ Hash ] :sort The key and direction pairs by which the result set
# will be sorted.
# @option options [ Symbol ] :return_document Either :before or :after.
# @option options [ true, false ] :upsert Whether to upsert if the document doesn't exist.
# @option options [ true, false ] :bypass_document_validation Whether or
# not to skip document level validation.
# @option options [ Hash ] :write_concern The write concern options.
# Defaults to the collection's write concern.
# @option options [ Hash ] :collation The collation to use.
# @option options [ Session ] :session The session to use.
#
# @return [ BSON::Document ] The document.
#
# @since 2.1.0
def find_one_and_replace(filter, replacement, options = {})
find(filter, options).find_one_and_update(replacement, options)
end
# Get the fully qualified namespace of the collection.
#
# @example Get the fully qualified namespace.
# collection.namespace
#
# @return [ String ] The collection namespace.
#
# @since 2.0.0
def namespace
"#{database.name}.#{name}"
end
end
|
appdrones/page_record | lib/page_record/attributes.rb | PageRecord.Attributes.read_attribute | ruby | def read_attribute(attribute)
if block_given?
element = yield
else
element = send("#{attribute}?")
end
tag = element.tag_name
input_field?(tag) ? element.value : element.text
end | Searches the record for the specified attribute and returns
the text content. This method is called when you access an
attribute of a record
@return [String] the text content of the specified attribute
@raise [AttributeNotFound] when the attribute is not found in the record | train | https://github.com/appdrones/page_record/blob/2a6d285cbfab906dad6f13f66fea1c09d354b762/lib/page_record/attributes.rb#L27-L35 | module Attributes
##
# Searches the record for the specified attribute and returns
# the {http://rubydoc.info/github/jnicklas/capybara/master/Capybara/Result Capybara Result}.
# This method is called when you access an attribute with a `?` of a record
#
# @return [Capybara::Result] the text content of the specified attribute
#
# @raise [AttributeNotFound] when the attribute is not found in the record
#
def read_attribute?(attribute)
@record.find("[data-attribute-for='#{attribute}']")
rescue Capybara::ElementNotFound
raise AttributeNotFound, "#{@type} record with id #{@id} doesn't contain attribute #{attribute}"
end
##
# Searches the record for the specified attribute and returns
# the text content. This method is called when you access an
# attribute of a record
#
# @return [String] the text content of the specified attribute
#
# @raise [AttributeNotFound] when the attribute is not found in the record
#
##
# Searches the record for the specified attribute and sets the value of the attribute
# This method is called when you set an attribute of a record
#
# @return [Capybara::Result] the text content of the specified attribute
#
# @raise [AttributeNotFound] when the attribute is not found in the record
# @raise [NotInputField] when the attribute is not a `TEXTAREA` or `INPUT` tag
#
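# @example Setting an attribute directly (record and value are illustrative)
#   record.write_attribute(:name, 'John')
#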
def write_attribute(attribute, value)
element = send("#{attribute}?")
tag = element.tag_name
case tag
when 'textarea', 'input' then element.set(value)
when 'select'then element.select(value)
else raise NotInputField
end
element
end
private
# @private
def input_field?(tag)
case tag
when 'textarea', 'input', 'select' then true
else false
end
end
end
|
koraktor/metior | lib/metior/collections/commit_collection.rb | Metior.CommitCollection.<< | ruby | def <<(commit)
return self if key? commit.id
if @additions.nil? && empty? && commit.line_stats?
@additions = commit.additions
@deletions = commit.deletions
elsif !@additions.nil?
@additions += commit.additions
@deletions += commit.deletions
end
super
end | Creates a new collection with the given commits
@param [Array<Commit>] commits The commits that should be initially
inserted into the collection
Adds a commit to this collection
@param [Commit] commit The commit to add to this collection
@return [CommitCollection] The collection itself | train | https://github.com/koraktor/metior/blob/02da0f330774c91e1a7325a5a7edbe696f389f95/lib/metior/collections/commit_collection.rb#L47-L59 | class CommitCollection < Collection
# Creates a new collection with the given commits
#
# @param [Array<Commit>] commits The commits that should be initially
# inserted into the collection
def initialize(commits = [], range = nil)
@additions = nil
@deletions = nil
@range = range
commits_with_stats = commits.select { |c| c.line_stats? }
unless commits_with_stats.empty?
@additions = 0
@deletions = 0
commits_with_stats.each do |commit|
@additions += commit.additions
@deletions += commit.deletions
end
end
super commits
end
# Adds a commit to this collection
#
# @param [Commit] commit The commit to add to this collection
# @return [CommitCollection] The collection itself
# Calculate some predefined activity statistics for the commits in this
# collection
#
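# @example (values are illustrative)
#   commits.activity[:commits_per_day] #=> 3.42
#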
# @return [Hash<Symbol, Object>] The calculated statistics for the commits
# in this collection
# @see Commit#committed_date
def activity
activity = {}
return activity if empty?
commit_count = values.size
active_days = {}
each do |commit|
date = commit.committed_date.utc
day = Time.utc(date.year, date.month, date.day).send :to_date
if active_days.key? day
active_days[day] += 1
else
active_days[day] = 1
end
end
most_active_day = active_days.sort_by { |day, count| count }.last.first
activity[:first_commit_date] = last.committed_date
activity[:last_commit_date] = first.committed_date
age_in_days = (Time.now - activity[:first_commit_date]) / 86400.0
activity[:active_days] = active_days
activity[:most_active_day] = most_active_day
activity[:commits_per_day] = commit_count / age_in_days
activity[:commits_per_active_day] = commit_count.to_f / active_days.size
activity
end
# Returns the lines of code that have been added by the commits in this
# collection
#
# This will load the line stats from the commits if not done yet.
#
# @return [Fixnum] The lines of code that have been added
# @see #load_line_stats
def additions
support! :line_stats
load_line_stats if @additions.nil?
@additions
end
# Returns the commits in this collection that have been committed after the
# given time
#
# @param [Time, Date, DateTime, String] date The time to use as the lower
# limit to filter the commits
# @return [CommitCollection] The commits that have been committed after the
# given date
# @see Commit#committed_date
# @see Time.parse
def after(date)
date = Time.parse date if date.is_a? String
commits = CommitCollection.new
each do |commit|
commits << commit if commit.committed_date > date
end
commits
end
alias_method :newer, :after
# Returns the authors of all or a specific commit in this collection
#
# @param [Object] commit_id The ID of the commit, if only the author of a
# specific commit should be returned
# @return [ActorCollection] All authors of the commits in this collection
# or the author of a specific commit
# @see Commit#author
def authors(commit_id = nil)
authors = ActorCollection.new
if commit_id.nil?
each { |commit| authors << commit.author }
elsif key? commit_id
authors << self[commit_id].author
end
authors
end
# Returns the commits in this collection that have been committed before
# the given time
#
# @param [Time, Date, DateTime, String] date The time to use as the upper
# limit to filter the commits
# @return [CommitCollection] The commits that have been committed after the
# given date
# @see Commit#committed_date
# @see Time.parse
def before(date)
date = Time.parse date if date.is_a? String
commits = CommitCollection.new
each do |commit|
commits << commit if commit.committed_date < date
end
commits
end
alias_method :older, :before
# Returns the list of commits that have been authored by the given authors
#
# @param [Array<Actor, Object>] author_ids One or more actual `Actor`
# instances or IDs of the authors that the commits should be
# filtered by
# @return [CommitCollection] The commits that have been authored by the
# given authors
# @see Commit#author
def by(*author_ids)
author_ids = author_ids.flatten.map do |author_id|
author_id.is_a?(Actor) ? author_id.id : author_id
end
commits = CommitCollection.new
each do |commit|
commits << commit if author_ids.include? commit.author.id
end
commits
end
# Returns the commits in this collection that change any of the given files
#
# @param [Array<String>] files The path of the files to filter commits by
# @return [CommitCollection] The commits that contain changes to the given
# files
# @see Commit#added_files
# @see Commit#deleted_files
# @see Commit#modified_files
def changing(*files)
support! :file_stats
commits = CommitCollection.new
each do |commit|
commit_files = commit.added_files + commit.deleted_files + commit.modified_files
commits << commit unless (commit_files & files).empty?
end
commits
end
alias_method :touching, :changing
# Returns the committers of all or a specific commit in this collection
#
# @param [Object] commit_id The ID of the commit, if only the committer of
# a specific commit should be returned
# @return [ActorCollection] All committers of the commits in this
# collection or the committer of a specific commit
# @see Commit#committer
def committers(commit_id = nil)
committers = ActorCollection.new
if commit_id.nil?
each { |commit| committers << commit.committer }
elsif key? commit_id
committers << self[commit_id].committer
end
committers
end
# Returns the lines of code that have been deleted by the commits in this
# collection
#
# This will load the line stats from the commits if not done yet.
#
# @return [Fixnum] The lines of code that have been deleted
# @see #load_line_stats
def deletions
support! :line_stats
load_line_stats if @deletions.nil?
@deletions
end
# This evaluates the changed lines in each commit of this collection
#
# For easier use, the values are stored in separate arrays where each
# number represents the number of changed (i.e. added or deleted) lines in
# one commit.
#
# @example
# commits.line_history
# => { :additions => [10, 5, 0], :deletions => [0, -2, -1] }
# @return [Hash<Symbol, Array>] Added lines are returned in an `Array`
# assigned to key `:additions`, deleted lines are assigned to
# `:deletions`
# @see Commit#additions
# @see Commit#deletions
def line_history
support! :line_stats
history = { :additions => [], :deletions => [] }
values.reverse.each do |commit|
history[:additions] << commit.additions
history[:deletions] << -commit.deletions
end
history
end
# Returns the total of lines changed by the commits in this collection
#
# @return [Fixnum] The total number of lines changed
# @see #additions
# @see #deletions
def modifications
additions + deletions
end
# Returns the given number of commits with most line changes on the
# repository
#
# @param [Numeric] count The number of commits to return
# @return [CommitCollection] The given number of commits ordered by impact
# @see Commit#modifications
def most_significant(count = 10)
support! :line_stats
commits = CommitCollection.new
sort_by { |commit| -commit.modifications }.each do |commit|
commits << commit
break if commits.size == count
end
commits
end
alias_method :top, :most_significant
# Returns the commits in this collection that change at least the given
# number of lines
#
# @param [Numeric] line_count The number of lines that should be
# changed at least by the commits
# @return [CommitCollection] The commits that change at least the given
# number of lines
# @see Commit#modifications
def with_impact(line_count)
support! :line_stats
commits = CommitCollection.new
each do |commit|
commits << commit if commit.modifications >= line_count
end
commits
end
private
# Loads the line stats for the commits in this collection
#
# For collections holding a specific range of commits, this always gets the
# line stats for all commits. For other, lets say fragmented, collections
# this loads the line stats for all commits that are missing their stats.
#
# @see Commit#additions
# @see Commit#deletions
# @see Commit#line_stats?
# @see Repository#line_stats
def load_line_stats
@additions = 0
@deletions = 0
return if empty?
line_stats = nil
if @range.nil?
ids = values.reject { |c| c.line_stats? }.map { |c| c.id }
line_stats = first.repo.load_line_stats ids unless ids.empty?
else
line_stats = first.repo.load_line_stats @range
end
unless line_stats.nil?
line_stats.each do |id, stats|
commit = self[id]
commit.line_stats = stats
end
end
each do |commit|
@additions += commit.additions
@deletions += commit.deletions
end
end
end
|
lostisland/faraday | lib/faraday/request.rb | Faraday.Request.url | ruby | def url(path, params = nil)
if path.respond_to? :query
if (query = path.query)
path = path.dup
path.query = nil
end
else
anchor_index = path.index('#')
path = path.slice(0, anchor_index) unless anchor_index.nil?
path, query = path.split('?', 2)
end
self.path = path
self.params.merge_query query, options.params_encoder
self.params.update(params) if params
end | Update path and params.
@param path [URI, String]
@param params [Hash, nil]
@return [void] | train | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/request.rb#L86-L100 | class Request < Struct.new(:method, :path, :params, :headers, :body, :options)
# rubocop:enable Style/StructInheritance
extend MiddlewareRegistry
register_middleware File.expand_path('request', __dir__),
url_encoded: [:UrlEncoded, 'url_encoded'],
multipart: [:Multipart, 'multipart'],
retry: [:Retry, 'retry'],
authorization: [:Authorization, 'authorization'],
basic_auth: [
:BasicAuthentication,
'basic_authentication'
],
token_auth: [
:TokenAuthentication,
'token_authentication'
],
instrumentation: [:Instrumentation, 'instrumentation']
# @param request_method [String]
# @yield [request] for block customization, if block given
# @yieldparam request [Request]
# @return [Request]
def self.create(request_method)
new(request_method).tap do |request|
yield(request) if block_given?
end
end
# Replace params, preserving the existing hash type.
#
# @param hash [Hash] new params
def params=(hash)
if params
params.replace hash
else
super
end
end
# Replace request headers, preserving the existing hash type.
#
# @param hash [Hash] new headers
def headers=(hash)
if headers
headers.replace hash
else
super
end
end
# Update path and params.
#
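# @example Setting the path and merging query params (illustrative)
#   req.url('/search?q=asdf', 'format' => 'json')
#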
# @param path [URI, String]
# @param params [Hash, nil]
# @return [void]
# @param key [Object] key to look up in headers
# @return [Object] value of the given header name
def [](key)
headers[key]
end
# @param key [Object] key of header to write
# @param value [Object] value of header
def []=(key, value)
headers[key] = value
end
# Marshal serialization support.
#
# @return [Hash] the hash ready to be serialized in Marshal.
def marshal_dump
{
method: method,
body: body,
headers: headers,
path: path,
params: params,
options: options
}
end
# Marshal serialization support.
# Restores the instance variables according to the +serialised+.
# @param serialised [Hash] the serialised object.
def marshal_load(serialised)
self.method = serialised[:method]
self.body = serialised[:body]
self.headers = serialised[:headers]
self.path = serialised[:path]
self.params = serialised[:params]
self.options = serialised[:options]
end
# @return [Env] the Env for this Request
def to_env(connection)
Env.new(method, body, connection.build_exclusive_url(path, params),
options, headers, connection.ssl, connection.parallel_manager)
end
end
|
sds/haml-lint | lib/haml_lint/tree/node.rb | HamlLint::Tree.Node.line_numbers | ruby | def line_numbers
return (line..line) unless @value && text
end_line = line + lines.count
end_line = nontrivial_end_line if line == end_line
(line..end_line)
end | The line numbers that are contained within the node.
@api public
@return [Range] | train | https://github.com/sds/haml-lint/blob/024c773667e54cf88db938c2b368977005d70ee8/lib/haml_lint/tree/node.rb#L104-L111 | class Node
include Enumerable
attr_accessor :children, :parent
attr_reader :line, :type
# Creates a node wrapping the given {Haml::Parser::ParseNode} struct.
#
# @param document [HamlLint::Document] Haml document that created this node
# @param parse_node [Haml::Parser::ParseNode] parse node created by HAML's parser
def initialize(document, parse_node)
@line = parse_node.line
@document = document
@value = parse_node.value
@type = parse_node.type
end
# Holds any configuration that is created from Haml comments.
#
# @return [HamlLint::CommentConfiguration]
def comment_configuration
@comment_configuration ||= HamlLint::CommentConfiguration.new(self)
end
# Checks whether a visitor is disabled due to comment configuration.
#
# @param [HamlLint::HamlVisitor]
# @return [true, false]
def disabled?(visitor)
visitor.is_a?(HamlLint::Linter) &&
comment_configuration.disabled?(visitor.name)
end
# Implements the Enumerable interface to walk through an entire tree.
#
# @return [Enumerator, HamlLint::Tree::Node]
def each
return to_enum(__callee__) unless block_given?
node = self
loop do
yield node
break unless (node = node.next_node)
end
end
# The comment directives to apply to the node.
#
# @return [Array<HamlLint::Directive>]
def directives
directives = []
directives << predecessor.directives if predecessor
directives.flatten
end
# Source code of all lines this node spans (excluding children).
#
# @return [String]
def source_code
next_node_line =
if next_node
next_node.line - 1
else
@document.source_lines.count + 1
end
@document.source_lines[@line - 1...next_node_line]
.join("\n")
.gsub(/^\s*\z/m, '') # Remove blank lines at the end
end
def inspect
"#<#{self.class.name}>"
end
# The lines of text, if any, that are contained in the node.
#
# @api public
# @return [Array<String>]
def lines
return [] unless @value && text
text.split(/\r\n|\r|\n/)
end
# The line numbers that are contained within the node.
#
# @api public
# @return [Range]
# The previous node to be traversed in the tree.
#
# @return [HamlLint::Tree::Node, nil]
def predecessor
siblings.previous(self) || parent
end
# Returns the node that follows this node, whether it be a sibling or an
# ancestor's child, but not a child of this node.
#
# If you are also willing to return the child, call {#next_node}.
#
# Returns nil if there is no successor.
#
# @return [HamlLint::Tree::Node,nil]
def successor
next_sibling = siblings.next(self)
return next_sibling if next_sibling
parent&.successor
end
# Returns the next node that appears after this node in the document.
#
# Returns nil if there is no next node.
#
# @return [HamlLint::Tree::Node,nil]
def next_node
children.first || successor
end
# The sibling nodes that come after this node in the tree.
#
# @return [Array<HamlLint::Tree::Node>]
def subsequents
siblings.subsequents(self)
end
# Returns the text content of this node.
#
# @return [String]
def text
@value[:text].to_s
end
private
# Discovers the end line of the node when there are no lines.
#
# @return [Integer] the end line of the node
def nontrivial_end_line
if (last_child = children.last)
last_child.line_numbers.end - 1
elsif successor
successor.line_numbers.begin - 1
else
@document.source_lines.count
end
end
# The siblings of this node within the tree.
#
# @api private
# @return [Array<HamlLint::Tree::Node>]
def siblings
@siblings ||= Siblings.new(parent ? parent.children : [self])
end
# Finds the node's siblings within the tree and makes them queryable.
class Siblings < SimpleDelegator
# Finds the next sibling in the tree for a given node.
#
# @param node [HamlLint::Tree::Node]
# @return [HamlLint::Tree::Node, nil]
def next(node)
subsequents(node).first
end
# Finds the previous sibling in the tree for a given node.
#
# @param node [HamlLint::Tree::Node]
# @return [HamlLint::Tree::Node, nil]
def previous(node)
priors(node).last
end
# Finds all sibling notes that appear before a node in the tree.
#
# @param node [HamlLint::Tree::Node]
# @return [Array<HamlLint::Tree::Node>]
def priors(node)
position = position(node)
if position.zero?
[]
else
siblings[0..(position - 1)]
end
end
# Finds all sibling notes that appear after a node in the tree.
#
# @param node [HamlLint::Tree::Node]
# @return [Array<HamlLint::Tree::Node>]
def subsequents(node)
siblings[(position(node) + 1)..-1]
end
private
# The set of siblings within the tree.
#
# @api private
# @return [Array<HamlLint::Tree::Node>]
alias siblings __getobj__
# Finds the position of a node within a set of siblings.
#
# @api private
# @return [Integer, nil]
def position(node)
siblings.index(node)
end
end
end
|
hashicorp/vault-ruby | lib/vault/api/sys/mount.rb | Vault.Sys.remount | ruby | def remount(from, to)
client.post("/v1/sys/remount", JSON.fast_generate(
from: from,
to: to,
))
return true
end | Change the name of the mount
@example
Vault.sys.remount("pg", "postgres") #=> true
@param [String] from
the origin mount path
@param [String] to
the new mount path
@return [true] | train | https://github.com/hashicorp/vault-ruby/blob/02f0532a802ba1a2a0d8703a4585dab76eb9d864/lib/vault/api/sys/mount.rb#L95-L101 | class Sys < Request
# List all mounts in the vault.
#
# @example
# Vault.sys.mounts #=> { :secret => #<struct Vault::Mount type="generic", description="generic secret storage"> }
#
# @return [Hash<Symbol, Mount>]
def mounts
json = client.get("/v1/sys/mounts")
json = json[:data] if json[:data]
return Hash[*json.map do |k,v|
[k.to_s.chomp("/").to_sym, Mount.decode(v)]
end.flatten]
end
# Create a mount at the given path.
#
# @example
# Vault.sys.mount("pg", "postgresql", "Postgres user management") #=> true
#
# @param [String] path
# the path to mount at
# @param [String] type
# the type of mount
# @param [String] description
# a human-friendly description (optional)
def mount(path, type, description = nil)
payload = { type: type }
payload[:description] = description if !description.nil?
client.post("/v1/sys/mounts/#{encode_path(path)}", JSON.fast_generate(payload))
return true
end
# Tune a mount at the given path.
#
# @example
# Vault.sys.mount_tune("pki", max_lease_ttl: '87600h') #=> true
#
# @param [String] path
# the path to write
# @param [Hash] data
# the data to write
def mount_tune(path, data = {})
client.post("/v1/sys/mounts/#{encode_path(path)}/tune", JSON.fast_generate(data))
return true
end
# Unmount the thing at the given path. If the mount does not exist, an error
# will be raised.
#
# @example
# Vault.sys.unmount("pg") #=> true
#
# @param [String] path
# the path to unmount
#
# @return [true]
def unmount(path)
client.delete("/v1/sys/mounts/#{encode_path(path)}")
return true
end
# Change the name of the mount
#
# @example
# Vault.sys.remount("pg", "postgres") #=> true
#
# @param [String] from
# the origin mount path
# @param [String] to
# the new mount path
#
# @return [true]
end
|
PierreRambaud/gemirro | lib/gemirro/server.rb | Gemirro.Server.update_indexes | ruby | def update_indexes
indexer = Gemirro::Indexer.new(Utils.configuration.destination)
indexer.only_origin = true
indexer.ui = ::Gem::SilentUI.new
Utils.logger.info('Generating indexes')
indexer.update_index
indexer.updated_gems.each do |gem|
Utils.cache.flush_key(File.basename(gem))
end
rescue SystemExit => e
Utils.logger.info(e.message)
end | Update indexes files
@return [Indexer] | train | https://github.com/PierreRambaud/gemirro/blob/5c6b5abb5334ed3beb256f6764bc336e2cf2dc21/lib/gemirro/server.rb#L164-L176 | class Server < Sinatra::Base
# rubocop:disable Metrics/LineLength
URI_REGEXP = /^(.*)-(\d+(?:\.\d+){1,4}.*?)(?:-(x86-(?:(?:mswin|mingw)(?:32|64)).*?|java))?\.(gem(?:spec\.rz)?)$/
GEMSPEC_TYPE = 'gemspec.rz'.freeze
GEM_TYPE = 'gem'.freeze
access_logger = Logger.new(Utils.configuration.server.access_log).tap do |logger|
::Logger.class_eval { alias_method :write, :'<<' }
logger.level = ::Logger::INFO
end
# rubocop:enable Metrics/LineLength
error_logger = File.new(Utils.configuration.server.error_log, 'a+')
error_logger.sync = true
before do
env['rack.errors'] = error_logger
Utils.configuration.logger = access_logger
end
##
# Configure server
#
configure do
config = Utils.configuration
config.server.host = 'localhost' if config.server.host.nil?
config.server.port = '2000' if config.server.port.nil?
set :static, true
set :views, Gemirro::Configuration.views_directory
set :port, config.server.port
set :bind, config.server.host
set :public_folder, config.destination.gsub(%r{/$}, '')
set :environment, config.environment
set :dump_errors, true
set :raise_errors, true
enable :logging
use Rack::CommonLogger, access_logger
end
##
# Set template for not found action
#
# @return [nil]
#
not_found do
content_type 'text/html'
erb(:not_found)
end
##
# Display information about one gem
#
# @return [nil]
#
get('/gem/:gemname') do
gems = Utils.gems_collection
gem = gems.find_by_name(params[:gemname])
return not_found if gem.nil?
erb(:gem, {}, gem: gem)
end
##
# Display home page containing the list of gems already
# downloaded on the server
#
# @return [nil]
#
get('/') do
erb(:index, {}, gems: Utils.gems_collection)
end
##
# Return gem dependencies as binary
#
# @return [nil]
#
get '/api/v1/dependencies' do
content_type 'application/octet-stream'
query_gems.any? ? Marshal.dump(query_gems_list) : 200
end
##
# Return gem dependencies as json
#
# @return [nil]
#
get '/api/v1/dependencies.json' do
content_type 'application/json'
query_gems.any? ? JSON.dump(query_gems_list) : {}
end
##
# Try to serve any requested file, downloading it
# first if it isn't found locally.
#
# @return [nil]
#
get('*') do |path|
resource = "#{settings.public_folder}#{path}"
# Try to download gem
fetch_gem(resource) unless File.exist?(resource)
# If not found again, return a 404
return not_found unless File.exist?(resource)
send_file(resource)
end
##
# Try to fetch the gem and download it if possible, and
# build and install indices.
#
# @param [String] resource
# @return [Indexer]
#
def fetch_gem(resource)
return unless Utils.configuration.fetch_gem
name = File.basename(resource)
result = name.match(URI_REGEXP)
return unless result
gem_name, gem_version, gem_platform, gem_type = result.captures
return unless gem_name && gem_version
begin
gem = Utils.stored_gem(gem_name, gem_version, gem_platform)
gem.gemspec = true if gem_type == GEMSPEC_TYPE
# rubocop:disable Metrics/LineLength
return if Utils.gems_fetcher.gem_exists?(gem.filename(gem_version)) && gem_type == GEM_TYPE
return if Utils.gems_fetcher.gemspec_exists?(gem.gemspec_filename(gem_version)) && gem_type == GEMSPEC_TYPE
# rubocop:enable Metrics/LineLength
Utils.logger
.info("Try to download #{gem_name} with version #{gem_version}")
Utils.gems_fetcher.source.gems.clear
Utils.gems_fetcher.source.gems.push(gem)
Utils.gems_fetcher.fetch
update_indexes if Utils.configuration.update_on_fetch
rescue StandardError => e
Utils.logger.error(e)
end
end
##
# Update indexes files
#
# @return [Indexer]
#
##
# Return all gems pass to query
#
# @return [Array]
#
def query_gems
params[:gems].to_s.split(',')
end
##
# Return gems list from query params
#
# @return [Array]
#
def query_gems_list
Utils.gems_collection(false) # load collection
gems = Parallel.map(query_gems, in_threads: 4) do |query_gem|
gem_dependencies(query_gem)
end
gems.flatten!
gems.reject!(&:empty?)
gems
end
##
# List of versions and dependencies of each version
# from a gem name.
#
# @return [Array]
#
def gem_dependencies(gem_name)
Utils.cache.cache(gem_name) do
gems = Utils.gems_collection(false)
gem_collection = gems.find_by_name(gem_name)
return '' if gem_collection.nil?
gem_collection = Parallel.map(gem_collection, in_threads: 4) do |gem|
[gem, spec_for(gem.name, gem.number, gem.platform)]
end
gem_collection.reject! do |_, spec|
spec.nil?
end
Parallel.map(gem_collection, in_threads: 4) do |gem, spec|
dependencies = spec.dependencies.select do |d|
d.type == :runtime
end
dependencies = Parallel.map(dependencies, in_threads: 4) do |d|
[d.name.is_a?(Array) ? d.name.first : d.name, d.requirement.to_s]
end
{
name: gem.name,
number: gem.number,
platform: gem.platform,
dependencies: dependencies
}
end
end
end
helpers do
##
# Return gem specification from gemname and version
#
# @param [String] gemname
# @param [String] version
# @return [::Gem::Specification]
#
def spec_for(gemname, version, platform = 'ruby')
gem = Utils.stored_gem(gemname, version.to_s, platform)
gemspec_path = File.join('quick',
Gemirro::Configuration.marshal_identifier,
gem.gemspec_filename)
spec_file = File.join(settings.public_folder,
gemspec_path)
fetch_gem(gemspec_path) unless File.exist?(spec_file)
return unless File.exist?(spec_file)
File.open(spec_file, 'r') do |uz_file|
uz_file.binmode
Marshal.load(::Gem.inflate(uz_file.read))
end
end
##
# Escape string
#
# @param [String] string
# @return [String]
#
def escape(string)
Rack::Utils.escape_html(string)
end
##
# Homepage link
#
# @param [Gem] spec
# @return [String]
#
def homepage(spec)
URI.parse(Addressable::URI.escape(spec.homepage))
end
end
end
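# Usage sketch (not part of the original source; assumes the gemirro gem is
# installed and '/var/lib/gemirro' is an existing mirror destination). It
# mirrors what #update_indexes does: rebuild only the origin indexes, silently.
require 'gemirro'
indexer = Gemirro::Indexer.new('/var/lib/gemirro')
indexer.only_origin = true
indexer.ui = Gem::SilentUI.new
indexer.update_index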
|
litaio/lita | lib/lita/template.rb | Lita.Template.context_binding | ruby | def context_binding(variables)
context = TemplateEvaluationContext.new
helpers.each { |helper| context.extend(helper) }
variables.each do |k, v|
context.instance_variable_set("@#{k}", v)
end
context.__get_binding
end | Create an empty object to use as the ERB context and set any provided variables in it. | train | https://github.com/litaio/lita/blob/c1a1f85f791b74e40ee6a1e2d53f19b5f7cbe0ba/lib/lita/template.rb#L56-L66 | class Template
# A clean room object to use as the binding for ERB rendering.
# @api private
class TemplateEvaluationContext
# Returns the evaluation context's binding.
# @return [Binding] The binding.
def __get_binding
binding
end
end
class << self
# Initializes a new Template with the contents of the file at the given path.
# @param path [String] The path to the file to use as the template content.
# @return Template
def from_file(path)
new(File.read(path).chomp)
end
end
# @param source [String] A string to use as the template's content.
def initialize(source)
@erb = ERB.new(source, $SAFE, "<>")
self.helpers = Set.new
end
# Add a module of helper methods to be added to the template evaluation context.
# @param helper [Module] The module to extend onto the template evaluation context.
# @return [void]
# @since 4.5.0
def add_helper(helper)
helpers << helper
end
# Render the template with the provided variables.
# @param variables [Hash] A collection of variables for interpolation. Each key-value pair will
# make the value available inside the template as an instance variable with the key as its
# name.
def render(variables = {})
erb.result(context_binding(variables))
end
private
attr_accessor :helpers
# Create an empty object to use as the ERB context and set any provided variables in it.
# The underlying ERB object.
attr_reader :erb
end
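# Usage sketch (not part of the original source; assumes the lita gem is
# installed; ShoutHelper is a hypothetical helper module for illustration).
require 'lita'
module ShoutHelper
  def shout(text)
    text.upcase
  end
end
template = Lita::Template.new('<%= shout(@greeting) %>, world')
template.add_helper(ShoutHelper)
puts template.render(greeting: 'hello') #=> "HELLO, world"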
|
dagrz/nba_stats | lib/nba_stats/stats/box_score_usage.rb | NbaStats.BoxScoreUsage.box_score_usage | ruby | def box_score_usage(
game_id,
range_type=0,
start_period=0,
end_period=0,
start_range=0,
end_range=0
)
NbaStats::Resources::BoxScoreUsage.new(
get(BOX_SCORE_USAGE_PATH, {
:GameID => game_id,
:RangeType => range_type,
:StartPeriod => start_period,
:EndPeriod => end_period,
:StartRange => start_range,
:EndRange => end_range
})
)
end | Calls the boxscoreusage API and returns a BoxScoreUsage resource.
@param game_id [String]
@param range_type [Integer]
@param start_period [Integer]
@param end_period [Integer]
@param start_range [Integer]
@param end_range [Integer]
@return [NbaStats::Resources::BoxScoreUsage] | train | https://github.com/dagrz/nba_stats/blob/d6fe6cf81f74a2ce7a054aeec5e9db59a6ec42aa/lib/nba_stats/stats/box_score_usage.rb#L19-L37 | module BoxScoreUsage
# The path of the boxscoreusage API
BOX_SCORE_USAGE_PATH = '/stats/boxscoreusage'
# Calls the boxscoreusage API and returns a BoxScoreUsage resource.
#
# @param game_id [String]
# @param range_type [Integer]
# @param start_period [Integer]
# @param end_period [Integer]
# @param start_range [Integer]
# @param end_range [Integer]
# @return [NbaStats::Resources::BoxScoreUsage]
end # BoxScoreUsage
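# Usage sketch (not part of the original source): this module is mixed into
# the gem's stats client, assumed here to be NbaStats::Client (check the gem's
# README); the game id is illustrative and the call hits the stats.nba.com API.
require 'nba_stats'
client = NbaStats::Client.new
usage = client.box_score_usage('0021400001')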
|
moneta-rb/moneta | lib/moneta/stack.rb | Moneta.Stack.store | ruby | def store(key, value, options = {})
@stack.each {|s| s.store(key, value, options) }
value
end | (see Proxy#store) | train | https://github.com/moneta-rb/moneta/blob/26a118c8b2c93d11257f4a5fe9334a8157f4db47/lib/moneta/stack.rb#L58-L61 | class Stack
include Defaults
# @api private
class DSL
def initialize(stack, &block)
@stack = stack
instance_eval(&block)
end
# @api public
def add(store = nil, &block)
raise ArgumentError, 'Only argument or block allowed' if store && block
@stack << (store || Moneta.build(&block))
nil
end
end
attr_reader :stack
# @param [Hash] options Options hash
# @option options [Array] :stack Array of Moneta stores
# @yieldparam Builder block
def initialize(options = {}, &block)
@stack = options[:stack].to_a
DSL.new(@stack, &block) if block_given?
end
# (see Proxy#key?)
def key?(key, options = {})
@stack.any? {|s| s.key?(key, options) }
end
# (see Proxy#load)
def load(key, options = {})
@stack.each do |s|
value = s.load(key, options)
return value if value != nil
end
nil
end
# (see Proxy#store)
# (see Proxy#increment)
def increment(key, amount = 1, options = {})
last = nil
@stack.each {|s| last = s.increment(key, amount, options) }
last
end
# (see Proxy#create)
def create(key, value, options = {})
last = false
@stack.each {|s| last = s.create(key, value, options) }
last
end
# (see Proxy#delete)
def delete(key, options = {})
@stack.inject(nil) do |value, s|
v = s.delete(key, options)
value || v
end
end
# (see Proxy#clear)
def clear(options = {})
@stack.each {|s| s.clear(options) }
self
end
# (see Proxy#close)
def close
@stack.each {|s| s.close }
nil
end
# (see Proxy#features)
def features
@features ||=
begin
features = @stack.map(&:features)
(features.inject(features.first, &:&) - [:each_key]).freeze
end
end
end
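# Usage sketch (not part of the original source; assumes the moneta gem is
# installed): a two-layer stack where writes hit every store and reads fall
# through the layers until a non-nil value is found.
require 'moneta'
stack = Moneta::Stack.new do
  add(Moneta.new(:Memory)) # first layer consulted on reads
  add { adapter :Memory }  # second layer, built through Moneta.build
end
stack['answer'] = 42 # #[]= comes from the Defaults mixin and calls #store
p stack['answer']    #=> 42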
|
kristianmandrup/geo_units | lib/geo_units/converter.rb | GeoUnits.Converter.to_lat | ruby | def to_lat deg, format = :dms, dp = 0
deg = deg.normalize_lat
_lat = Dms.to_dms deg, format, dp
_lat == '' ? '' : _lat[1..-1] + (deg<0 ? 'S' : 'N') # knock off initial '0' for lat!
end | Convert numeric degrees to deg/min/sec latitude (suffixed with N/S)
@param {Number} deg: Degrees
@param {String} [format=dms]: Return value as 'd', 'dm', 'dms'
@param {Number} [dp=0|2|4]: No of decimal places to use - default 0 for dms, 2 for dm, 4 for d
@returns {String} Deg/min/seconds | train | https://github.com/kristianmandrup/geo_units/blob/ddee241b826af36bc96dad3dd01258f56a730cd9/lib/geo_units/converter.rb#L14-L18 | module Converter
autoload_modules :Normalizer, :Dms, :Units
include Normalizer
# Convert numeric degrees to deg/min/sec latitude (suffixed with N/S)
#
# @param {Number} deg: Degrees
# @param {String} [format=dms]: Return value as 'd', 'dm', 'dms'
# @param {Number} [dp=0|2|4]: No of decimal places to use - default 0 for dms, 2 for dm, 4 for d
# @returns {String} Deg/min/seconds
# Convert numeric degrees to deg/min/sec longitude (suffixed with E/W)
#
# @param {Number} deg: Degrees
# @param {String} [format=dms]: Return value as 'd', 'dm', 'dms'
# @param {Number} [dp=0|2|4]: No of decimal places to use - default 0 for dms, 2 for dm, 4 for d
# @returns {String} Deg/min/seconds
def to_lon deg, format = :dms, dp = 0
deg = deg.normalize_lng
lon = Dms.to_dms deg, format, dp
lon == '' ? '' : lon + (deg<0 ? 'W' : 'E')
end
# Convert numeric degrees to deg/min/sec as a bearing (0º..360º)
#
# @param {Number} deg: Degrees
# @param {String} [format=dms]: Return value as 'd', 'dm', 'dms'
# @param {Number} [dp=0|2|4]: No of decimal places to use - default 0 for dms, 2 for dm, 4 for d
# @returns {String} Deg/min/seconds
def to_brng deg, format = :dms, dp = 0
deg = (deg.to_f + 360) % 360 # normalise -ve values to 180º..360º
brng = Dms.to_dms deg, format, dp
brng.gsub /360/, '0' # just in case rounding took us up to 360º!
end
include NumericCheckExt # from sugar-high/numeric
# Converts numeric degrees to radians
def to_rad degrees
degrees * Math::PI / 180
end
alias_method :to_radians, :to_rad
alias_method :as_rad, :to_rad
alias_method :as_radians, :to_rad
alias_method :in_rad, :to_rad
alias_method :in_radians, :to_rad
# Converts radians to numeric (signed) degrees
# latitude (north to south) from equator +90 up then -90 down (equator again) = 180 then 180 for south = 360 total
# longitude (west to east) east +180, west -180 = 360 total
def to_deg radians
radians * 180 / Math::PI
end
alias_method :to_degrees, :to_deg
alias_method :as_deg, :to_deg
alias_method :as_degrees, :to_deg
alias_method :in_deg, :to_deg
alias_method :in_degrees, :to_deg
extend self
end
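# Usage sketch (not part of the original source; assumes the geo_units gem is
# installed, which also patches Numeric with #normalize_lat / #normalize_lng).
require 'geo_units'
conv = GeoUnits::Converter
puts conv.to_lat(58.3, :dms)   # deg/min/sec string suffixed with N or S
puts conv.to_lon(-2.5, :dm, 2) # deg/decimal-min string suffixed with E or W
puts conv.to_rad(180)          #=> 3.141592653589793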
|
dicom/rtp-connect | lib/rtp-connect/plan_to_dcm.rb | RTP.Plan.add_doserate | ruby | def add_doserate(value, item)
if !@current_doserate || value != @current_doserate
@current_doserate = value
DICOM::Element.new('300A,0115', value, :parent => item)
end
end | Adds a Dose Rate Set element to a Control Point Item.
Note that the element is only added if there is no 'current' attribute
defined, or the given value is different from the current attribute.
@param [String, NilClass] value the doserate attribute
@param [DICOM::Item] item the DICOM control point item in which to create an element | train | https://github.com/dicom/rtp-connect/blob/e23791970218a7087a0d798aa430acf36f79d758/lib/rtp-connect/plan_to_dcm.rb#L414-L419 | class Plan < Record
attr_accessor :current_gantry
attr_accessor :current_collimator
attr_accessor :current_couch_angle
attr_accessor :current_couch_pedestal
attr_accessor :current_couch_lateral
attr_accessor :current_couch_longitudinal
attr_accessor :current_couch_vertical
# Converts the Plan (and child) records to a
# DICOM::DObject of modality RTPLAN.
#
# @note Only photon plans have been tested.
# Electron beams may give an invalid DICOM file.
# Also note that, due to limitations in the RTP file format, some original
# values can not be recreated, like e.g. Study UID or Series UID.
# @param [Hash] options the options to use for creating the DICOM object
# @option options [Boolean] :dose_ref if set, Dose Reference & Referenced Dose Reference sequences will be included in the generated DICOM file
# @option options [String] :manufacturer the value used for the manufacturer tag (0008,0070) in the beam sequence
# @option options [String] :model the value used for the manufacturer's model name tag (0008,1090) in the beam sequence
# @option options [Symbol] :scale if set, relevant device parameters are converted from native readout format to IEC1217 (supported values are :elekta & :varian)
# @option options [String] :serial_number the value used for the device serial number tag (0018,1000) in the beam sequence
# @return [DICOM::DObject] the converted DICOM object
#
def to_dcm(options={})
#
# FIXME: This method is rather big, with a few sections of somewhat similar, repeating code.
# Refactoring and simplifying it at some stage might be a good idea.
#
require 'dicom'
original_level = DICOM.logger.level
DICOM.logger.level = Logger::FATAL
p = @prescriptions.first
# If no prescription is present, we are not going to be able to make a valid DICOM object:
logger.error("No Prescription Record present. Unable to build a valid RTPLAN DICOM object.") unless p
dcm = DICOM::DObject.new
#
# TOP LEVEL TAGS:
#
# Specific Character Set:
DICOM::Element.new('0008,0005', 'ISO_IR 100', :parent => dcm)
# Instance Creation Date
DICOM::Element.new('0008,0012', Time.now.strftime("%Y%m%d"), :parent => dcm)
# Instance Creation Time:
DICOM::Element.new('0008,0013', Time.now.strftime("%H%M%S"), :parent => dcm)
# SOP Class UID:
DICOM::Element.new('0008,0016', '1.2.840.10008.5.1.4.1.1.481.5', :parent => dcm)
# SOP Instance UID (if an original UID is not present, we make up a UID):
begin
sop_uid = p.fields.first.extended_field.original_plan_uid.empty? ? DICOM.generate_uid : p.fields.first.extended_field.original_plan_uid
rescue
sop_uid = DICOM.generate_uid
end
DICOM::Element.new('0008,0018', sop_uid, :parent => dcm)
# Study Date
DICOM::Element.new('0008,0020', Time.now.strftime("%Y%m%d"), :parent => dcm)
# Study Time:
DICOM::Element.new('0008,0030', Time.now.strftime("%H%M%S"), :parent => dcm)
# Accession Number:
DICOM::Element.new('0008,0050', '', :parent => dcm)
# Modality:
DICOM::Element.new('0008,0060', 'RTPLAN', :parent => dcm)
# Manufacturer:
DICOM::Element.new('0008,0070', 'rtp-connect', :parent => dcm)
# Referring Physician's Name:
DICOM::Element.new('0008,0090', "#{@md_last_name}^#{@md_first_name}^#{@md_middle_name}^^", :parent => dcm)
# Operator's Name:
DICOM::Element.new('0008,1070', "#{@author_last_name}^#{@author_first_name}^#{@author_middle_name}^^", :parent => dcm)
# Patient's Name:
DICOM::Element.new('0010,0010', "#{@patient_last_name}^#{@patient_first_name}^#{@patient_middle_name}^^", :parent => dcm)
# Patient ID:
DICOM::Element.new('0010,0020', @patient_id, :parent => dcm)
# Patient's Birth Date:
DICOM::Element.new('0010,0030', '', :parent => dcm)
# Patient's Sex:
DICOM::Element.new('0010,0040', '', :parent => dcm)
# Manufacturer's Model Name:
DICOM::Element.new('0008,1090', 'RTP-to-DICOM', :parent => dcm)
# Software Version(s):
DICOM::Element.new('0018,1020', "RubyRTP#{VERSION}", :parent => dcm)
# Study Instance UID:
DICOM::Element.new('0020,000D', DICOM.generate_uid, :parent => dcm)
# Series Instance UID:
DICOM::Element.new('0020,000E', DICOM.generate_uid, :parent => dcm)
# Study ID:
DICOM::Element.new('0020,0010', '1', :parent => dcm)
# Series Number:
DICOM::Element.new('0020,0011', '1', :parent => dcm)
# Frame of Reference UID (if an original UID is not present, we make up a UID):
begin
for_uid = p.site_setup.frame_of_ref_uid.empty? ? DICOM.generate_uid : p.site_setup.frame_of_ref_uid
rescue
for_uid = DICOM.generate_uid
end
DICOM::Element.new('0020,0052', for_uid, :parent => dcm)
# Position Reference Indicator:
DICOM::Element.new('0020,1040', '', :parent => dcm)
# RT Plan Label (max 16 characters):
plan_label = p ? p.rx_site_name[0..15] : @course_id
DICOM::Element.new('300A,0002', plan_label, :parent => dcm)
# RT Plan Name:
plan_name = p ? p.rx_site_name : @course_id
DICOM::Element.new('300A,0003', plan_name, :parent => dcm)
# RT Plan Description:
plan_desc = p ? p.technique : @diagnosis
DICOM::Element.new('300A,0004', plan_desc, :parent => dcm)
# RT Plan Date:
plan_date = @plan_date.empty? ? Time.now.strftime("%Y%m%d") : @plan_date
DICOM::Element.new('300A,0006', plan_date, :parent => dcm)
# RT Plan Time:
plan_time = @plan_time.empty? ? Time.now.strftime("%H%M%S") : @plan_time
DICOM::Element.new('300A,0007', plan_time, :parent => dcm)
# Approval Status:
DICOM::Element.new('300E,0002', 'UNAPPROVED', :parent => dcm)
#
# SEQUENCES:
#
# Tolerance Table Sequence:
if p && p.fields.first && !p.fields.first.tolerance_table.empty?
tt_seq = DICOM::Sequence.new('300A,0040', :parent => dcm)
tt_item = DICOM::Item.new(:parent => tt_seq)
# Tolerance Table Number:
DICOM::Element.new('300A,0042', p.fields.first.tolerance_table, :parent => tt_item)
end
# Structure set information:
if p && p.site_setup && !p.site_setup.structure_set_uid.empty?
#
# Referenced Structure Set Sequence:
#
ss_seq = DICOM::Sequence.new('300C,0060', :parent => dcm)
ss_item = DICOM::Item.new(:parent => ss_seq)
# Referenced SOP Class UID:
DICOM::Element.new('0008,1150', '1.2.840.10008.5.1.4.1.1.481.3', :parent => ss_item)
DICOM::Element.new('0008,1155', p.site_setup.structure_set_uid, :parent => ss_item)
# RT Plan Geometry:
DICOM::Element.new('300A,000C', 'PATIENT', :parent => dcm)
else
# RT Plan Geometry:
DICOM::Element.new('300A,000C', 'TREATMENT_DEVICE', :parent => dcm)
end
#
# Patient Setup Sequence:
#
ps_seq = DICOM::Sequence.new('300A,0180', :parent => dcm)
ps_item = DICOM::Item.new(:parent => ps_seq)
# Patient Position:
begin
pat_pos = p.site_setup.patient_orientation.empty? ? 'HFS' : p.site_setup.patient_orientation
rescue
pat_pos = 'HFS'
end
DICOM::Element.new('0018,5100', pat_pos, :parent => ps_item)
# Patient Setup Number:
DICOM::Element.new('300A,0182', '1', :parent => ps_item)
# Setup Technique (assume Isocentric):
DICOM::Element.new('300A,01B0', 'ISOCENTRIC', :parent => ps_item)
#
# Dose Reference Sequence:
#
create_dose_reference(dcm, plan_name) if options[:dose_ref]
#
# Fraction Group Sequence:
#
fg_seq = DICOM::Sequence.new('300A,0070', :parent => dcm)
fg_item = DICOM::Item.new(:parent => fg_seq)
# Fraction Group Number:
DICOM::Element.new('300A,0071', '1', :parent => fg_item)
# Number of Fractions Planned (try to derive from total dose/fraction dose, or use 1 as default):
begin
num_frac = p.dose_ttl.empty? || p.dose_tx.empty? ? '1' : (p.dose_ttl.to_i / p.dose_tx.to_f).round.to_s
rescue
num_frac = '0'
end
DICOM::Element.new('300A,0078', num_frac, :parent => fg_item)
# Number of Brachy Application Setups:
DICOM::Element.new('300A,00A0', '0', :parent => fg_item)
# Referenced Beam Sequence (items created for each beam below):
rb_seq = DICOM::Sequence.new('300C,0004', :parent => fg_item)
#
# Beam Sequence:
#
b_seq = DICOM::Sequence.new('300A,00B0', :parent => dcm)
if p
# If no fields are present, we are not going to be able to make a valid DICOM object:
logger.error("No Field Record present. Unable to build a valid RTPLAN DICOM object.") unless p.fields.length > 0
p.fields.each_with_index do |field, i|
# Fields with modality 'Unspecified' (e.g. CT or 2dkV) must be skipped:
unless field.modality == 'Unspecified'
# If this is an electron beam, a warning should be printed, as these are less reliably converted:
logger.warn("This is not a photon beam (#{field.modality}). Beware that DICOM conversion of Electron beams are experimental, and other modalities are unsupported.") if field.modality != 'Xrays'
# Reset control point 'current value' attributes:
reset_cp_current_attributes
# Beam number and name:
beam_number = field.extended_field ? field.extended_field.original_beam_number : (i + 1).to_s
beam_name = field.extended_field ? field.extended_field.original_beam_name : field.field_name
# Ref Beam Item:
rb_item = DICOM::Item.new(:parent => rb_seq)
# Beam Dose (convert from cGy to Gy):
field_dose = field.field_dose.empty? ? '' : (field.field_dose.to_f * 0.01).round(4).to_s
DICOM::Element.new('300A,0084', field_dose, :parent => rb_item)
# Beam Meterset:
DICOM::Element.new('300A,0086', field.field_monitor_units, :parent => rb_item)
# Referenced Beam Number:
DICOM::Element.new('300C,0006', beam_number, :parent => rb_item)
# Beam Item:
b_item = DICOM::Item.new(:parent => b_seq)
# Optional method values:
# Manufacturer:
DICOM::Element.new('0008,0070', options[:manufacturer], :parent => b_item) if options[:manufacturer]
# Manufacturer's Model Name:
DICOM::Element.new('0008,1090', options[:model], :parent => b_item) if options[:model]
# Device Serial Number:
DICOM::Element.new('0018,1000', options[:serial_number], :parent => b_item) if options[:serial_number]
# Treatment Machine Name (max 16 characters):
DICOM::Element.new('300A,00B2', field.treatment_machine[0..15], :parent => b_item)
# Primary Dosimeter Unit:
DICOM::Element.new('300A,00B3', 'MU', :parent => b_item)
# Source-Axis Distance (convert to mm):
DICOM::Element.new('300A,00B4', "#{field.sad.to_f * 10}", :parent => b_item)
# Beam Number:
DICOM::Element.new('300A,00C0', beam_number, :parent => b_item)
# Beam Name:
DICOM::Element.new('300A,00C2', beam_name, :parent => b_item)
# Beam Description:
DICOM::Element.new('300A,00C3', field.field_note, :parent => b_item)
# Beam Type:
beam_type = case field.treatment_type
when 'Static' then 'STATIC'
when 'StepNShoot' then 'STATIC'
when 'VMAT' then 'DYNAMIC'
else logger.error("The beam type (treatment type) #{field.treatment_type} is not yet supported.")
end
DICOM::Element.new('300A,00C4', beam_type, :parent => b_item)
# Radiation Type:
rad_type = case field.modality
when 'Elect' then 'ELECTRON'
when 'Xrays' then 'PHOTON'
else logger.error("The radiation type (modality) #{field.modality} is not yet supported.")
end
DICOM::Element.new('300A,00C6', rad_type, :parent => b_item)
# Treatment Delivery Type:
DICOM::Element.new('300A,00CE', 'TREATMENT', :parent => b_item)
# Number of Wedges:
DICOM::Element.new('300A,00D0', (field.wedge.empty? ? '0' : '1'), :parent => b_item)
# Number of Compensators:
DICOM::Element.new('300A,00E0', (field.compensator.empty? ? '0' : '1'), :parent => b_item)
# Number of Boli:
DICOM::Element.new('300A,00ED', (field.bolus.empty? ? '0' : '1'), :parent => b_item)
# Number of Blocks:
DICOM::Element.new('300A,00F0', (field.block.empty? ? '0' : '1'), :parent => b_item)
# Final Cumulative Meterset Weight:
DICOM::Element.new('300A,010E', 1, :parent => b_item)
# Referenced Patient Setup Number:
DICOM::Element.new('300C,006A', '1', :parent => b_item)
#
# Beam Limiting Device Sequence:
#
create_beam_limiting_devices(b_item, field)
#
# Block Sequence (if any):
# FIXME: It seems that the Block Sequence (300A,00F4) may be
# difficult (impossible?) to reconstruct based on the RTP file's
# information, and thus it is skipped altogether.
#
#
# Applicator Sequence (if any):
#
unless field.e_applicator.empty?
app_seq = DICOM::Sequence.new('300A,0107', :parent => b_item)
app_item = DICOM::Item.new(:parent => app_seq)
# Applicator ID:
DICOM::Element.new('300A,0108', field.e_field_def_aperture, :parent => app_item)
# Applicator Type:
DICOM::Element.new('300A,0109', "ELECTRON_#{field.e_applicator.upcase}", :parent => app_item)
# Applicator Description:
DICOM::Element.new('300A,010A', "Appl. #{field.e_field_def_aperture}", :parent => app_item)
end
#
# Control Point Sequence:
#
# A field may have 0 (no MLC), 1 (conventional beam with MLC) or 2n (IMRT) control points.
# The DICOM file shall always contain 2n control points (minimum 2).
#
cp_seq = DICOM::Sequence.new('300A,0111', :parent => b_item)
if field.control_points.length < 2
# When we have 0 or 1 control point, use settings from field, and insert MLC settings if present:
# First CP:
cp_item = DICOM::Item.new(:parent => cp_seq)
# Control Point Index:
DICOM::Element.new('300A,0112', "0", :parent => cp_item)
# Nominal Beam Energy:
DICOM::Element.new('300A,0114', "#{field.energy.to_f}", :parent => cp_item)
# Dose Rate Set:
DICOM::Element.new('300A,0115', field.doserate, :parent => cp_item)
# Gantry Angle:
DICOM::Element.new('300A,011E', field.gantry_angle, :parent => cp_item)
# Gantry Rotation Direction:
DICOM::Element.new('300A,011F', (field.arc_direction.empty? ? 'NONE' : field.arc_direction), :parent => cp_item)
# Beam Limiting Device Angle:
DICOM::Element.new('300A,0120', field.collimator_angle, :parent => cp_item)
# Beam Limiting Device Rotation Direction:
DICOM::Element.new('300A,0121', 'NONE', :parent => cp_item)
# Patient Support Angle:
DICOM::Element.new('300A,0122', field.couch_pedestal, :parent => cp_item)
# Patient Support Rotation Direction:
DICOM::Element.new('300A,0123', 'NONE', :parent => cp_item)
# Table Top Eccentric Angle:
DICOM::Element.new('300A,0125', field.couch_angle, :parent => cp_item)
# Table Top Eccentric Rotation Direction:
DICOM::Element.new('300A,0126', 'NONE', :parent => cp_item)
# Table Top Vertical Position:
couch_vert = field.couch_vertical.empty? ? '' : (field.couch_vertical.to_f * 10).to_s
DICOM::Element.new('300A,0128', couch_vert, :parent => cp_item)
# Table Top Longitudinal Position:
couch_long = field.couch_longitudinal.empty? ? '' : (field.couch_longitudinal.to_f * 10).to_s
DICOM::Element.new('300A,0129', couch_long, :parent => cp_item)
# Table Top Lateral Position:
couch_lat = field.couch_lateral.empty? ? '' : (field.couch_lateral.to_f * 10).to_s
DICOM::Element.new('300A,012A', couch_lat, :parent => cp_item)
# Isocenter Position (x\y\z):
if p.site_setup
DICOM::Element.new('300A,012C', "#{(p.site_setup.iso_pos_x.to_f * 10).round(2)}\\#{(p.site_setup.iso_pos_y.to_f * 10).round(2)}\\#{(p.site_setup.iso_pos_z.to_f * 10).round(2)}", :parent => cp_item)
else
logger.warn("No Site Setup record exists for this plan. Unable to provide an isosenter position.")
DICOM::Element.new('300A,012C', '', :parent => cp_item)
end
# Source to Surface Distance:
add_ssd(field.ssd, cp_item)
# Cumulative Meterset Weight:
DICOM::Element.new('300A,0134', '0', :parent => cp_item)
# Beam Limiting Device Position Sequence:
if field.control_points.length > 0
create_beam_limiting_device_positions(cp_item, field.control_points.first, options)
else
create_beam_limiting_device_positions_from_field(cp_item, field, options)
end
# Referenced Dose Reference Sequence:
create_referenced_dose_reference(cp_item) if options[:dose_ref]
# Second CP:
cp_item = DICOM::Item.new(:parent => cp_seq)
# Control Point Index:
DICOM::Element.new('300A,0112', "1", :parent => cp_item)
# Cumulative Meterset Weight:
DICOM::Element.new('300A,0134', '1', :parent => cp_item)
else
# When we have multiple (2 or more) control points, iterate each control point:
field.control_points.each { |cp| create_control_point(cp, cp_seq, options) }
# Make sure that the cumulative meterset weight of the last control
# point is '1' (exactly equal to the final cumulative meterset weight):
cp_seq.items.last['300A,0134'].value = '1'
end
# Number of Control Points:
DICOM::Element.new('300A,0110', b_item['300A,0111'].items.length, :parent => b_item)
end
end
# Number of Beams:
DICOM::Element.new('300A,0080', fg_item['300C,0004'].items.length, :parent => fg_item)
end
# Restore the DICOM logger:
DICOM.logger.level = original_level
return dcm
end
private
# Adds an angular type value to a Control Point Item, by creating the
# necessary DICOM elements.
# Note that the element is only added if there is no 'current' attribute
# defined, or the given value is different from the current attribute.
#
# @param [DICOM::Item] item the DICOM control point item in which to create the elements
# @param [String] angle_tag the DICOM tag of the angle element
# @param [String] direction_tag the DICOM tag of the direction element
# @param [String, NilClass] angle the collimator angle attribute
# @param [String, NilClass] direction the collimator rotation direction attribute
# @param [Symbol] current_angle the instance variable that keeps track of the current value of this attribute
#
def add_angle(item, angle_tag, direction_tag, angle, direction, current_angle)
if !self.send(current_angle) || angle != self.send(current_angle)
self.send("#{current_angle}=", angle)
DICOM::Element.new(angle_tag, angle, :parent => item)
DICOM::Element.new(direction_tag, (direction.empty? ? 'NONE' : direction), :parent => item)
end
end
# Adds a Table Top Position element to a Control Point Item.
# Note that the element is only added if there is no 'current' attribute
# defined, or the given value is different from the current attribute.
#
# @param [DICOM::Item] item the DICOM control point item in which to create the element
# @param [String] tag the DICOM tag of the couch position element
# @param [String, NilClass] value the couch position
# @param [Symbol] current the instance variable that keeps track of the current value of this attribute
#
def add_couch_position(item, tag, value, current)
if !self.send(current) || value != self.send(current)
self.send("#{current}=", value)
DICOM::Element.new(tag, (value.empty? ? '' : value.to_f * 10), :parent => item)
end
end
# Adds a Dose Rate Set element to a Control Point Item.
# Note that the element is only added if there is no 'current' attribute
# defined, or the given value is different from the current attribute.
#
# @param [String, NilClass] value the doserate attribute
# @param [DICOM::Item] item the DICOM control point item in which to create an element
#
# Adds a Nominal Beam Energy element to a Control Point Item.
# Note that the element is only added if there is no 'current' attribute
# defined, or the given value is different from the current attribute.
#
# @param [String, NilClass] value the energy attribute
# @param [DICOM::Item] item the DICOM control point item in which to create an element
#
def add_energy(value, item)
if !@current_energy || value != @current_energy
@current_energy = value
DICOM::Element.new('300A,0114', "#{value.to_f}", :parent => item)
end
end
# Adds an Isocenter element to a Control Point Item.
# Note that the element is only added if there is a Site Setup record present,
# and it contains a real (non-empty) value. Also, the element is only added if there
# is no 'current' attribute defined, or the given value is different from the current attribute.
#
# @param [SiteSetup, NilClass] site_setup the associated site setup record
# @param [DICOM::Item] item the DICOM control point item in which to create an element
#
def add_isosenter(site_setup, item)
if site_setup
# Create an element if the value is new or unique:
if !@current_isosenter
iso = "#{(site_setup.iso_pos_x.to_f * 10).round(2)}\\#{(site_setup.iso_pos_y.to_f * 10).round(2)}\\#{(site_setup.iso_pos_z.to_f * 10).round(2)}"
if iso != @current_isosenter
@current_isosenter = iso
DICOM::Element.new('300A,012C', iso, :parent => item)
end
end
else
# Log a warning if this is the first control point:
unless @current_isosenter
logger.warn("No Site Setup record exists for this plan. Unable to provide an isosenter position.")
end
end
end
# Adds a Source to Surface Distance element to a Control Point Item.
# Note that the element is only added if the SSD attribute contains
# real (non-empty) value.
#
# @param [String, NilClass] value the SSD attribute
# @param [DICOM::Item] item the DICOM control point item in which to create an element
#
def add_ssd(value, item)
DICOM::Element.new('300A,0130', "#{value.to_f * 10}", :parent => item) if value && !value.empty?
end
# Creates a control point item in the given control point sequence, based
# on an RTP control point record.
#
# @param [ControlPoint] cp the RTP ControlPoint record to convert
# @param [DICOM::Sequence] sequence the DICOM parent sequence of the item to be created
# @param [Hash] options the options to use for creating the control point
# @option options [Boolean] :dose_ref if set, a Referenced Dose Reference sequence will be included in the generated control point item
# @return [DICOM::Item] the constructed control point DICOM item
#
def create_control_point(cp, sequence, options={})
cp_item = DICOM::Item.new(:parent => sequence)
# Some CP attributes will always be written (CP index, BLD positions & Cumulative meterset weight).
# The other attributes are only written if they are different from the previous control point.
# Control Point Index:
DICOM::Element.new('300A,0112', "#{cp.index}", :parent => cp_item)
# Beam Limiting Device Position Sequence:
create_beam_limiting_device_positions(cp_item, cp, options)
# Source to Surface Distance:
add_ssd(cp.ssd, cp_item)
# Cumulative Meterset Weight:
DICOM::Element.new('300A,0134', cp.monitor_units.to_f, :parent => cp_item)
# Referenced Dose Reference Sequence:
create_referenced_dose_reference(cp_item) if options[:dose_ref]
# Attributes that are only added if they carry an updated value:
# Nominal Beam Energy:
add_energy(cp.energy, cp_item)
# Dose Rate Set:
add_doserate(cp.doserate, cp_item)
# Gantry Angle & Rotation Direction:
add_angle(cp_item, '300A,011E', '300A,011F', cp.gantry_angle, cp.gantry_dir, :current_gantry)
# Beam Limiting Device Angle & Rotation Direction:
add_angle(cp_item, '300A,0120', '300A,0121', cp.collimator_angle, cp.collimator_dir, :current_collimator)
# Patient Support Angle & Rotation Direction:
add_angle(cp_item, '300A,0122', '300A,0123', cp.couch_pedestal, cp.couch_ped_dir, :current_couch_pedestal)
# Table Top Eccentric Angle & Rotation Direction:
add_angle(cp_item, '300A,0125', '300A,0126', cp.couch_angle, cp.couch_dir, :current_couch_angle)
# Table Top Vertical Position:
add_couch_position(cp_item, '300A,0128', cp.couch_vertical, :current_couch_vertical)
# Table Top Longitudinal Position:
add_couch_position(cp_item, '300A,0129', cp.couch_longitudinal, :current_couch_longitudinal)
# Table Top Lateral Position:
add_couch_position(cp_item, '300A,012A', cp.couch_lateral, :current_couch_lateral)
# Isocenter Position (x\y\z):
add_isosenter(cp.parent.parent.site_setup, cp_item)
cp_item
end
# Creates a beam limiting device sequence in the given DICOM object.
#
# @param [DICOM::Item] beam_item the DICOM beam item in which to insert the sequence
# @param [Field] field the RTP field to fetch device parameters from
# @return [DICOM::Sequence] the constructed beam limiting device sequence
#
def create_beam_limiting_devices(beam_item, field)
bl_seq = DICOM::Sequence.new('300A,00B6', :parent => beam_item)
# The ASYMX item ('backup jaws') doesn't exist on all models:
if ['SYM', 'ASY'].include?(field.field_x_mode.upcase)
bl_item_x = DICOM::Item.new(:parent => bl_seq)
DICOM::Element.new('300A,00B8', "ASYMX", :parent => bl_item_x)
DICOM::Element.new('300A,00BC', "1", :parent => bl_item_x)
end
# The ASYMY item is always created:
bl_item_y = DICOM::Item.new(:parent => bl_seq)
# RT Beam Limiting Device Type:
DICOM::Element.new('300A,00B8', "ASYMY", :parent => bl_item_y)
# Number of Leaf/Jaw Pairs:
DICOM::Element.new('300A,00BC', "1", :parent => bl_item_y)
# MLCX item is only created if leaves are defined:
# (NB: The RTP file doesn't specify leaf position boundaries, so we
# have to set these based on a set of known MLC types, their number
# of leaves, and their leaf boundary positions.)
if field.control_points.length > 0
bl_item_mlcx = DICOM::Item.new(:parent => bl_seq)
DICOM::Element.new('300A,00B8', "MLCX", :parent => bl_item_mlcx)
num_leaves = field.control_points.first.mlc_leaves.to_i
DICOM::Element.new('300A,00BC', num_leaves.to_s, :parent => bl_item_mlcx)
DICOM::Element.new('300A,00BE', "#{RTP.leaf_boundaries(num_leaves).join("\\")}", :parent => bl_item_mlcx)
end
bl_seq
end
# Creates a beam limiting device positions sequence in the given DICOM object.
#
# @param [DICOM::Item] cp_item the DICOM control point item in which to insert the sequence
# @param [ControlPoint] cp the RTP control point to fetch device parameters from
# @return [DICOM::Sequence] the constructed beam limiting device positions sequence
#
def create_beam_limiting_device_positions(cp_item, cp, options={})
dp_seq = DICOM::Sequence.new('300A,011A', :parent => cp_item)
# The ASYMX item ('backup jaws') doesn't exist on all models:
if ['SYM', 'ASY'].include?(cp.parent.field_x_mode.upcase)
dp_item_x = create_asym_item(cp, dp_seq, axis=:x, options)
end
# Always create one ASYMY item:
dp_item_y = create_asym_item(cp, dp_seq, axis=:y, options)
# MLCX:
dp_item_mlcx = DICOM::Item.new(:parent => dp_seq)
# RT Beam Limiting Device Type:
DICOM::Element.new('300A,00B8', "MLCX", :parent => dp_item_mlcx)
# Leaf/Jaw Positions:
DICOM::Element.new('300A,011C', cp.dcm_mlc_positions(options[:scale]), :parent => dp_item_mlcx)
dp_seq
end
# Creates an ASYMX or ASYMY item.
#
# @param [ControlPoint] cp the RTP control point to fetch device parameters from
# @param [DICOM::Sequence] dcm_parent the DICOM sequence in which to insert the item
# @param [Symbol] axis the axis for the item (:x or :y)
# @return [DICOM::Item] the constructed ASYMX or ASYMY item
#
def create_asym_item(cp, dcm_parent, axis, options={})
val1 = cp.send("dcm_collimator_#{axis.to_s}1", options[:scale])
val2 = cp.send("dcm_collimator_#{axis.to_s}2", options[:scale])
item = DICOM::Item.new(:parent => dcm_parent)
# RT Beam Limiting Device Type:
DICOM::Element.new('300A,00B8', "ASYM#{axis.to_s.upcase}", :parent => item)
# Leaf/Jaw Positions:
DICOM::Element.new('300A,011C', "#{val1}\\#{val2}", :parent => item)
item
end
# Creates a beam limiting device positions sequence in the given DICOM object.
#
# @param [DICOM::Item] cp_item the DICOM control point item in which to insert the sequence
# @param [Field] field the RTP treatment field to fetch device parameters from
# @return [DICOM::Sequence] the constructed beam limiting device positions sequence
#
def create_beam_limiting_device_positions_from_field(cp_item, field, options={})
dp_seq = DICOM::Sequence.new('300A,011A', :parent => cp_item)
# ASYMX:
dp_item_x = DICOM::Item.new(:parent => dp_seq)
DICOM::Element.new('300A,00B8', "ASYMX", :parent => dp_item_x)
DICOM::Element.new('300A,011C', "#{field.dcm_collimator_x1}\\#{field.dcm_collimator_x2}", :parent => dp_item_x)
# ASYMY:
dp_item_y = DICOM::Item.new(:parent => dp_seq)
DICOM::Element.new('300A,00B8', "ASYMY", :parent => dp_item_y)
DICOM::Element.new('300A,011C', "#{field.dcm_collimator_y1}\\#{field.dcm_collimator_y2}", :parent => dp_item_y)
dp_seq
end
# Creates a dose reference sequence in the given DICOM object.
#
# @param [DICOM::DObject] dcm the DICOM object in which to insert the sequence
# @param [String] description the value to use for Dose Reference Description
# @return [DICOM::Sequence] the constructed dose reference sequence
#
def create_dose_reference(dcm, description)
dr_seq = DICOM::Sequence.new('300A,0010', :parent => dcm)
dr_item = DICOM::Item.new(:parent => dr_seq)
# Dose Reference Number:
DICOM::Element.new('300A,0012', '1', :parent => dr_item)
# Dose Reference Structure Type:
DICOM::Element.new('300A,0014', 'SITE', :parent => dr_item)
# Dose Reference Description:
DICOM::Element.new('300A,0016', description, :parent => dr_item)
# Dose Reference Type:
DICOM::Element.new('300A,0020', 'TARGET', :parent => dr_item)
dr_seq
end
# Creates a referenced dose reference sequence in the given DICOM object.
#
# @param [DICOM::Item] cp_item the DICOM item in which to insert the sequence
# @return [DICOM::Sequence] the constructed referenced dose reference sequence
#
def create_referenced_dose_reference(cp_item)
# Referenced Dose Reference Sequence:
rd_seq = DICOM::Sequence.new('300C,0050', :parent => cp_item)
rd_item = DICOM::Item.new(:parent => rd_seq)
# Cumulative Dose Reference Coefficient:
DICOM::Element.new('300A,010C', '', :parent => rd_item)
# Referenced Dose Reference Number:
DICOM::Element.new('300C,0051', '1', :parent => rd_item)
rd_seq
end
# Resets the types of control point attributes that are only written to the
# first control point item, and for following control point items only when
# they are different from the 'current' value. When a new field is reached,
# it is essential to reset these attributes, or else we risk starting
# the field with a control point that has missing attributes, if one of its first
# attributes is equal to the last attribute of the previous field.
#
def reset_cp_current_attributes
@current_gantry = nil
@current_collimator = nil
@current_couch_pedestal = nil
@current_couch_angle = nil
@current_couch_vertical = nil
@current_couch_longitudinal = nil
@current_couch_lateral = nil
@current_isosenter = nil
end
end
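# Usage sketch (not part of the original source; assumes the rtp-connect and
# dicom gems are installed and the file names are illustrative): convert an
# RTP plan file to an RTPLAN DICOM file, scaling device readouts for Varian.
require 'rtp-connect'
plan = RTP::Plan.read('plan.rtp')
dcm = plan.to_dcm(scale: :varian)
dcm.write('plan_rtplan.dcm')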
|
algolia/algoliasearch-client-ruby | lib/algolia/index.rb | Algolia.Index.get_objects | ruby | def get_objects(objectIDs, attributes_to_retrieve = nil, request_options = {})
attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array)
requests = objectIDs.map do |objectID|
req = { :indexName => name, :objectID => objectID.to_s }
req[:attributesToRetrieve] = attributes_to_retrieve unless attributes_to_retrieve.nil?
req
end
client.post(Protocol.objects_uri, { :requests => requests }.to_json, :read, request_options)['results']
end | Get a list of objects from this index
@param objectIDs the array of unique identifier of the objects to retrieve
@param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings of a string separated by ","
@param request_options contains extra parameters to send with your query | train | https://github.com/algolia/algoliasearch-client-ruby/blob/5292cd9b1029f879e4e0257a3e89d0dc9ad0df3b/lib/algolia/index.rb#L257-L265 | class Index
attr_accessor :name, :client
def initialize(name, client = nil)
self.name = name
self.client = client || Algolia.client
end
#
# Delete an index
#
# @param request_options contains extra parameters to send with your query
#
# return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" }
#
def delete(request_options = {})
client.delete(Protocol.index_uri(name), :write, request_options)
end
alias_method :delete_index, :delete
#
# Delete an index and wait until the deletion has been processed
#
# @param request_options contains extra parameters to send with your query
#
# return an hash of the form { "deletedAt" => "2013-01-18T15:33:13.556Z", "taskID" => "42" }
#
def delete!(request_options = {})
res = delete(request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
alias_method :delete_index!, :delete!
#
# Add an object in this index
#
# @param object the object to add to the index.
# The object is represented by an associative array
# @param objectID (optional) an objectID you want to attribute to this object
# (if the attribute already exist the old object will be overridden)
# @param request_options contains extra parameters to send with your query
#
def add_object(object, objectID = nil, request_options = {})
check_object(object)
if objectID.nil? || objectID.to_s.empty?
client.post(Protocol.index_uri(name), object.to_json, :write, request_options)
else
client.put(Protocol.object_uri(name, objectID), object.to_json, :write, request_options)
end
end
#
# Add an object in this index and wait end of indexing
#
# @param object the object to add to the index.
# The object is represented by an associative array
# @param objectID (optional) an objectID you want to attribute to this object
# (if the attribute already exist the old object will be overridden)
# @param Request options object. Contains extra URL parameters or headers
#
def add_object!(object, objectID = nil, request_options = {})
res = add_object(object, objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Add several objects in this index
#
# @param objects the array of objects to add inside the index.
# Each object is represented by an associative array
# @param request_options contains extra parameters to send with your query
#
def add_objects(objects, request_options = {})
batch(build_batch('addObject', objects, false), request_options)
end
#
# Add several objects in this index and wait end of indexing
#
# @param objects the array of objects to add inside the index.
# Each object is represented by an associative array
# @param request_options contains extra parameters to send with your query
#
def add_objects!(objects, request_options = {})
res = add_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Search inside the index
#
# @param query the full text query
# @param args (optional) if set, contains an associative array with query parameters:
# - page: (integer) Pagination parameter used to select the page to retrieve.
# Page is zero-based and defaults to 0. Thus, to retrieve the 10th page you need to set page=9
# - hitsPerPage: (integer) Pagination parameter used to select the number of hits per page. Defaults to 20.
# - attributesToRetrieve: a string that contains the list of object attributes you want to retrieve (let you minimize the answer size).
# Attributes are separated with a comma (for example "name,address").
# You can also use a string array encoding (for example ["name","address"]).
# By default, all attributes are retrieved. You can also use '*' to retrieve all values when an attributesToRetrieve setting is specified for your index.
# - attributesToHighlight: a string that contains the list of attributes you want to highlight according to the query.
# Attributes are separated by a comma. You can also use a string array encoding (for example ["name","address"]).
# If an attribute has no match for the query, the raw value is returned. By default all indexed text attributes are highlighted.
# You can use `*` if you want to highlight all textual attributes. Numerical attributes are not highlighted.
# A matchLevel is returned for each highlighted attribute and can contain:
# - full: if all the query terms were found in the attribute,
# - partial: if only some of the query terms were found,
# - none: if none of the query terms were found.
# - attributesToSnippet: a string that contains the list of attributes to snippet alongside the number of words to return (syntax is `attributeName:nbWords`).
# Attributes are separated by a comma (Example: attributesToSnippet=name:10,content:10).
# You can also use a string array encoding (Example: attributesToSnippet: ["name:10","content:10"]). By default no snippet is computed.
# - minWordSizefor1Typo: the minimum number of characters in a query word to accept one typo in this word. Defaults to 3.
# - minWordSizefor2Typos: the minimum number of characters in a query word to accept two typos in this word. Defaults to 7.
# - getRankingInfo: if set to 1, the result hits will contain ranking information in _rankingInfo attribute.
# - aroundLatLng: search for entries around a given latitude/longitude (specified as two floats separated by a comma).
# For example aroundLatLng=47.316669,5.016670).
# You can specify the maximum distance in meters with the aroundRadius parameter (in meters) and the precision for ranking with aroundPrecision
# (for example if you set aroundPrecision=100, two objects that are distant of less than 100m will be considered as identical for "geo" ranking parameter).
# At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}})
# - insideBoundingBox: search entries inside a given area defined by the two extreme points of a rectangle (defined by 4 floats: p1Lat,p1Lng,p2Lat,p2Lng).
# For example insideBoundingBox=47.3165,4.9665,47.3424,5.0201).
# At indexing, you should specify geoloc of an object with the _geoloc attribute (in the form {"_geoloc":{"lat":48.853409, "lng":2.348800}})
# - numericFilters: a string that contains the list of numeric filters you want to apply separated by a comma.
# The syntax of one filter is `attributeName` followed by `operand` followed by `value`. Supported operands are `<`, `<=`, `=`, `>` and `>=`.
# You can have multiple conditions on one attribute like for example numericFilters=price>100,price<1000.
# You can also use a string array encoding (for example numericFilters: ["price>100","price<1000"]).
# - tagFilters: filter the query by a set of tags. You can AND tags by separating them by commas.
# To OR tags, you must add parentheses. For example, tags=tag1,(tag2,tag3) means tag1 AND (tag2 OR tag3).
# You can also use a string array encoding, for example tagFilters: ["tag1",["tag2","tag3"]] means tag1 AND (tag2 OR tag3).
# At indexing, tags should be added in the _tags** attribute of objects (for example {"_tags":["tag1","tag2"]}).
# - facetFilters: filter the query by a list of facets.
# Facets are separated by commas and each facet is encoded as `attributeName:value`.
# For example: `facetFilters=category:Book,author:John%20Doe`.
# You can also use a string array encoding (for example `["category:Book","author:John%20Doe"]`).
# - facets: List of object attributes that you want to use for faceting.
# Attributes are separated with a comma (for example `"category,author"` ).
# You can also use a JSON string array encoding (for example ["category","author"]).
# Only attributes that have been added in **attributesForFaceting** index setting can be used in this parameter.
# You can also use `*` to perform faceting on all attributes specified in **attributesForFaceting**.
# - queryType: select how the query words are interpreted, it can be one of the following value:
# - prefixAll: all query words are interpreted as prefixes,
# - prefixLast: only the last word is interpreted as a prefix (default behavior),
# - prefixNone: no query word is interpreted as a prefix. This option is not recommended.
# - optionalWords: a string that contains the list of words that should be considered as optional when found in the query.
# The list of words is comma separated.
# - distinct: If set to 1, enable the distinct feature (disabled by default) if the attributeForDistinct index setting is set.
# This feature is similar to the SQL "distinct" keyword: when enabled in a query with the distinct=1 parameter,
# all hits containing a duplicate value for the attributeForDistinct attribute are removed from results.
# For example, if the chosen attribute is show_name and several hits have the same value for show_name, then only the best
# one is kept and others are removed.
# @param request_options contains extra parameters to send with your query
#
def search(query, params = {}, request_options = {})
encoded_params = Hash[params.map { |k, v| [k.to_s, v.is_a?(Array) ? v.to_json : v] }]
encoded_params[:query] = query
client.post(Protocol.search_post_uri(name), { :params => Protocol.to_query(encoded_params) }.to_json, :search, request_options)
end
class IndexBrowser
def initialize(client, name, params)
@client = client
@name = name
@params = params
@cursor = params[:cursor] || params['cursor'] || nil
end
def browse(request_options = {}, &block)
loop do
answer = @client.get(Protocol.browse_uri(@name, @params.merge({ :cursor => @cursor })), :read, request_options)
answer['hits'].each do |hit|
if block.arity == 2
yield hit, @cursor
else
yield hit
end
end
@cursor = answer['cursor']
break if @cursor.nil?
end
end
end
#
# Browse all index content
#
# @param queryParameters The hash of query parameters to use to browse
# To browse from a specific cursor, just add a ":cursor" parameter
# @param queryParameters An optional second parameter hash here for backward-compatibility (which will be merged with the first)
# @param request_options contains extra parameters to send with your query
#
# @DEPRECATED:
# @param page Pagination parameter used to select the page to retrieve.
# @param hits_per_page Pagination parameter used to select the number of hits per page. Defaults to 1000.
#
def browse(page_or_query_parameters = nil, hits_per_page = nil, request_options = {}, &block)
params = {}
if page_or_query_parameters.is_a?(Hash)
params.merge!(page_or_query_parameters)
else
params[:page] = page_or_query_parameters unless page_or_query_parameters.nil?
end
if hits_per_page.is_a?(Hash)
params.merge!(hits_per_page)
else
params[:hitsPerPage] = hits_per_page unless hits_per_page.nil?
end
if block_given?
IndexBrowser.new(client, name, params).browse(request_options, &block)
else
params[:page] ||= 0
params[:hitsPerPage] ||= 1000
client.get(Protocol.browse_uri(name, params), :read, request_options)
end
end
#
# Browse a single page from a specific cursor
#
# @param request_options contains extra parameters to send with your query
#
def browse_from(cursor, hits_per_page = 1000, request_options = {})
client.post(Protocol.browse_uri(name), { :cursor => cursor, :hitsPerPage => hits_per_page }.to_json, :read, request_options)
end
#
# Get an object from this index
#
# @param objectID the unique identifier of the object to retrieve
# @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings of a string separated by ","
# @param request_options contains extra parameters to send with your query
#
def get_object(objectID, attributes_to_retrieve = nil, request_options = {})
attributes_to_retrieve = attributes_to_retrieve.join(',') if attributes_to_retrieve.is_a?(Array)
if attributes_to_retrieve.nil?
client.get(Protocol.object_uri(name, objectID, nil), :read, request_options)
else
client.get(Protocol.object_uri(name, objectID, { :attributes => attributes_to_retrieve }), :read, request_options)
end
end
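# Usage sketch (not part of the original source; assumes the algoliasearch
# gem is configured with valid credentials, and the index and object ids
# below are illustrative):
#
#   index = Algolia::Index.new('products')
#   index.get_objects(%w[obj1 obj2], %w[name price])
#   #=> [{"objectID" => "obj1", "name" => ..., "price" => ...}, ...]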
#
# Get a list of objects from this index
#
# @param objectIDs the array of unique identifier of the objects to retrieve
# @param attributes_to_retrieve (optional) if set, contains the list of attributes to retrieve as an array of strings of a string separated by ","
# @param request_options contains extra parameters to send with your query
#
#
# Check the status of a task on the server.
# All server tasks are asynchronous and you can check the status of a task with this method.
#
# @param taskID the id of the task returned by server
# @param request_options contains extra parameters to send with your query
#
def get_task_status(taskID, request_options = {})
client.get_task_status(name, taskID, request_options)
end
#
# Wait for the publication of a task on the server.
# All server tasks are asynchronous; this method blocks until the given task is published.
#
# @param taskID the id of the task returned by server
# @param time_before_retry the time in milliseconds before retry (default = 100ms)
# @param request_options contains extra parameters to send with your query
#
def wait_task(taskID, time_before_retry = WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options = {})
client.wait_task(name, taskID, time_before_retry, request_options)
end
#
# Override the content of an object
#
# @param object the object to save
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_object(object, objectID = nil, request_options = {})
client.put(Protocol.object_uri(name, get_objectID(object, objectID)), object.to_json, :write, request_options)
end
#
# Override the content of an object and wait for the end of indexing
#
# @param object the object to save
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_object!(object, objectID = nil, request_options = {})
res = save_object(object, objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Override the content of several objects
#
# @param objects the array of objects to save, each object must contain an 'objectID' key
# @param request_options contains extra parameters to send with your query
#
def save_objects(objects, request_options = {})
batch(build_batch('updateObject', objects, true), request_options)
end
#
# Override the content of several objects and wait for the end of indexing
#
# @param objects the array of objects to save, each object must contain an objectID attribute
# @param request_options contains extra parameters to send with your query
#
def save_objects!(objects, request_options = {})
res = save_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Override the current objects with the given array of objects and wait for the end of indexing. Settings,
# synonyms and query rules are untouched. The objects are replaced without any downtime.
#
# @param objects the array of objects to save
# @param request_options contains extra parameters to send with your query
#
def replace_all_objects(objects, request_options = {})
safe = request_options[:safe] || request_options['safe'] || false
request_options.delete(:safe)
request_options.delete('safe')
tmp_index = @client.init_index(@name + '_tmp_' + rand(10000000).to_s)
responses = []
scope = ['settings', 'synonyms', 'rules']
res = @client.copy_index(@name, tmp_index.name, scope, request_options)
responses << res
if safe
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
batch = []
batch_size = 1000
count = 0
objects.each do |object|
batch << object
count += 1
if count == batch_size
res = tmp_index.add_objects(batch, request_options)
responses << res
batch = []
count = 0
end
end
if batch.any?
res = tmp_index.add_objects(batch, request_options)
responses << res
end
if safe
responses.each do |res|
tmp_index.wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
end
res = @client.move_index(tmp_index.name, @name, request_options)
responses << res
if safe
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
end
responses
end
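# A hedged sketch of an atomic reindex (the records and the :safe flag are
# illustrative; :safe makes the call wait on every intermediate task):
#
#   records = [{ :objectID => '1', :name => 'Alice' }, { :objectID => '2', :name => 'Bob' }]
#   index.replace_all_objects(records, :safe => true)
#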
#
# Override the current objects with the given array of objects and wait for the end of indexing
#
# @param objects the array of objects to save
# @param request_options contains extra parameters to send with your query
#
def replace_all_objects!(objects, request_options = {})
replace_all_objects(objects, request_options.merge(:safe => true))
end
#
# Partially update an object (only the attributes passed as arguments are updated)
#
# @param object the object attributes to override
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param create_if_not_exits a boolean; if true, creates the object if it doesn't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_object(object, objectID = nil, create_if_not_exits = true, request_options = {})
client.post(Protocol.partial_object_uri(name, get_objectID(object, objectID), create_if_not_exits), object.to_json, :write, request_options)
end
#
# Partially override the content of several objects
#
# @param objects an array of objects to update (each object must contain an objectID attribute)
# @param create_if_not_exits a boolean; if true, creates the objects if they don't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_objects(objects, create_if_not_exits = true, request_options = {})
if create_if_not_exits
batch(build_batch('partialUpdateObject', objects, true), request_options)
else
batch(build_batch('partialUpdateObjectNoCreate', objects, true), request_options)
end
end
#
# Partially override the content of several objects and wait for the end of indexing
#
# @param objects an array of objects to update (each object must contain an objectID attribute)
# @param create_if_not_exits a boolean; if true, creates the objects if they don't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_objects!(objects, create_if_not_exits = true, request_options = {})
res = partial_update_objects(objects, create_if_not_exits, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Partially update an object (only the attributes passed as arguments are updated) and wait for the end of indexing
#
# @param object the attributes to override
# @param objectID the associated objectID, if nil 'object' must contain an 'objectID' key
# @param create_if_not_exits a boolean; if true, creates the object if it doesn't exist
# @param request_options contains extra parameters to send with your query
#
def partial_update_object!(object, objectID = nil, create_if_not_exits = true, request_options = {})
res = partial_update_object(object, objectID, create_if_not_exits, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Delete an object from the index
#
# @param objectID the unique identifier of object to delete
# @param request_options contains extra parameters to send with your query
#
def delete_object(objectID, request_options = {})
raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == ''
client.delete(Protocol.object_uri(name, objectID), :write, request_options)
end
#
# Delete an object from the index and wait for the end of indexing
#
# @param objectID the unique identifier of object to delete
# @param request_options contains extra parameters to send with your query
#
def delete_object!(objectID, request_options = {})
res = delete_object(objectID, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Delete several objects
#
# @param objects an array of objectIDs
# @param request_options contains extra parameters to send with your query
#
def delete_objects(objects, request_options = {})
check_array(objects)
batch(build_batch('deleteObject', objects.map { |objectID| { :objectID => objectID } }, false), request_options)
end
#
# Delete several objects and wait for the end of indexing
#
# @param objects an array of objectIDs
# @param request_options contains extra parameters to send with your query
#
def delete_objects!(objects, request_options = {})
res = delete_objects(objects, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Delete all objects matching a query
# This method retrieves all objects synchronously but deletes them in
# batches asynchronously
#
# @param query the query string
# @param params the optional query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by_query(query, params = nil, request_options = {})
raise ArgumentError.new('query cannot be nil, use the `clear` method to wipe the entire index') if query.nil? && params.nil?
params = sanitized_delete_by_query_params(params)
params[:query] = query
params[:hitsPerPage] = 1000
params[:distinct] = false
params[:attributesToRetrieve] = ['objectID']
params[:cursor] = ''
ids = []
while params[:cursor] != nil
result = browse(params, nil, request_options)
params[:cursor] = result['cursor']
hits = result['hits']
break if hits.empty?
ids += hits.map { |hit| hit['objectID'] }
end
delete_objects(ids, request_options)
end
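# Illustrative usage (the query string is an assumption); use the bang
# variant to also wait for the deletion tasks:
#
#   index.delete_by_query('outdated')
#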
#
# Delete all objects matching a query and wait for the end of indexing
#
# @param query the query string
# @param params the optional query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by_query!(query, params = nil, request_options = {})
res = delete_by_query(query, params, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res
res
end
#
# Delete all objects matching the given filters (free-text queries are not supported)
# This method deletes every record matching the filters provided
#
# @param params query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by(params, request_options = {})
raise ArgumentError.new('params cannot be nil, use the `clear` method to wipe the entire index') if params.nil?
params = sanitized_delete_by_query_params(params)
client.post(Protocol.delete_by_uri(name), params.to_json, :write, request_options)
end
#
# Delete all objects matching the given filters (free-text queries are not supported)
# This method deletes every record matching the filters provided and waits for the end of indexing
# @param params query parameters
# @param request_options contains extra parameters to send with your query
#
def delete_by!(params, request_options = {})
res = delete_by(params, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options) if res
res
end
#
# Delete the index content
#
# @param request_options contains extra parameters to send with your query
#
def clear(request_options = {})
client.post(Protocol.clear_uri(name), {}, :write, request_options)
end
alias_method :clear_index, :clear
#
# Delete the index content and wait end of indexing
#
def clear!(request_options = {})
res = clear(request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
alias_method :clear_index!, :clear!
#
# Set settings for this index
#
def set_settings(new_settings, options = {}, request_options = {})
client.put(Protocol.settings_uri(name, options), new_settings.to_json, :write, request_options)
end
#
# Set settings for this index and wait end of indexing
#
def set_settings!(new_settings, options = {}, request_options = {})
res = set_settings(new_settings, options, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Get settings of this index
#
def get_settings(options = {}, request_options = {})
options['getVersion'] = 2 if !options[:getVersion] && !options['getVersion']
client.get(Protocol.settings_uri(name, options).to_s, :read, request_options)
end
#
# List all existing user keys with their associated ACLs
#
# Deprecated: Please use `client.list_api_keys` instead.
def list_api_keys(request_options = {})
client.get(Protocol.index_keys_uri(name), :read, request_options)
end
#
# Get ACL of a user key
#
# Deprecated: Please use `client.get_api_key` instead.
def get_api_key(key, request_options = {})
client.get(Protocol.index_key_uri(name, key), :read, request_options)
end
#
# Create a new user key
#
# @param object can be two different parameters:
# The list of parameters for this key. Defined by a Hash that can
# contain the following values:
# - acl: array of string
# - validity: int
# - referers: array of string
# - description: string
# - maxHitsPerQuery: integer
# - queryParameters: string
# - maxQueriesPerIPPerHour: integer
# Or the list of ACLs for this key. Defined by an array of Strings that
# can contain the following values:
# - search: allows searching (https and http)
# - addObject: allows adding/updating an object in the index (https only)
# - deleteObject: allows deleting an existing object (https only)
# - deleteIndex: allows deleting index content (https only)
# - settings: allows getting index settings (https only)
# - editSettings: allows changing index settings (https only)
# @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key)
# @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited)
# @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited)
# @param request_options contains extra parameters to send with your query
#
# Deprecated: Please use `client.add_api_key` instead
def add_api_key(object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {})
if object.instance_of?(Array)
params = { :acl => object }
else
params = object
end
params['validity'] = validity.to_i if validity != 0
params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0
params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0
client.post(Protocol.index_keys_uri(name), params.to_json, :write, request_options)
end
#
# Update a user key
#
# @param object can be two different parameters:
# The list of parameters for this key. Defined by a Hash that
# can contain the following values:
# - acl: array of string
# - validity: int
# - referers: array of string
# - description: string
# - maxHitsPerQuery: integer
# - queryParameters: string
# - maxQueriesPerIPPerHour: integer
# Or the list of ACLs for this key. Defined by an array of Strings that
# can contain the following values:
# - search: allows searching (https and http)
# - addObject: allows adding/updating an object in the index (https only)
# - deleteObject: allows deleting an existing object (https only)
# - deleteIndex: allows deleting index content (https only)
# - settings: allows getting index settings (https only)
# - editSettings: allows changing index settings (https only)
# @param validity the number of seconds after which the key will be automatically removed (0 means no time limit for this key)
# @param max_queries_per_IP_per_hour the maximum number of API calls allowed from an IP address per hour (0 means unlimited)
# @param max_hits_per_query the maximum number of hits this API key can retrieve in one call (0 means unlimited)
# @param request_options contains extra parameters to send with your query
#
# Deprecated: Please use `client.update_api_key` instead
def update_api_key(key, object, validity = 0, max_queries_per_IP_per_hour = 0, max_hits_per_query = 0, request_options = {})
if object.instance_of?(Array)
params = { :acl => object }
else
params = object
end
params['validity'] = validity.to_i if validity != 0
params['maxHitsPerQuery'] = max_hits_per_query.to_i if max_hits_per_query != 0
params['maxQueriesPerIPPerHour'] = max_queries_per_IP_per_hour.to_i if max_queries_per_IP_per_hour != 0
client.put(Protocol.index_key_uri(name, key), params.to_json, :write, request_options)
end
#
# Delete an existing user key
#
# Deprecated: Please use `client.delete_api_key` instead
def delete_api_key(key, request_options = {})
client.delete(Protocol.index_key_uri(name, key), :write, request_options)
end
#
# Send a batch request
#
def batch(request, request_options = {})
client.post(Protocol.batch_uri(name), request.to_json, :batch, request_options)
end
#
# Send a batch request and wait for the end of indexing
#
def batch!(request, request_options = {})
res = batch(request, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Search for facet values
#
# @param facet_name Name of the facet to search. It must have been declared in the
# index's `attributesForFaceting` setting with the `searchable()` modifier.
# @param facet_query Text to search for in the facet's values
# @param search_parameters An optional query to take extra search parameters into account.
# These parameters apply to index objects like in a regular search query.
# Only facet values contained in the matched objects will be returned.
# @param request_options contains extra parameters to send with your query
#
def search_for_facet_values(facet_name, facet_query, search_parameters = {}, request_options = {})
params = search_parameters.clone
params['facetQuery'] = facet_query
client.post(Protocol.search_facet_uri(name, facet_name), params.to_json, :read, request_options)
end
# deprecated
alias_method :search_facet, :search_for_facet_values
#
# Perform a search with disjunctive facets generating as many queries as number of disjunctive facets
#
# @param query the query
# @param disjunctive_facets the array of disjunctive facets
# @param params a hash representing the regular query parameters
# @param refinements a hash ("string" -> ["array", "of", "refined", "values"]) representing the current refinements
# ex: { "my_facet1" => ["my_value1", "my_value2"], "my_disjunctive_facet1" => ["my_value1", "my_value2"] }
# @param request_options contains extra parameters to send with your query
#
def search_disjunctive_faceting(query, disjunctive_facets, params = {}, refinements = {}, request_options = {})
raise ArgumentError.new('Argument "disjunctive_facets" must be a String or an Array') unless disjunctive_facets.is_a?(String) || disjunctive_facets.is_a?(Array)
raise ArgumentError.new('Argument "refinements" must be a Hash of Arrays') if !refinements.is_a?(Hash) || !refinements.select { |k, v| !v.is_a?(Array) }.empty?
# extract disjunctive facets & associated refinements
disjunctive_facets = disjunctive_facets.split(',') if disjunctive_facets.is_a?(String)
disjunctive_refinements = {}
refinements.each do |k, v|
disjunctive_refinements[k] = v if disjunctive_facets.include?(k) || disjunctive_facets.include?(k.to_s)
end
# build queries
queries = []
## hits + regular facets query
filters = []
refinements.to_a.each do |k, values|
r = values.map { |v| "#{k}:#{v}" }
if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym]
# disjunctive refinements are ORed
filters << r
else
# regular refinements are ANDed
filters += r
end
end
queries << params.merge({ :index_name => self.name, :query => query, :facetFilters => filters })
## one query per disjunctive facet (use all refinements but the current one + hitsPerPage=1 + single facet)
disjunctive_facets.each do |disjunctive_facet|
filters = []
refinements.each do |k, values|
if k.to_s != disjunctive_facet.to_s
r = values.map { |v| "#{k}:#{v}" }
if disjunctive_refinements[k.to_s] || disjunctive_refinements[k.to_sym]
# disjunctive refinements are ORed
filters << r
else
# regular refinements are ANDed
filters += r
end
end
end
queries << params.merge({
:index_name => self.name,
:query => query,
:page => 0,
:hitsPerPage => 1,
:attributesToRetrieve => [],
:attributesToHighlight => [],
:attributesToSnippet => [],
:facets => disjunctive_facet,
:facetFilters => filters,
:analytics => false
})
end
answers = client.multiple_queries(queries, { :request_options => request_options })
# aggregate answers
## first answer stores the hits + regular facets
aggregated_answer = answers['results'][0]
## others store the disjunctive facets
aggregated_answer['disjunctiveFacets'] = {}
answers['results'].each_with_index do |a, i|
next if i == 0
a['facets'].each do |facet, values|
## add the facet to the disjunctive facet hash
aggregated_answer['disjunctiveFacets'][facet] = values
## concatenate missing refinements
(disjunctive_refinements[facet.to_s] || disjunctive_refinements[facet.to_sym] || []).each do |r|
if aggregated_answer['disjunctiveFacets'][facet][r].nil?
aggregated_answer['disjunctiveFacets'][facet][r] = 0
end
end
end
end
aggregated_answer
end
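# A hedged example (facet names and refinements are assumptions); one regular
# query plus one query per disjunctive facet is sent under the hood:
#
#   index.search_disjunctive_faceting('phone', ['brand'],
#     { :hitsPerPage => 10 },
#     { 'brand' => ['Apple', 'Samsung'] })
#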
#
# Alias of Algolia.list_indexes
#
# @param request_options contains extra parameters to send with your query
#
def Index.all(request_options = {})
Algolia.list_indexes(request_options)
end
#
# Search synonyms
#
# @param query the query
# @param params an optional hash of :type, :page, :hitsPerPage
# @param request_options contains extra parameters to send with your query
#
def search_synonyms(query, params = {}, request_options = {})
type = params[:type] || params['type']
type = type.join(',') if type.is_a?(Array)
page = params[:page] || params['page'] || 0
hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20
params = {
:query => query,
:type => type.to_s,
:page => page,
:hitsPerPage => hits_per_page
}
client.post(Protocol.search_synonyms_uri(name), params.to_json, :read, request_options)
end
#
# Get a synonym
#
# @param objectID the synonym objectID
# @param request_options contains extra parameters to send with your query
def get_synonym(objectID, request_options = {})
client.get(Protocol.synonym_uri(name, objectID), :read, request_options)
end
#
# Delete a synonym
#
# @param objectID the synonym objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_synonym(objectID, forward_to_replicas = false, request_options = {})
client.delete("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options)
end
#
# Delete a synonym and wait for the end of indexing
#
# @param objectID the synonym objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_synonym!(objectID, forward_to_replicas = false, request_options = {})
res = delete_synonym(objectID, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Save a synonym
#
# @param objectID the synonym objectID
# @param synonym the synonym
# @param forward_to_replicas should we forward the save to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_synonym(objectID, synonym, forward_to_replicas = false, request_options = {})
client.put("#{Protocol.synonym_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", synonym.to_json, :write, request_options)
end
#
# Save a synonym and wait for the end of indexing
#
# @param objectID the synonym objectID
# @param synonym the synonym
# @param forward_to_replicas should we forward the save to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_synonym!(objectID, synonym, forward_to_replicas = false, request_options = {})
res = save_synonym(objectID, synonym, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Clear all synonyms
#
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_synonyms(forward_to_replicas = false, request_options = {})
client.post("#{Protocol.clear_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options)
end
#
# Clear all synonyms and wait for the end of indexing
#
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_synonyms!(forward_to_replicas = false, request_options = {})
res = clear_synonyms(forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Add/Update an array of synonyms
#
# @param synonyms the array of synonyms to add/update
# @param forward_to_replicas should we forward the operation to replica indices
# @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_synonyms(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {})
client.post("#{Protocol.batch_synonyms_uri(name)}?forwardToReplicas=#{forward_to_replicas}&replaceExistingSynonyms=#{replace_existing_synonyms}", synonyms.to_json, :batch, request_options)
end
#
# Add/Update an array of synonyms and wait for the end of indexing
#
# @param synonyms the array of synonyms to add/update
# @param forward_to_replicas should we forward the operation to replica indices
# @param replace_existing_synonyms should we replace the existing synonyms before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_synonyms!(synonyms, forward_to_replicas = false, replace_existing_synonyms = false, request_options = {})
res = batch_synonyms(synonyms, forward_to_replicas, replace_existing_synonyms, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Replace synonyms in the index by the given array of synonyms
#
# @param synonyms the array of synonyms to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_synonyms(synonyms, request_options = {})
forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false
batch_synonyms(synonyms, forward_to_replicas, true, request_options)
end
#
# Replace synonyms in the index with the given array of synonyms and wait for the end of indexing
#
# @param synonyms the array of synonyms to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_synonyms!(synonyms, request_options = {})
res = replace_all_synonyms(synonyms, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Export the full list of synonyms
# Accepts an optional block to which it will pass each synonym
# Also returns an array with all the synonyms
#
# @param hits_per_page Number of synonyms to retrieve on each internal request - Optional - Default: 100
# @param request_options contains extra parameters to send with your query - Optional
#
def export_synonyms(hits_per_page = 100, request_options = {}, &_block)
res = []
page = 0
loop do
curr = search_synonyms('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits']
curr.each do |synonym|
res << synonym
yield synonym if block_given?
end
break if curr.size < hits_per_page
page += 1
end
res
end
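# Illustrative usage (the block streams each synonym while the full array is
# still returned at the end):
#
#   all_synonyms = index.export_synonyms(100) { |synonym| puts synonym['objectID'] }
#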
#
# Search rules
#
# @param query the query
# @param params an optional hash of :anchoring, :context, :page, :hitsPerPage
# @param request_options contains extra parameters to send with your query
#
def search_rules(query, params = {}, request_options = {})
anchoring = params[:anchoring]
context = params[:context]
page = params[:page] || params['page'] || 0
hits_per_page = params[:hitsPerPage] || params['hitsPerPage'] || 20
params = {
:query => query,
:page => page,
:hitsPerPage => hits_per_page
}
params[:anchoring] = anchoring unless anchoring.nil?
params[:context] = context unless context.nil?
client.post(Protocol.search_rules_uri(name), params.to_json, :read, request_options)
end
#
# Get a rule
#
# @param objectID the rule objectID
# @param request_options contains extra parameters to send with your query
#
def get_rule(objectID, request_options = {})
client.get(Protocol.rule_uri(name, objectID), :read, request_options)
end
#
# Delete a rule
#
# @param objectID the rule objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_rule(objectID, forward_to_replicas = false, request_options = {})
client.delete("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", :write, request_options)
end
#
# Delete a rule and wait for the end of indexing
#
# @param objectID the rule objectID
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def delete_rule!(objectID, forward_to_replicas = false, request_options = {})
res = delete_rule(objectID, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Save a rule
#
# @param objectID the rule objectID
# @param rule the rule
# @param forward_to_replicas should we forward the save to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_rule(objectID, rule, forward_to_replicas = false, request_options = {})
raise ArgumentError.new('objectID must not be blank') if objectID.nil? || objectID == ''
client.put("#{Protocol.rule_uri(name, objectID)}?forwardToReplicas=#{forward_to_replicas}", rule.to_json, :write, request_options)
end
#
# Save a rule and wait for the end of indexing
#
# @param objectID the rule objectID
# @param rule the rule
# @param forward_to_replicas should we forward the save to replica indices
# @param request_options contains extra parameters to send with your query
#
def save_rule!(objectID, rule, forward_to_replicas = false, request_options = {})
res = save_rule(objectID, rule, forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Clear all rules
#
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_rules(forward_to_replicas = false, request_options = {})
client.post("#{Protocol.clear_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}", {}, :write, request_options)
end
#
# Clear all rules and wait for the end of indexing
#
# @param forward_to_replicas should we forward the delete to replica indices
# @param request_options contains extra parameters to send with your query
#
def clear_rules!(forward_to_replicas = false, request_options = {})
res = clear_rules(forward_to_replicas, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Add/Update an array of rules
#
# @param rules the array of rules to add/update
# @param forward_to_replicas should we forward the operation to replica indices
# @param clear_existing_rules should we clear the existing rules before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_rules(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {})
client.post("#{Protocol.batch_rules_uri(name)}?forwardToReplicas=#{forward_to_replicas}&clearExistingRules=#{clear_existing_rules}", rules.to_json, :batch, request_options)
end
#
# Add/Update an array of rules and wait for the end of indexing
#
# @param rules the array of rules to add/update
# @param forward_to_replicas should we forward the operation to replica indices
# @param clear_existing_rules should we clear the existing rules before adding the new ones
# @param request_options contains extra parameters to send with your query
#
def batch_rules!(rules, forward_to_replicas = false, clear_existing_rules = false, request_options = {})
res = batch_rules(rules, forward_to_replicas, clear_existing_rules, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Replace rules in the index by the given array of rules
#
# @param rules the array of rules to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_rules(rules, request_options = {})
forward_to_replicas = request_options[:forwardToReplicas] || request_options['forwardToReplicas'] || false
batch_rules(rules, forward_to_replicas, true, request_options)
end
#
# Replace rules in the index with the given array of rules and wait for the end of indexing
#
# @param rules the array of rules to add
# @param request_options contains extra parameters to send with your query
#
def replace_all_rules!(rules, request_options = {})
res = replace_all_rules(rules, request_options)
wait_task(res['taskID'], WAIT_TASK_DEFAULT_TIME_BEFORE_RETRY, request_options)
res
end
#
# Export the full list of rules
# Accepts an optional block to which it will pass each rule
# Also returns an array with all the rules
#
# @param hits_per_page Number of rules to retrieve on each internal request - Optional - Default: 100
# @param request_options contains extra parameters to send with your query - Optional
#
def export_rules(hits_per_page = 100, request_options = {}, &_block)
res = []
page = 0
loop do
curr = search_rules('', { :hitsPerPage => hits_per_page, :page => page }, request_options)['hits']
curr.each do |rule|
res << rule
yield rule if block_given?
end
break if curr.size < hits_per_page
page += 1
end
res
end
# Deprecated
alias_method :get_user_key, :get_api_key
alias_method :list_user_keys, :list_api_keys
alias_method :add_user_key, :add_api_key
alias_method :update_user_key, :update_api_key
alias_method :delete_user_key, :delete_api_key
private
def check_array(object)
raise ArgumentError.new('argument must be an array of objects') if !object.is_a?(Array)
end
def check_object(object, in_array = false)
case object
when Array
raise ArgumentError.new(in_array ? 'argument must be an array of objects' : 'argument must not be an array')
when String, Integer, Float, TrueClass, FalseClass, NilClass
raise ArgumentError.new("argument must be an #{'array of' if in_array} object, got: #{object.inspect}")
else
# ok
end
end
def get_objectID(object, objectID = nil)
check_object(object)
objectID ||= object[:objectID] || object['objectID']
raise ArgumentError.new("Missing 'objectID'") if objectID.nil?
return objectID
end
def build_batch(action, objects, with_object_id = false)
check_array(objects)
{
:requests => objects.map { |object|
check_object(object, true)
h = { :action => action, :body => object }
h[:objectID] = get_objectID(object).to_s if with_object_id
h
}
}
end
def sanitized_delete_by_query_params(params)
params ||= {}
params.delete(:hitsPerPage)
params.delete('hitsPerPage')
params.delete(:attributesToRetrieve)
params.delete('attributesToRetrieve')
params
end
end
|
chicks/sugarcrm | lib/sugarcrm/base.rb | SugarCRM.Base.save | ruby | def save(opts={})
options = { :validate => true }.merge(opts)
return false if !(new_record? || changed?)
if options[:validate]
return false if !valid?
end
begin
save!(options)
rescue
return false
end
true
end | Saves the current object and checks that required fields are present.
Returns true or false. | train | https://github.com/chicks/sugarcrm/blob/360060139b13788a7ec462c6ecd08d3dbda9849a/lib/sugarcrm/base.rb#L190-L202 | module SugarCRM; class Base
# Unset all of the instance methods we don't need.
instance_methods.each { |m| undef_method m unless m =~ /(^__|^send$|^object_id$|^define_method$|^class$|^nil.$|^methods$|^instance_of.$|^respond_to)/ }
# Tracks if we have extended our class with attribute methods yet.
class_attribute :attribute_methods_generated
self.attribute_methods_generated = false
class_attribute :association_methods_generated
self.association_methods_generated = false
class_attribute :_module
self._module = nil
# the session to which we're linked
class_attribute :session
self.session = nil
# Contains a list of attributes
attr_accessor :attributes, :modified_attributes, :associations, :debug, :errors
class << self # Class methods
def find(*args, &block)
options = args.extract_options!
# add default sorting date (necessary for first and last methods to work)
# most modules (Contacts, Accounts, etc.) use 'date_entered' to store when the record was created
# other modules (e.g. EmailAddresses) use 'date_created'
# Here, we account for this discrepancy...
self.new # make sure the fields are loaded from SugarCRM so method_defined? will work properly
if self.method_defined? :date_entered
sort_criteria = 'date_entered'
elsif self.method_defined? :date_created
sort_criteria = 'date_created'
# Added date_modified because TeamSets doesn't have a date_created or date_entered field.
# There's no test for this because it's Pro and above only.
# Hope this doesn't break anything!
elsif self.method_defined? :date_modified
sort_criteria = 'date_modified'
else
raise InvalidAttribute, "Unable to determine record creation date for sorting criteria: expected date_entered, date_created, or date_modified attribute to be present"
end
options = {:order_by => sort_criteria}.merge(options)
validate_find_options(options)
case args.first
when :first
find_initial(options)
when :last
begin
options[:order_by] = reverse_order_clause(options[:order_by].to_s)
rescue Exception => e
raise
end
find_initial(options)
when :all
Array.wrap(find_every(options, &block)).compact
else
find_from_ids(args, options, &block)
end
end
# return the connection to the correct SugarCRM server (there can be several)
def connection
self.session.connection
end
# return the number of records satifsying the options
# note: the REST API has a bug (documented with Sugar as bug 43339) where passing custom attributes in the options will result in the
# options being ignored and '0' being returned, regardless of the existence of records satisfying the options
def count(options={})
raise InvalidAttribute, 'Conditions on custom attributes are not supported due to REST API bug' if contains_custom_attribute(options[:conditions])
query = query_from_options(options)
connection.get_entries_count(self._module.name, query, options)['result_count'].to_i
end
# A convenience wrapper for <tt>find(:first, *args)</tt>. You can pass in all the
# same arguments to this method as you can to <tt>find(:first)</tt>.
def first(*args, &block)
find(:first, *args, &block)
end
# A convenience wrapper for <tt>find(:last, *args)</tt>. You can pass in all the
# same arguments to this method as you can to <tt>find(:last)</tt>.
def last(*args, &block)
find(:last, *args, &block)
end
# This is an alias for find(:all). You can pass in all the same arguments to this method as you can
# to find(:all)
def all(*args, &block)
find(:all, *args, &block)
end
# Creates an object (or multiple objects) and saves it to SugarCRM if validations pass.
# The resulting object is returned whether the object was saved successfully to the database or not.
#
# The +attributes+ parameter can be either be a Hash or an Array of Hashes. These Hashes describe the
# attributes on the objects that are to be created.
#
# ==== Examples
# # Create a single new object
# User.create(:first_name => 'Jamie')
#
# # Create an Array of new objects
# User.create([{ :first_name => 'Jamie' }, { :first_name => 'Jeremy' }])
#
# # Create a single object and pass it into a block to set other attributes.
# User.create(:first_name => 'Jamie') do |u|
# u.is_admin = false
# end
#
# # Creating an Array of new objects using a block, where the block is executed for each object:
# User.create([{ :first_name => 'Jamie' }, { :first_name => 'Jeremy' }]) do |u|
# u.is_admin = false
# end
def create(attributes = nil, &block)
if attributes.is_a?(Array)
attributes.collect { |attr| create(attr, &block) }
else
object = new(attributes)
yield(object) if block_given?
object.save
object
end
end
end
# Creates an instance of a Module Class, i.e. Account, User, Contact, etc.
def initialize(attributes={}, &block)
attributes.delete('id')
@errors = {}
@modified_attributes = {}
merge_attributes(attributes.with_indifferent_access)
clear_association_cache
define_attribute_methods
define_association_methods
typecast_attributes
self
end
def inspect
self
end
def to_s
attrs = []
@attributes.keys.sort.each do |k|
attrs << "#{k}: #{attribute_for_inspect(k)}"
end
"#<#{self.class} #{attrs.join(", ")}>"
end
# objects are considered equal if they represent the same SugarCRM record
# this behavior is required for Rails to be able to properly cast objects to json (lists, in particular)
def equal?(other)
return false unless other && other.respond_to?(:id)
self.id == other.id
end
# return variables that are defined in SugarCRM, instead of the object's actual variables (such as modified_attributes, errors, etc.)
def instance_variables
@_instance_variables ||= @attributes.keys.map{|i| ('@' + i).to_sym }
end
# override to return the value of the SugarCRM record's attributes
def instance_variable_get(name)
name = name.to_s.gsub(/^@/,'')
@attributes[name]
end
# Rails requires this to (e.g.) generate json representations of models
# this code taken directly from the Rails project
if defined?(Rails)
def instance_values
Hash[instance_variables.map { |name| [name.to_s[1..-1], instance_variable_get(name)] }]
end
end
def to_json(options={})
attributes.to_json
end
def to_xml(options={})
attributes.to_xml
end
# Saves the current object and checks that required fields are present.
# Returns true or false.
# Saves the current object, and any modified associations.
# Raises an exception if save fails for any reason.
def save!(opts={})
save_modified_attributes!(opts)
save_modified_associations!
true
end
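# A minimal usage sketch (hedged: assumes a connected SugarCRM session and a
# hypothetical Account module class generated from the server metadata):
#
#   account = SugarCRM::Account.new(:name => 'Acme')
#   account.save                     # validates and returns true or false
#   account.save(:validate => false) # skips validations
#   account.save!                    # raises on failure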
def delete
return false if id.blank?
params = {}
params[:id] = serialize_id
params[:deleted]= {:name => "deleted", :value => "1"}
@attributes[:deleted] = (self.class.connection.set_entry(self.class._module.name, params).class == Hash)
end
alias :destroy :delete
# Returns whether the record is persisted, i.e. it's not a new record and it was not destroyed
def persisted?
!(new_record? || destroyed?)
end
# Reloads the record from SugarCRM
def reload!
self.attributes = self.class.find(self.id).attributes
end
def blank?
@attributes.empty?
end
alias :empty? :blank?
# Returns true if +comparison_object+ is the same exact object, or +comparison_object+
# is of the same type and +self+ has an ID and it is equal to +comparison_object.id+.
#
# Note that new records are different from any other record by definition, unless the
# other record is the receiver itself. Besides, if you fetch existing records with
# +select+ and leave the ID out, you're on your own, this predicate will return false.
#
# Note also that destroying a record preserves its ID in the model instance, so deleted
# models are still comparable.
def ==(comparison_object)
comparison_object.instance_of?(self.class) &&
id.present? &&
comparison_object.id == id
end
alias :eql? :==
def update_attribute!(name, value)
self.send("#{name}=".to_sym, value)
self.save!
end
def update_attribute(name, value)
begin
update_attribute!(name, value)
rescue
return false
end
true
end
def update_attributes!(attributes)
attributes.each do |name, value|
self.send("#{name}=".to_sym, value)
end
self.save!
end
def update_attributes(attributes)
begin
update_attributes!(attributes)
rescue
return false
end
true
end
# Returns the URL (in string format) where the module instance is available in CRM
def url
"#{SugarCRM.session.config[:base_url]}/index.php?module=#{self.class._module.name}&action=DetailView&record=#{self.id}"
end
# Delegates to id in order to allow two records of the same type and id to work with something like:
# [ Person.find(1), Person.find(2), Person.find(3) ] & [ Person.find(1), Person.find(4) ] # => [ Person.find(1) ]
def hash
id.hash
end
def pretty_print(pp)
pp.text self.inspect.to_s, 0
end
def attribute_methods_generated?
self.class.attribute_methods_generated
end
def association_methods_generated?
self.class.association_methods_generated
end
def to_key
new_record? ? nil : [ id ]
end
def to_param
id.to_s
end
def is_a?(klass)
superclasses.include? klass
end
alias :kind_of? :is_a?
alias :=== :is_a?
private
# returns true if the hash contains a custom attribute created in Studio (and whose name therefore ends in '_c')
def self.contains_custom_attribute(attributes)
attributes ||= {}
attributes.each_key{|k|
return true if k.to_s =~ /_c$/
}
false
end
def superclasses
return @superclasses if @superclasses
@superclasses = [self.class]
current_class = self.class
while current_class.respond_to? :superclass
@superclasses << (current_class = current_class.superclass)
end
@superclasses
end
Base.class_eval do
extend FinderMethods::ClassMethods
include AttributeMethods
extend AttributeMethods::ClassMethods
include AttributeValidations
include AttributeTypeCast
include AttributeSerializers
include AssociationMethods
extend AssociationMethods::ClassMethods
include AssociationCache
end
end; end |
ideonetwork/lato-blog | lib/lato_blog/interfaces/categories.rb | LatoBlog.Interface::Categories.blog__clean_category_parents | ruby | def blog__clean_category_parents
category_parents = LatoBlog::CategoryParent.all
category_parents.map { |cp| cp.destroy if cp.categories.empty? }
end | This function cleans all old category parents without any child. | train | https://github.com/ideonetwork/lato-blog/blob/a0d92de299a0e285851743b9d4a902f611187cba/lib/lato_blog/interfaces/categories.rb#L28-L31 | module Interface::Categories
# This function creates the default category if it does not exist.
def blog__create_default_category
category_parent = LatoBlog::CategoryParent.find_by(meta_default: true)
return if category_parent
category_parent = LatoBlog::CategoryParent.new(meta_default: true)
throw 'Impossible to create default category parent' unless category_parent.save
languages = blog__get_languages_identifier
languages.each do |language|
category = LatoBlog::Category.new(
title: 'Default',
meta_permalink: "default_#{language}",
meta_language: language,
lato_core_superuser_creator_id: 1,
lato_blog_category_parent_id: category_parent.id
)
throw 'Impossible to create default category' unless category.save
end
end
# This function cleans all old category parents without any child.
# This function returns an object with the list of categories with some filters.
def blog__get_categories(
order: nil,
language: nil,
search: nil,
page: nil,
per_page: nil
)
categories = LatoBlog::Category.all
# apply filters
order = order && order == 'ASC' ? 'ASC' : 'DESC'
categories = _categories_filter_by_order(categories, order)
categories = _categories_filter_by_language(categories, language)
categories = _categories_filter_search(categories, search)
# take categories uniqueness
categories = categories.uniq(&:id)
# save total categories
total = categories.length
# manage pagination
page = page&.to_i || 1
per_page = per_page&.to_i || 20
categories = core__paginate_array(categories, per_page, page)
# return result
{
categories: categories && !categories.empty? ? categories.map(&:serialize) : [],
page: page,
per_page: per_page,
order: order,
total: total
}
end
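# An illustrative call (filter values are assumptions):
#
#   result = blog__get_categories(language: 'en', search: 'news', page: 1, per_page: 10)
#   result[:categories] # => array of serialized categories
#   result[:total]      # => number of matches before pagination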
# This function returns a single category searched by id or permalink.
def blog__get_category(id: nil, permalink: nil)
return {} unless id || permalink
if id
category = LatoBlog::Category.find_by(id: id.to_i)
else
category = LatoBlog::Category.find_by(meta_permalink: permalink)
end
category.serialize
end
private
def _categories_filter_by_order(categories, order)
categories.order("title #{order}")
end
def _categories_filter_by_language(categories, language)
return categories unless language
categories.where(meta_language: language)
end
def _categories_filter_search(categories, search)
return categories unless search
categories.where('title like ?', "%#{search}%")
end
end
|
mongodb/mongoid | lib/mongoid/serializable.rb | Mongoid.Serializable.serialize_attribute | ruby | def serialize_attribute(attrs, name, names, options)
if relations.key?(name)
value = send(name)
attrs[name] = value ? value.serializable_hash(options) : nil
elsif names.include?(name) && !fields.key?(name)
attrs[name] = read_raw_attribute(name)
elsif !attribute_missing?(name)
attrs[name] = send(name)
end
end | Serialize a single attribute. Handles associations, fields, and dynamic
attributes.
@api private
@example Serialize the attribute.
document.serialize_attribute({}, "id" , [ "id" ])
@param [ Hash ] attrs The attributes.
@param [ String ] name The attribute name.
@param [ Array<String> ] names The names of all attributes.
@param [ Hash ] options The options.
@return [ Object ] The attribute.
@since 3.0.0 | train | https://github.com/mongodb/mongoid/blob/56976e32610f4c2450882b0bfe14da099f0703f4/lib/mongoid/serializable.rb#L100-L109 | module Serializable
extend ActiveSupport::Concern
# We need to redefine where the JSON configuration is getting defined,
# similar to +ActiveRecord+.
included do
undef_method :include_root_in_json
delegate :include_root_in_json, to: ::Mongoid
end
# Gets the document as a serializable hash, used by ActiveModel's JSON
# serializer.
#
# @example Get the serializable hash.
# document.serializable_hash
#
# @example Get the serializable hash with options.
# document.serializable_hash(:include => :addresses)
#
# @param [ Hash ] options The options to pass.
#
# @option options [ Symbol ] :include What associations to include.
# @option options [ Symbol ] :only Limit the fields to only these.
# @option options [ Symbol ] :except Don't include these fields.
# @option options [ Symbol ] :methods What methods to include.
#
# @return [ Hash ] The document, ready to be serialized.
#
# @since 2.0.0.rc.6
def serializable_hash(options = nil)
options ||= {}
attrs = {}
names = field_names(options)
method_names = Array.wrap(options[:methods]).map do |name|
name.to_s if respond_to?(name)
end.compact
(names + method_names).each do |name|
without_autobuild do
serialize_attribute(attrs, name, names, options)
end
end
serialize_relations(attrs, options) if options[:include]
attrs
end
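# A hedged sketch (assumes a Band model with a name field and an embedded
# :albums association):
#
#   band.serializable_hash(only: [:name], include: :albums)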
private
# Get the names of all fields that will be serialized.
#
# @api private
#
# @example Get all the field names.
# document.send(:field_names)
#
# @return [ Array<String> ] The names of the fields.
#
# @since 3.0.0
def field_names(options)
names = (as_attributes.keys + attribute_names).uniq.sort
only = Array.wrap(options[:only]).map(&:to_s)
except = Array.wrap(options[:except]).map(&:to_s)
except |= ['_type'] unless Mongoid.include_type_for_serialization
if !only.empty?
names &= only
elsif !except.empty?
names -= except
end
names
end
# Serialize a single attribute. Handles associations, fields, and dynamic
# attributes.
#
# @api private
#
# @example Serialize the attribute.
# document.serialize_attribute({}, "id" , [ "id" ])
#
# @param [ Hash ] attrs The attributes.
# @param [ String ] name The attribute name.
# @param [ Array<String> ] names The names of all attributes.
# @param [ Hash ] options The options.
#
# @return [ Object ] The attribute.
#
# @since 3.0.0
# For each of the provided include options, get the association needed and
# provide it in the hash.
#
# @example Serialize the included associations.
# document.serialize_relations({}, :include => :addresses)
#
# @param [ Hash ] attributes The attributes to serialize.
# @param [ Hash ] options The serialization options.
#
# @option options [ Symbol ] :include What associations to include
# @option options [ Symbol ] :only Limit the fields to only these.
# @option options [ Symbol ] :except Don't include these fields.
#
# @since 2.0.0.rc.6
def serialize_relations(attributes = {}, options = {})
inclusions = options[:include]
relation_names(inclusions).each do |name|
association = relations[name.to_s]
if association && relation = send(association.name)
attributes[association.name.to_s] =
relation.serializable_hash(relation_options(inclusions, options, name))
end
end
end
# Since the inclusions can be a hash, symbol, or array of symbols, this is
# provided as a convenience to parse out the names.
#
# @example Get the association names.
# document.relation_names(:include => [ :addresses ])
#
# @param [ Hash, Symbol, Array<Symbol> ] inclusions The inclusions.
#
# @return [ Array<Symbol> ] The names of the included associations.
#
# @since 2.0.0.rc.6
def relation_names(inclusions)
inclusions.is_a?(Hash) ? inclusions.keys : Array.wrap(inclusions)
end
# Since the inclusions can be a hash, symbol, or array of symbols, this is
# provided as a convenience to parse out the options.
#
# @example Get the association options.
# document.relation_options(:include => [ :addresses ])
#
# @param [ Hash, Symbol, Array<Symbol> ] inclusions The inclusions.
# @param [ Hash ] options The options.
# @param [ Symbol ] name The name of the association.
#
# @return [ Hash ] The options for the association.
#
# @since 2.0.0.rc.6
def relation_options(inclusions, options, name)
if inclusions.is_a?(Hash)
inclusions[name]
else
{ except: options[:except], only: options[:only] }
end
end
end
|
stevedowney/rails_view_helpers | app/helpers/rails_view_helpers/html_helper.rb | RailsViewHelpers.HtmlHelper.td_bln | ruby | def td_bln(*args)
options = canonicalize_options(args.extract_options!)
options = ensure_class(options, 'c')
content_tag(:td, bln(*args), options)
end | Same as +bln+ but wrapped in a TD and centered (w/rail_view_helper.css)
@example
td_bln(true) #=> <td class="c">✔</td>
@return [String] | train | https://github.com/stevedowney/rails_view_helpers/blob/715c7daca9434c763b777be25b1069ecc50df287/app/helpers/rails_view_helpers/html_helper.rb#L48-L53 | module HtmlHelper
# Includes controller and action name as data attributes.
#
# @example
# body_tag() #=> <body data-action='index' data-controller='home'>
#
# body_tag(id: 'my-id', class: 'my-class') #=> <body class="my-class" data-action="index" data-controller="home" id="my-id">
# @param options [Hash] become attributes of the BODY tag
# @return [String]
def body_tag(options={}, &block)
options = canonicalize_options(options)
options.delete(:class) if options[:class].blank?
options[:data] ||= {}
options[:data][:controller] = controller.controller_name
options[:data][:action] = controller.action_name
content_tag(:body, options) do
yield
end
end
# Displays a check-mark (✔) when _bool_ is +true+, else +nbsp+.
#
# @example
# bln(true) #=> "✔" (check mark)
# bln(false) #=> " " (space)
# bln(true, 'Admin') #=> "Admin"
# bln(false, 'Admin', "not Admin") #=> "not Admin"
# @param bool [Boolean]
# @param true_string [String] the string to display if _bool_ is +true+. Caller is responsible for escaping and/or marking html_safe.
# @param false_string [String] the string to display if _bool_ is +false+. Caller is responsible for escaping and/or marking html_safe.
def bln(bool, true_string="✔".html_safe, false_string=nbsp)
if bool
true_string
else
false_string
end
end
# Same as +bln+ but wrapped in a TD and centered (w/rail_view_helper.css)
#
# @example
# td_bln(true) #=> <td class="c">✔</td>
# @return [String]
def th_actions(*args)
options = canonicalize_options(args.extract_options!)
colspan = args.shift || 1
text = args.shift || 'Actions'
options[:colspan] = colspan
options[:class] = 'c' if options[:class].empty?
content_tag(:th, text, options)
end
# Returns one or more non-breaking spaces ( ) marked +html_safe+.
#
# @example
# nbsp #=> " "
# nbsp(3) #=> " "
#
# @param count [FixNum] the number of non-breaking spaces to return
# @return [String] that is html_safe
def nbsp(count=1)
(' ' * count).html_safe
end
end
|
colstrom/ezmq | lib/ezmq/subscribe.rb | EZMQ.Subscriber.receive | ruby | def receive(**options)
message = ''
@socket.recv_string message
message = message.match(/^(?<topic>[^\ ]*)\ (?<body>.*)/m)
decoded = (options[:decode] || @decode).call message['body']
if block_given?
yield decoded, message['topic']
else
[decoded, message['topic']]
end
end | Creates a new Subscriber socket.
@note The default behaviour is to output any messages received to STDOUT.
@param [:bind, :connect] mode (:connect) a mode for the socket.
@param [Hash] options optional parameters.
@option options [String] topic a topic to subscribe to.
@see EZMQ::Socket EZMQ::Socket for optional parameters.
@return [Subscriber] a new instance of Subscriber.
Receive a message from the socket.
@note This method blocks until a message arrives.
@param [Hash] options optional parameters.
@option options [lambda] decode how to decode the message.
@yield [message, topic] passes the message body and topic to the block.
@yieldparam [Object] message the message received (decoded).
@yieldparam [String] topic the topic of the message.
@return [Object] the message received (decoded). | train | https://github.com/colstrom/ezmq/blob/cd7f9f256d6c3f7a844871a3a77a89d7122d5836/lib/ezmq/subscribe.rb#L36-L48 | class Subscriber < EZMQ::Socket
# Creates a new Subscriber socket.
#
# @note The default behaviour is to output any messages received to STDOUT.
#
# @param [:bind, :connect] mode (:connect) a mode for the socket.
# @param [Hash] options optional parameters.
# @option options [String] topic a topic to subscribe to.
# @see EZMQ::Socket EZMQ::Socket for optional parameters.
#
# @return [Publisher] a new instance of Publisher.
#
def initialize(mode = :connect, **options)
super mode, ZMQ::SUB, options
subscribe options[:topic] if options[:topic]
end
# Receive a message from the socket.
#
# @note This method blocks until a message arrives.
#
# @param [Hash] options optional parameters.
# @option options [lambda] decode how to decode the message.
#
# @yield [message, topic] passes the message body and topic to the block.
# @yieldparam [Object] message the message received (decoded).
# @yieldparam [String] topic the topic of the message.
#
# @return [Object] the message received (decoded).
#
# Like receive, but doesn't stop at one message.
#
# @yield [message, topic] passes the message body and topic to the block.
# @yieldparam [String] message the message received.
# @yieldparam [String] topic the topic of the message.
#
# @return [void]
#
def listen(&block)
loop do
block.call(*receive)
end
end
# Establishes a new message filter on the socket.
#
# @note By default, a Subscriber filters all incoming messages. Without
# calling subscribe at least once, no messages will be accepted. If topic
# was provided, #initialize calls #subscribe automatically.
#
# @param [String] topic a topic to subscribe to. Messages matching this
# prefix will be accepted.
#
# @return [Boolean] was subscription successful?
#
def subscribe(topic)
@socket.setsockopt(ZMQ::SUBSCRIBE, topic) == 0
end
# Removes a message filter (as set with subscribe) from the socket.
#
# @param [String] topic the topic to unsubscribe from. If multiple filters
# with the same topic are set, this will only remove one.
#
# @return [Boolean] was unsubscription successful?
#
def unsubscribe(topic)
@socket.setsockopt(ZMQ::UNSUBSCRIBE, topic) == 0
end
end
|
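A minimal subscriber sketch built from the class above; the default endpoint and the 'logs' topic are assumptions for illustration.

require 'ezmq'

# Connect a SUB socket and filter on the 'logs' topic (see #subscribe).
subscriber = EZMQ::Subscriber.new(:connect, topic: 'logs')

# Block forever, yielding each decoded message body and its topic.
subscriber.listen do |message, topic|
  puts "[#{topic}] #{message}"
end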
watsonbox/pocketsphinx-ruby | lib/pocketsphinx/decoder.rb | Pocketsphinx.Decoder.decode_raw | ruby | def decode_raw(audio_file, max_samples = 2048)
start_utterance
FFI::MemoryPointer.new(:int16, max_samples) do |buffer|
while data = audio_file.read(max_samples * 2)
buffer.write_string(data)
process_raw(buffer, data.length / 2)
end
end
end_utterance
end | Decode a raw audio stream as a single utterance.
No headers are recognized in this file. The configuration parameters samprate
and input_endian are used to determine the sampling rate and endianness of the stream,
respectively. Audio is always assumed to be 16-bit signed PCM.
@param [IO] audio_file The raw audio stream to decode as a single utterance
@param [Fixnum] max_samples The maximum samples to process from the stream on each iteration | train | https://github.com/watsonbox/pocketsphinx-ruby/blob/12c71c35285c38b42bd7779c8246923bd5be150f/lib/pocketsphinx/decoder.rb#L71-L82 | class Decoder
require 'delegate'
include API::CallHelpers
class Hypothesis < SimpleDelegator
attr_accessor :path_score
attr_accessor :posterior_prob
def initialize(string, path_score, posterior_prob = nil)
@path_score = path_score
@posterior_prob = posterior_prob
super(string)
end
end
Word = Struct.new(:word, :start_frame, :end_frame)
attr_writer :ps_api
attr_accessor :configuration
# Initialize a Decoder
#
# Note that this initialization process actually updates the Configuration based on settings
# which are found in feat.params along with the acoustic model.
#
# @param [Configuration] configuration
# @param [FFI::Pointer] ps_decoder An optional Pocketsphinx decoder. One is initialized if not provided.
def initialize(configuration, ps_decoder = nil)
@configuration = configuration
init_decoder if ps_decoder.nil?
end
# Reinitialize the decoder with updated configuration.
#
# This function allows you to switch the acoustic model, dictionary, or other configuration
# without creating an entirely new decoding object.
#
# @param [Configuration] configuration An optional new configuration to use. If this is
# nil, the previous configuration will be reloaded, with any changes applied.
def reconfigure(configuration = nil)
self.configuration = configuration if configuration
reinit_decoder
end
# Decode a raw audio stream as a single utterance, opening a file if path given
#
# See #decode_raw
#
# @param [IO] audio_path_or_file The raw audio stream or file path to decode as a single utterance
# @param [Fixnum] max_samples The maximum samples to process from the stream on each iteration
def decode(audio_path_or_file, max_samples = 2048)
case audio_path_or_file
when String
File.open(audio_path_or_file, 'rb') { |f| decode_raw(f, max_samples) }
else
decode_raw(audio_path_or_file, max_samples)
end
end
# Decode a raw audio stream as a single utterance.
#
# No headers are recognized in this file. The configuration parameters samprate
# and input_endian are used to determine the sampling rate and endianness of the stream,
# respectively. Audio is always assumed to be 16-bit signed PCM.
#
# @param [IO] audio_file The raw audio stream to decode as a single utterance
# @param [Fixnum] max_samples The maximum samples to process from the stream on each iteration
# Decode raw audio data.
#
# @param [Boolean] no_search If non-zero, perform feature extraction but don't do any
# recognition yet. This may be necessary if your processor has trouble doing recognition in
# real-time.
# @param [Boolean] full_utt If non-zero, this block of data is a full utterance
# worth of data. This may allow the recognizer to produce more accurate results.
# @return Number of frames of data searched
def process_raw(buffer, size, no_search = false, full_utt = false)
api_call :ps_process_raw, ps_decoder, buffer, size, no_search ? 1 : 0, full_utt ? 1 : 0
end
# Start utterance processing.
#
# This function should be called before any utterance data is passed
# to the decoder. It marks the start of a new utterance and
# reinitializes internal data structures.
def start_utterance
api_call :ps_start_utt, ps_decoder
end
# End utterance processing
def end_utterance
api_call :ps_end_utt, ps_decoder
end
# Checks if the last feed audio buffer contained speech
def in_speech?
ps_api.ps_get_in_speech(ps_decoder) != 0
end
# Get hypothesis string (with #path_score and #utterance_id).
#
# @return [Hypothesis] Hypothesis (behaves like a string)
def hypothesis
mp_path_score = FFI::MemoryPointer.new(:int32, 1)
hypothesis = ps_api.ps_get_hyp(ps_decoder, mp_path_score)
posterior_prob = ps_api.ps_get_prob(ps_decoder)
hypothesis.nil? ? nil : Hypothesis.new(
hypothesis,
log_prob_to_linear(mp_path_score.get_int32(0)),
log_prob_to_linear(posterior_prob)
)
end
# Get an array of words with start/end frame values (10msec/frame) for current hypothesis
#
# @return [Array] Array of words with start/end frame values (10msec/frame)
def words
mp_path_score = FFI::MemoryPointer.new(:int32, 1)
start_frame = FFI::MemoryPointer.new(:int32, 1)
end_frame = FFI::MemoryPointer.new(:int32, 1)
seg_iter = ps_api.ps_seg_iter(ps_decoder, mp_path_score)
words = []
until seg_iter.null? do
ps_api.ps_seg_frames(seg_iter, start_frame, end_frame)
words << Pocketsphinx::Decoder::Word.new(
ps_api.ps_seg_word(seg_iter),
start_frame.get_int32(0),
end_frame.get_int32(0)
)
seg_iter = ps_api.ps_seg_next(seg_iter)
end
words
end
# Adds new search using JSGF model.
#
# Convenience method to parse JSGF model from string and create a search.
#
# @param [String] jsgf_string The JSGF grammar
# @param [String] name The search name
def set_jsgf_string(jsgf_string, name = 'default')
api_call :ps_set_jsgf_string, ps_decoder, name, jsgf_string
end
# Returns the name of the current search in the decoder
def get_search
ps_api.ps_get_search(ps_decoder)
end
# Activates search with the provided name.
#
# Activates search with the provided name. The search must be added before
# using either ps_set_fsg(), ps_set_lm() or ps_set_kws().
def set_search(name = 'default')
api_call :ps_set_search, ps_decoder, name
end
# Unsets the search and releases related resources.
#
# Unsets the search previously added using either
# ps_set_fsg(), ps_set_lm() or ps_set_kws().
def unset_search(name = 'default')
api_call :ps_unset_search, ps_decoder, name
end
def ps_api
@ps_api || API::Pocketsphinx
end
def ps_decoder
init_decoder if @ps_decoder.nil?
@ps_decoder
end
private
def init_decoder
@ps_decoder = ps_api.ps_init(configuration.ps_config)
post_init_decoder
end
def reinit_decoder
ps_api.ps_reinit(ps_decoder, configuration.ps_config).tap do |result|
raise API::Error, "Decoder#reconfigure failed with error code #{result}" if result < 0
post_init_decoder
end
end
def post_init_decoder
if configuration.respond_to?(:post_init_decoder)
configuration.post_init_decoder(self)
end
end
# Convert logarithmic probability to linear floating point
def log_prob_to_linear(log_prob)
logmath = ps_api.ps_get_logmath(ps_decoder)
ps_api.logmath_exp(logmath, log_prob)
end
end
|
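A short sketch of driving Decoder#decode_raw through #decode, assuming the gem's default configuration; 'goforward.raw' is a placeholder for a 16-bit mono PCM file.

require 'pocketsphinx-ruby'

decoder = Pocketsphinx::Decoder.new(Pocketsphinx::Configuration.default)
decoder.decode 'goforward.raw'   # opens the file and feeds it to #decode_raw

puts decoder.hypothesis          # best hypothesis string (carries #path_score)
decoder.words.each do |word|
  puts "#{word.word}: frames #{word.start_frame}..#{word.end_frame}"
end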
etewiah/property_web_builder | app/controllers/pwb/import/translations_controller.rb | Pwb.Import::TranslationsController.multiple | ruby | def multiple
I18n::Backend::ActiveRecord::Translation.import(params[:file])
return render json: { "success": true }, status: :ok, head: :no_content
# redirect_to root_url, notice: "I18n::Backend::ActiveRecord::Translations imported."
end | http://localhost:3000/import/translations/multiple | train | https://github.com/etewiah/property_web_builder/blob/fba4e6d4ffa7bc1f4d3b50dfa5a6a9fbfee23f21/app/controllers/pwb/import/translations_controller.rb#L5-L11 | class Import::TranslationsController < ApplicationApiController
# http://localhost:3000/import/translations/multiple
end
|
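A hypothetical request-spec sketch for the import endpoint above; the fixture path and RSpec helpers are assumptions.

# spec/requests/import_translations_spec.rb (hypothetical)
file = fixture_file_upload('files/translations.csv', 'text/csv')
post '/import/translations/multiple', params: { file: file }

expect(response).to have_http_status(:ok)
expect(JSON.parse(response.body)).to eq('success' => true)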
nylas/nylas-ruby | lib/nylas/api.rb | Nylas.API.revoke | ruby | def revoke(access_token)
response = client.as(access_token).post(path: "/oauth/revoke")
response.code == 200 && response.empty?
end | Revokes access to the Nylas API for the given access token
@return [Boolean] | train | https://github.com/nylas/nylas-ruby/blob/5453cf9b2e9d80ee55e38ff5a6c8b19b8d5c262d/lib/nylas/api.rb#L97-L100 | class API
attr_accessor :client
extend Forwardable
def_delegators :client, :execute, :get, :post, :put, :delete, :app_id
include Logging
# @param client [HttpClient] Http Client to use for retrieving data
# @param app_id [String] Your application id from the Nylas Dashboard
# @param app_secret [String] Your application secret from the Nylas Dashboard
# @param access_token [String] (Optional) Your users access token.
# @param api_server [String] (Optional) Which Nylas API Server to connect to. Only change this if
# you're using a self-hosted Nylas instance.
# @param service_domain [String] (Optional) Host you are authenticating OAuth against.
# @return [Nylas::API]
def initialize(client: nil, app_id: nil, app_secret: nil, access_token: nil,
api_server: "https://api.nylas.com", service_domain: "api.nylas.com")
self.client = client || HttpClient.new(app_id: app_id, app_secret: app_secret,
access_token: access_token, api_server: api_server,
service_domain: service_domain)
end
# @return [String] A Nylas access token for that particular user.
def authenticate(name:, email_address:, provider:, settings:, reauth_account_id: nil, scopes: nil)
NativeAuthentication.new(api: self).authenticate(
name: name,
email_address: email_address,
provider: provider,
settings: settings,
reauth_account_id: reauth_account_id,
scopes: scopes
)
end
# @return [Collection<Contact>] A queryable collection of Contacts
def contacts
@contacts ||= Collection.new(model: Contact, api: self)
end
# @return [CurrentAccount] The account details for whomevers access token is set
def current_account
prevent_calling_if_missing_access_token(:current_account)
CurrentAccount.from_hash(execute(method: :get, path: "/account"), api: self)
end
# @return [Collection<Account>] A queryable collection of {Account}s
def accounts
@accounts ||= Collection.new(model: Account, api: as(client.app_secret))
end
# @return [Collection<Calendar>] A queryable collection of {Calendar}s
def calendars
@calendars ||= Collection.new(model: Calendar, api: self)
end
# @return [DeltasCollection<Delta>] A queryable collection of Deltas, which are themselves a collection.
def deltas
@deltas ||= DeltasCollection.new(api: self)
end
# @return[Collection<Draft>] A queryable collection of {Draft} objects
def drafts
@drafts ||= Collection.new(model: Draft, api: self)
end
# @return [Collection<Event>] A queryable collection of {Event}s
def events
@events ||= EventCollection.new(model: Event, api: self)
end
# @return [Collection<Folder>] A queryable collection of {Folder}s
def folders
@folders ||= Collection.new(model: Folder, api: self)
end
# @return [Collection<File>] A queryable collection of {File}s
def files
@files ||= Collection.new(model: File, api: self)
end
# @return [Collection<Label>] A queryable collection of {Label} objects
def labels
@labels ||= Collection.new(model: Label, api: self)
end
# @return[Collection<Message>] A queryable collection of {Message} objects
def messages
@messages ||= Collection.new(model: Message, api: self)
end
# Revokes access to the Nylas API for the given access token
# @return [Boolean]
# Returns list of IP addresses
# @return [Hash]
# hash has keys of :updated_at (unix timestamp) and :ip_addresses (array of strings)
def ip_addresses
path = "/a/#{app_id}/ip_addresses"
client.as(client.app_secret).get(path: path)
end
# @param message [Hash, String, #send!]
# @return [Message] The resulting message
def send!(message)
return message.send! if message.respond_to?(:send!)
return NewMessage.new(**message.merge(api: self)).send! if message.respond_to?(:key?)
return RawMessage.new(message, api: self).send! if message.is_a? String
end
# Allows you to get an API that acts as a different user but otherwise has the same settings
# @param [String] Oauth Access token or app secret used to authenticate with the API
# @return [API]
def as(access_token)
API.new(client: client.as(access_token))
end
# @return [Collection<Thread>] A queryable collection of Threads
def threads
@threads ||= Collection.new(model: Thread, api: self)
end
# @return [Collection<Webhook>] A queryable collection of {Webhook}s
def webhooks
@webhooks ||= Collection.new(model: Webhook, api: as(client.app_secret))
end
private
def prevent_calling_if_missing_access_token(method_name)
return if client.access_token && !client.access_token.empty?
raise NoAuthToken, method_name
end
end
|
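A minimal sketch of revoking a user token with the API class above; the credential strings are placeholders.

require 'nylas'

api = Nylas::API.new(app_id: 'APP_ID', app_secret: 'APP_SECRET')
# #revoke returns true only on a 200 response with an empty body.
puts api.revoke('USER_ACCESS_TOKEN') ? 'token revoked' : 'revocation failed'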
dagrz/nba_stats | lib/nba_stats/stats/scoreboard.rb | NbaStats.Scoreboard.scoreboard | ruby | def scoreboard(
game_date=Date.today,
day_offset=0,
league_id=NbaStats::Constants::LEAGUE_ID_NBA
)
NbaStats::Resources::Scoreboard.new(
get(SCOREBOARD_PATH, {
:LeagueID => league_id,
:GameDate => game_date.strftime('%m-%d-%Y'),
:DayOffset => day_offset
})
)
end | Calls the scoreboard API and returns a Scoreboard resource.
@param game_date [Date]
@param day_offset [Integer]
@param league_id [String]
@return [NbaStats::Resources::Scoreboard] | train | https://github.com/dagrz/nba_stats/blob/d6fe6cf81f74a2ce7a054aeec5e9db59a6ec42aa/lib/nba_stats/stats/scoreboard.rb#L17-L29 | module Scoreboard
# The path of the scoreboard API
SCOREBOARD_PATH = '/stats/scoreboard'
# Calls the scoreboard API and returns a Scoreboard resource.
#
# @param game_date [Date]
# @param day_offset [Integer]
# @param league_id [String]
# @return [NbaStats::Resources::Scoreboard]
end # Scoreboard
|
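A usage sketch for the mixin above; NbaStats::Client is assumed to be the gem object that includes Scoreboard and supplies #get.

require 'nba_stats'

client = NbaStats::Client.new               # assumed entry point
board  = client.scoreboard(Date.today - 1)  # yesterday's games, NBA league by default
# board is an NbaStats::Resources::Scoreboard built from the JSON response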
mrakobeze/vrtk | src/vrtk/applets/version_applet.rb | VRTK::Applets.VersionApplet.run_parse | ruby | def run_parse
obj = {
'name' => VRTK::NAME,
'version' => VRTK::VERSION,
'codename' => VRTK::CODENAME,
'ffmpeg' => ffmpeg_version,
'magick' => magick_version,
'license' => VRTK::LICENSE
}
puts JSON.pretty_unparse obj
end | noinspection RubyStringKeysInHashInspection,RubyResolve | train | https://github.com/mrakobeze/vrtk/blob/444052951949e3faab01f6292345dcd0a789f005/src/vrtk/applets/version_applet.rb#L34-L45 | class VersionApplet < BaseApplet
include VRTK::Utils
def init_options
@pa = false
OptionParser.new do |opts|
opts.on('-p', '--parsable', 'Generate parsable version output') do |v|
@pa = v
end
end
end
def run
if @pa
run_parse
else
run_human
end
end
# noinspection RubyStringKeysInHashInspection,RubyResolve
# noinspection RubyResolve
def run_human
text = []
text << %[#{VRTK::NAME} #{VRTK::VERSION}]
text << %[Codename: #{VRTK::CODENAME}]
text << %[FFMPEG version: #{ffmpeg_version}]
text << %[ImageMagick version: #{magick_version}]
text << ''
text << %q[You can use '-p' option to get output in JSON.]
text << ''
text << ''
text << disclaimer
puts text.join("\n")
end
def self.name
'Version applet'
end
def self.id
'version'
end
def self.desc
'Displays VRTK version'
end
private
def ffmpeg_version
ffmpeg = FFMpeg.resolve.ffmpeg
`#{ffmpeg} -version`
.split(/[\r\n]+/).first
.split('Copyright').first
.split('version').last
.strip
end
def magick_version
mogrify = MiniMagick::Tool::Mogrify.new
mogrify << '--version'
mogrify.call
.split(/[\r\n]+/).first
.split('x64').first
.split('ImageMagick').last
.strip
end
def disclaimer
[
%q[Copyright 2018 MRAKOBEZE],
%q[],
%q[Licensed under the Apache License, Version 2.0 (the "License");],
%q[you may not use this file except in compliance with the License.],
%q[You may obtain a copy of the License at],
%q[],
%q[http://www.apache.org/licenses/LICENSE-2.0],
%q[],
%q[Unless required by applicable law or agreed to in writing, software],
%q[distributed under the License is distributed on an "AS IS" BASIS,],
%q[WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.],
%q[See the License for the specific language governing permissions and],
%q[limitations under the License.]
].join "\n"
end
end
|
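A hypothetical invocation sketch; how BaseApplet constructs applets and wires options is assumed, so the OptionParser returned by #init_options is parsed by hand here.

applet = VRTK::Applets::VersionApplet.new  # assumed zero-argument constructor
applet.init_options.parse!(['-p'])         # sets the parsable flag
applet.run                                 # prints the JSON version object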
lostisland/faraday | lib/faraday/connection.rb | Faraday.Connection.url_prefix= | ruby | def url_prefix=(url, encoder = nil)
uri = @url_prefix = Utils.URI(url)
self.path_prefix = uri.path
params.merge_query(uri.query, encoder)
uri.query = nil
with_uri_credentials(uri) do |user, password|
basic_auth user, password
uri.user = uri.password = nil
end
end | Parses the given URL with URI and stores the individual
components in this connection. These components serve as defaults for
requests made by this connection.
@param url [String, URI]
@param encoder [Object]
@example
conn = Faraday::Connection.new { ... }
conn.url_prefix = "https://sushi.com/api"
conn.scheme # => https
conn.path_prefix # => "/api"
conn.get("nigiri?page=2") # accesses https://sushi.com/api/nigiri | train | https://github.com/lostisland/faraday/blob/3abe9d1eea4bdf61cdf7b76ff9f1ae7e09482e70/lib/faraday/connection.rb#L420-L431 | class Connection
# A Set of allowed HTTP verbs.
METHODS = Set.new %i[get post put delete head patch options trace connect]
# @return [Hash] URI query unencoded key/value pairs.
attr_reader :params
# @return [Hash] unencoded HTTP header key/value pairs.
attr_reader :headers
# @return [String] a URI with the prefix used for all requests from this
# Connection. This includes a default host name, scheme, port, and path.
attr_reader :url_prefix
# @return [Faraday::Builder] Builder for this Connection.
attr_reader :builder
# @return [Hash] SSL options.
attr_reader :ssl
# @return [Object] the parallel manager for this Connection.
attr_reader :parallel_manager
# Sets the default parallel manager for this connection.
attr_writer :default_parallel_manager
# @return [Hash] proxy options.
attr_reader :proxy
# Initializes a new Faraday::Connection.
#
# @param url [URI, String] URI or String base URL to use as a prefix for all
# requests (optional).
# @param options [Hash, Faraday::ConnectionOptions]
# @option options [URI, String] :url ('http:/') URI or String base URL
# @option options [Hash<String => String>] :params URI query unencoded
# key/value pairs.
# @option options [Hash<String => String>] :headers Hash of unencoded HTTP
# header key/value pairs.
# @option options [Hash] :request Hash of request options.
# @option options [Hash] :ssl Hash of SSL options.
# @option options [Hash, URI, String] :proxy proxy options, either as a URL
# or as a Hash
# @option options [URI, String] :proxy[:uri]
# @option options [String] :proxy[:user]
# @option options [String] :proxy[:password]
# @yield [self] after all setup has been done
def initialize(url = nil, options = nil)
options = ConnectionOptions.from(options)
if url.is_a?(Hash) || url.is_a?(ConnectionOptions)
options = options.merge(url)
url = options.url
end
@parallel_manager = nil
@headers = Utils::Headers.new
@params = Utils::ParamsHash.new
@options = options.request
@ssl = options.ssl
@default_parallel_manager = options.parallel_manager
@builder = options.builder || begin
# pass an empty block to Builder so it doesn't assume default middleware
options.new_builder(block_given? ? proc { |b| } : nil)
end
self.url_prefix = url || 'http:/'
@params.update(options.params) if options.params
@headers.update(options.headers) if options.headers
initialize_proxy(url, options)
yield(self) if block_given?
@headers[:user_agent] ||= "Faraday v#{VERSION}"
end
def initialize_proxy(url, options)
@manual_proxy = !!options.proxy
@proxy =
if options.proxy
ProxyOptions.from(options.proxy)
else
proxy_from_env(url)
end
@temp_proxy = @proxy
end
# Sets the Hash of URI query unencoded key/value pairs.
# @param hash [Hash]
def params=(hash)
@params.replace hash
end
# Sets the Hash of unencoded HTTP header key/value pairs.
# @param hash [Hash]
def headers=(hash)
@headers.replace hash
end
extend Forwardable
def_delegators :builder, :build, :use, :request, :response, :adapter, :app
# @!method get(url = nil, params = nil, headers = nil)
# Makes a GET HTTP request without a body.
# @!scope class
#
# @param url [String] The optional String base URL to use as a prefix for
# all requests. Can also be the options Hash.
# @param params [Hash] Hash of URI query unencoded key/value pairs.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.get '/items', { page: 1 }, :accept => 'application/json'
#
# # ElasticSearch example sending a body with GET.
# conn.get '/twitter/tweet/_search' do |req|
# req.headers[:content_type] = 'application/json'
# req.params[:routing] = 'kimchy'
# req.body = JSON.generate(query: {...})
# end
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
# @!method head(url = nil, params = nil, headers = nil)
# Makes a HEAD HTTP request without a body.
# @!scope class
#
# @param url [String] The optional String base URL to use as a prefix for
# all requests. Can also be the options Hash.
# @param params [Hash] Hash of URI query unencoded key/value pairs.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.head '/items/1'
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
# @!method delete(url = nil, params = nil, headers = nil)
# Makes a DELETE HTTP request without a body.
# @!scope class
#
# @param url [String] The optional String base URL to use as a prefix for
# all requests. Can also be the options Hash.
# @param params [Hash] Hash of URI query unencoded key/value pairs.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.delete '/items/1'
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
# @!method connect(url = nil, params = nil, headers = nil)
# Makes a CONNECT HTTP request without a body.
# @!scope class
#
# @param url [String] The optional String base URL to use as a prefix for
# all requests. Can also be the options Hash.
# @param params [Hash] Hash of URI query unencoded key/value pairs.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.connect '/items/1'
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
# @!method trace(url = nil, params = nil, headers = nil)
# Makes a TRACE HTTP request without a body.
# @!scope class
#
# @param url [String] The optional String base URL to use as a prefix for
# all requests. Can also be the options Hash.
# @param params [Hash] Hash of URI query unencoded key/value pairs.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.connect '/items/1'
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
# @!visibility private
METHODS_WITH_QUERY.each do |method|
class_eval <<-RUBY, __FILE__, __LINE__ + 1
def #{method}(url = nil, params = nil, headers = nil)
run_request(:#{method}, url, nil, headers) do |request|
request.params.update(params) if params
yield request if block_given?
end
end
RUBY
end
# @overload options()
# Returns current Connection options.
#
# @overload options(url, params = nil, headers = nil)
# Makes an OPTIONS HTTP request to the given URL.
# @param url [String] String base URL to use as a prefix for all requests.
# @param params [Hash] Hash of URI query unencoded key/value pairs.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.options '/items/1'
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
def options(*args)
return @options if args.size.zero?
url, params, headers = *args
run_request(:options, url, nil, headers) do |request|
request.params.update(params) if params
yield request if block_given?
end
end
# @!method post(url = nil, body = nil, headers = nil)
# Makes a POST HTTP request with a body.
# @!scope class
#
# @param url [String] The optional String base URL to use as a prefix for
# all requests. Can also be the options Hash.
# @param body [String] body for the request.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# conn.post '/items', data, content_type: 'application/json'
#
# # Simple ElasticSearch indexing sample.
# conn.post '/twitter/tweet' do |req|
# req.headers[:content_type] = 'application/json'
# req.params[:routing] = 'kimchy'
# req.body = JSON.generate(user: 'kimchy', ...)
# end
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
# @!method put(url = nil, body = nil, headers = nil)
# Makes a PUT HTTP request with a body.
# @!scope class
#
# @param url [String] The optional String base URL to use as a prefix for
# all requests. Can also be the options Hash.
# @param body [String] body for the request.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @example
# # TODO: Make it a PUT example
# conn.post '/items', data, content_type: 'application/json'
#
# # Simple ElasticSearch indexing sample.
# conn.post '/twitter/tweet' do |req|
# req.headers[:content_type] = 'application/json'
# req.params[:routing] = 'kimchy'
# req.body = JSON.generate(user: 'kimchy', ...)
# end
#
# @yield [Faraday::Request] for further request customizations
# @return [Faraday::Response]
# @!visibility private
METHODS_WITH_BODY.each do |method|
class_eval <<-RUBY, __FILE__, __LINE__ + 1
def #{method}(url = nil, body = nil, headers = nil, &block)
run_request(:#{method}, url, body, headers, &block)
end
RUBY
end
# Sets up the Authorization header with these credentials, encoded
# with base64.
#
# @param login [String] The authentication login.
# @param pass [String] The authentication password.
#
# @example
#
# conn.basic_auth 'Aladdin', 'open sesame'
# conn.headers['Authorization']
# # => "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="
#
# @return [void]
def basic_auth(login, pass)
set_authorization_header(:basic_auth, login, pass)
end
# Sets up the Authorization header with the given token.
#
# @param token [String]
# @param options [Hash] extra token options.
#
# @example
#
# conn.token_auth 'abcdef', foo: 'bar'
# conn.headers['Authorization']
# # => "Token token=\"abcdef\",
# foo=\"bar\""
#
# @return [void]
def token_auth(token, options = nil)
set_authorization_header(:token_auth, token, options)
end
# Sets up a custom Authorization header.
#
# @param type [String] authorization type
# @param token [String, Hash] token. A String value is taken literally, and
# a Hash is encoded into comma-separated key/value pairs.
#
# @example
#
# conn.authorization :Bearer, 'mF_9.B5f-4.1JqM'
# conn.headers['Authorization']
# # => "Bearer mF_9.B5f-4.1JqM"
#
# conn.authorization :Token, token: 'abcdef', foo: 'bar'
# conn.headers['Authorization']
# # => "Token token=\"abcdef\",
# foo=\"bar\""
#
# @return [void]
def authorization(type, token)
set_authorization_header(:authorization, type, token)
end
# Check if the adapter is parallel-capable.
#
# @yield if the adapter isn't parallel-capable, or if no adapter is set yet.
#
# @return [Object, nil] a parallel manager or nil if yielded
# @api private
def default_parallel_manager
@default_parallel_manager ||= begin
adapter = @builder.adapter.klass if @builder.adapter
if support_parallel?(adapter)
adapter.setup_parallel_manager
elsif block_given?
yield
end
end
end
# Determine if this Faraday::Connection can make parallel requests.
#
# @return [Boolean]
def in_parallel?
!!@parallel_manager
end
# Sets up the parallel manager to make a set of requests.
#
# @param manager [Object] The parallel manager that this Connection's
# Adapter uses.
#
# @yield a block to execute multiple requests.
# @return [void]
def in_parallel(manager = nil)
@parallel_manager = manager || default_parallel_manager do
warn 'Warning: `in_parallel` called but no parallel-capable adapter ' \
'on Faraday stack'
warn caller[2, 10].join("\n")
nil
end
yield
@parallel_manager&.run
ensure
@parallel_manager = nil
end
# Sets the Hash proxy options.
#
# @param new_value [Object]
def proxy=(new_value)
@manual_proxy = true
@proxy = new_value ? ProxyOptions.from(new_value) : nil
end
def_delegators :url_prefix, :scheme, :scheme=, :host, :host=, :port, :port=
def_delegator :url_prefix, :path, :path_prefix
# Parses the given URL with URI and stores the individual
# components in this connection. These components serve as defaults for
# requests made by this connection.
#
# @param url [String, URI]
# @param encoder [Object]
#
# @example
#
# conn = Faraday::Connection.new { ... }
# conn.url_prefix = "https://sushi.com/api"
# conn.scheme # => https
# conn.path_prefix # => "/api"
#
# conn.get("nigiri?page=2") # accesses https://sushi.com/api/nigiri
# Sets the path prefix and ensures that it always has a leading
# slash.
#
# @param value [String]
#
# @return [String] the new path prefix
def path_prefix=(value)
url_prefix.path = if value
value = '/' + value unless value[0, 1] == '/'
value
end
end
# Takes a relative url for a request and combines it with the defaults
# set on the connection instance.
#
# @param url [String]
# @param extra_params [Hash]
#
# @example
# conn = Faraday::Connection.new { ... }
# conn.url_prefix = "https://sushi.com/api?token=abc"
# conn.scheme # => https
# conn.path_prefix # => "/api"
#
# conn.build_url("nigiri?page=2")
# # => https://sushi.com/api/nigiri?token=abc&page=2
#
# conn.build_url("nigiri", page: 2)
# # => https://sushi.com/api/nigiri?token=abc&page=2
#
def build_url(url = nil, extra_params = nil)
uri = build_exclusive_url(url)
query_values = params.dup.merge_query(uri.query, options.params_encoder)
query_values.update(extra_params) if extra_params
uri.query =
if query_values.empty?
nil
else
query_values.to_query(options.params_encoder)
end
uri
end
# Builds and runs the Faraday::Request.
#
# @param method [Symbol] HTTP method.
# @param url [String, URI] String or URI to access.
# @param body [Object] The request body that will eventually be converted to
# a string.
# @param headers [Hash] unencoded HTTP header key/value pairs.
#
# @return [Faraday::Response]
def run_request(method, url, body, headers)
unless METHODS.include?(method)
raise ArgumentError, "unknown http method: #{method}"
end
# Resets temp_proxy
@temp_proxy = proxy_for_request(url)
request = build_request(method) do |req|
req.options = req.options.merge(proxy: @temp_proxy)
req.url(url) if url
req.headers.update(headers) if headers
req.body = body if body
yield(req) if block_given?
end
builder.build_response(self, request)
end
# Creates and configures the request object.
#
# @param method [Symbol]
#
# @yield [Faraday::Request] if block given
# @return [Faraday::Request]
def build_request(method)
Request.create(method) do |req|
req.params = params.dup
req.headers = headers.dup
req.options = options
yield(req) if block_given?
end
end
# Build an absolute URL based on url_prefix.
#
# @param url [String, URI]
# @param params [Faraday::Utils::ParamsHash] A Faraday::Utils::ParamsHash to
# replace the query values
# of the resulting url (default: nil).
#
# @return [URI]
def build_exclusive_url(url = nil, params = nil, params_encoder = nil)
url = nil if url.respond_to?(:empty?) && url.empty?
base = url_prefix
if url && base.path && base.path !~ %r{/$}
base = base.dup
base.path = base.path + '/' # ensure trailing slash
end
uri = url ? base + url : base
if params
uri.query = params.to_query(params_encoder || options.params_encoder)
end
# rubocop:disable Style/SafeNavigation
uri.query = nil if uri.query && uri.query.empty?
# rubocop:enable Style/SafeNavigation
uri
end
# Creates a duplicate of this Faraday::Connection.
#
# @api private
#
# @return [Faraday::Connection]
def dup
self.class.new(build_exclusive_url,
headers: headers.dup,
params: params.dup,
builder: builder.dup,
ssl: ssl.dup,
request: options.dup)
end
# Yields username and password extracted from a URI if they both exist.
#
# @param uri [URI]
# @yield [username, password] any username and password
# @yieldparam username [String] any username from URI
# @yieldparam password [String] any password from URI
# @return [void]
# @api private
def with_uri_credentials(uri)
return unless uri.user && uri.password
yield(Utils.unescape(uri.user), Utils.unescape(uri.password))
end
def set_authorization_header(header_type, *args)
header = Faraday::Request
.lookup_middleware(header_type)
.header(*args)
headers[Faraday::Request::Authorization::KEY] = header
end
def proxy_from_env(url)
return if Faraday.ignore_env_proxy
uri = nil
if URI.parse('').respond_to?(:find_proxy)
case url
when String
uri = Utils.URI(url)
uri = URI.parse("#{uri.scheme}://#{uri.hostname}").find_proxy
when URI
uri = url.find_proxy
when nil
uri = find_default_proxy
end
else
warn 'no_proxy is unsupported' if ENV['no_proxy'] || ENV['NO_PROXY']
uri = find_default_proxy
end
ProxyOptions.from(uri) if uri
end
def find_default_proxy
uri = ENV['http_proxy']
return unless uri && !uri.empty?
uri = 'http://' + uri if uri !~ /^http/i
uri
end
def proxy_for_request(url)
return proxy if @manual_proxy
if url && Utils.URI(url).absolute?
proxy_from_env(url)
else
proxy
end
end
def support_parallel?(adapter)
adapter&.respond_to?(:supports_parallel?) && adapter&.supports_parallel?
end
end
|
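A usage sketch restating the url_prefix= and build_url behaviour documented above.

require 'faraday'

conn = Faraday.new
conn.url_prefix = 'https://sushi.com/api?token=abc'

conn.scheme        # => "https"
conn.path_prefix   # => "/api"
conn.build_url('nigiri', page: 2).to_s
# => "https://sushi.com/api/nigiri?token=abc&page=2"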
SecureBrain/ruby_apk | lib/android/manifest.rb | Android.Manifest.label | ruby | def label(lang=nil)
label = @doc.elements['/manifest/application'].attributes['label']
if label.nil?
# application element has no label attributes.
# so looking for activites that has label attribute.
activities = @doc.elements['/manifest/application'].find{|e| e.name == 'activity' && !e.attributes['label'].nil? }
label = activities.nil? ? nil : activities.first.attributes['label']
end
unless @rsc.nil?
if /^@(\w+\/\w+)|(0x[0-9a-fA-F]{8})$/ =~ label
opts = {}
opts[:lang] = lang unless lang.nil?
label = @rsc.find(label, opts)
end
end
label
end | application label
@param [String] lang language code like 'ja', 'cn', ...
@return [String] application label string(if resouce is provided), or label resource id
@return [nil] when label is not found
@since 0.5.1 | train | https://github.com/SecureBrain/ruby_apk/blob/405b6af165722c6b547ad914dfbb78fdc40e6ef7/lib/android/manifest.rb#L221-L237 | class Manifest
APPLICATION_TAG = '/manifest/application'
# <activity>, <service>, <receiver> or <provider> element in <application> element of the manifest file.
class Component
# component types
TYPES = ['service', 'activity', 'receiver', 'provider']
# the element is valid Component element or not
# @param [REXML::Element] elem xml element
# @return [Boolean]
def self.valid?(elem)
TYPES.include?(elem.name.downcase)
rescue => e
false
end
# @return [String] type string in TYPES
attr_reader :type
# @return [String] component name
attr_reader :name
# @return [Array<Manifest::IntentFilter>]
attr_reader :intent_filters
# @return [Array<Manifest::Meta>]
attr_reader :metas
# @return [REXML::Element]
attr_reader :elem
# @param [REXML::Element] elem target element
# @raise [ArgumentError] when elem is invalid.
def initialize(elem)
raise ArgumentError unless Component.valid?(elem)
@elem = elem
@type = elem.name
@name = elem.attributes['name']
@intent_filters = []
unless elem.elements['intent-filter'].nil?
elem.elements['intent-filter'].each do |e|
next unless e.instance_of? REXML::Element
@intent_filters << IntentFilter.parse(e)
end
end
@metas = []
elem.each_element('meta-data') do |e|
@metas << Meta.new(e)
end
end
end
# intent-filter element in components
module IntentFilter
# parse inside of intent-filter element
# @param [REXML::Element] elem target element
# @return [IntentFilter::Action, IntentFilter::Category, IntentFilter::Data]
# intent-filter element
def self.parse(elem)
case elem.name
when 'action'
Action.new(elem)
when 'category'
Category.new(elem)
when 'data'
Data.new(elem)
else
nil
end
end
# intent-filter action class
class Action
# @return [String] action name of intent-filter
attr_reader :name
# @return [String] action type of intent-filter
attr_reader :type
def initialize(elem)
@type = 'action'
@name = elem.attributes['name']
end
end
# intent-filter category class
class Category
# @return [String] category name of intent-filter
attr_reader :name
# @return [String] category type of intent-filter
attr_reader :type
def initialize(elem)
@type = 'category'
@name = elem.attributes['name']
end
end
# intent-filter data class
class Data
# @return [String]
attr_reader :type
# @return [String]
attr_reader :host
# @return [String]
attr_reader :mime_type
# @return [String]
attr_reader :path
# @return [String]
attr_reader :path_pattern
# @return [String]
attr_reader :path_prefix
# @return [String]
attr_reader :port
# @return [String]
attr_reader :scheme
def initialize(elem)
@type = 'data'
@host = elem.attributes['host']
@mime_type = elem.attributes['mimeType']
@path = elem.attributes['path']
@path_pattern = elem.attributes['pathPattern']
@path_prefix = elem.attributes['pathPrefix']
@port = elem.attributes['port']
@scheme = elem.attributes['scheme']
end
end
end
# meta information class
class Meta
# @return [String]
attr_reader :name
# @return [String]
attr_reader :resource
# @return [String]
attr_reader :value
def initialize(elem)
@name = elem.attributes['name']
@resource = elem.attributes['resource']
@value = elem.attributes['value']
end
end
#################################
# Manifest class definitions
#################################
#
# @return [REXML::Document] manifest xml
attr_reader :doc
# @param [String] data binary data of AndroidManifest.xml
def initialize(data, rsc=nil)
parser = AXMLParser.new(data)
@doc = parser.parse
@rsc = rsc
end
# used permission array
# @return [Array<String>] permission names
# @note returns an empty array when the manifest includes no uses-permission element
def use_permissions
perms = []
@doc.each_element('/manifest/uses-permission') do |elem|
perms << elem.attributes['name']
end
perms.uniq
end
# @return [Array<Android::Manifest::Component>] all components in apk
# @note returns an empty array when the manifest includes no components
def components
components = []
unless @doc.elements['/manifest/application'].nil?
@doc.elements['/manifest/application'].each do |elem|
components << Component.new(elem) if Component.valid?(elem)
end
end
components
end
# application package name
# @return [String]
def package_name
@doc.root.attributes['package']
end
# application version code
# @return [Integer]
def version_code
@doc.root.attributes['versionCode'].to_i
end
# application version name
# @return [String]
def version_name(lang=nil)
vername = @doc.root.attributes['versionName']
unless @rsc.nil?
if /^@(\w+\/\w+)|(0x[0-9a-fA-F]{8})$/ =~ vername
opts = {}
opts[:lang] = lang unless lang.nil?
vername = @rsc.find(vername, opts)
end
end
vername
end
# @return [Integer] minSdkVersion in uses element
def min_sdk_ver
@doc.elements['/manifest/uses-sdk'].attributes['minSdkVersion'].to_i
end
# application label
# @param [String] lang language code like 'ja', 'cn', ...
# @return [String] application label string(if resouce is provided), or label resource id
# @return [nil] when label is not found
# @since 0.5.1
# return xml as string format
# @param [Integer] indent size(bytes)
# @return [String] raw xml string
def to_xml(indent=4)
xml =''
formatter = REXML::Formatters::Pretty.new(indent)
formatter.write(@doc.root, xml)
xml
end
end
|
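A sketch of resolving an application label from an APK, assuming the gem's Android::Apk entry point; 'sample.apk' is a placeholder path.

require 'ruby_apk'

apk = Android::Apk.new('sample.apk')  # assumed loader class
manifest = apk.manifest
puts manifest.label        # label resolved via resources when they are available
puts manifest.label('ja')  # localized lookup, if the resource defines one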
agios/simple_form-dojo | lib/simple_form-dojo/dojo_props_methods.rb | SimpleFormDojo.DojoPropsMethods.get_and_merge_dojo_props! | ruby | def get_and_merge_dojo_props!
add_dojo_options_to_dojo_props
if object.id.present?
add_dojo_compliant_id
else
input_html_options["id"] = nil #let dojo generate internal id
end
input_html_options[:'data-dojo-props'] = SimpleFormDojo::FormBuilder.encode_as_dojo_props(@dojo_props) if !@dojo_props.blank?
end | Retrieves and merges all dojo_props | train | https://github.com/agios/simple_form-dojo/blob/c4b134f56f4cb68cba81d583038965360c70fba4/lib/simple_form-dojo/dojo_props_methods.rb#L5-L13 | module DojoPropsMethods
##
# Retrieves and merges all dojo_props
private
##
# Retrieves dojo props from :dojo_html => {} options
def add_dojo_options_to_dojo_props
@dojo_props ||= {}
@dojo_props.merge!(html_options_for(:dojo, []))
end
def tag_id index = nil
id = sanitized_object_name
id << "_#{object.id}"
id << index if index
id << "_#{sanitized_attribute_name}"
id
end
def add_dojo_compliant_id
opts = input_html_options
if opts.has_key?("index")
opts["id"] = opts.fetch("id"){ tag_id opts["index"] }
opts.delete("index")
elsif defined?(@auto_index)
opts["id"] = opts.fetch("id"){ tag_id @auto_index }
else
opts["id"] = opts.fetch("id"){ tag_id }
end
end
def sanitized_attribute_name
@sanitized_attribute_name ||= attribute_name.to_s.sub(/\?$/,"")
end
def sanitized_object_name
@sanitized_object_name ||= object_name.to_s.gsub(/\]\[|[^-a-zA-Z0-9:.]/, "_").sub(/_$/, "")
end
end
|
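A hypothetical form snippet showing how :dojo_html options flow into data-dojo-props via the hook above; the attribute and props are illustrative, and the exact serialization depends on encode_as_dojo_props.

# Inside a view rendered with SimpleForm (hypothetical):
simple_form_for @product do |f|
  f.input :price, dojo_html: { constraints: { min: 0, places: 2 } }
  # the rendered input gains a data-dojo-props attribute with those props
end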
zhimin/rwebspec | lib/rwebspec-webdriver/web_browser.rb | RWebSpec.WebBrowser.submit | ruby | def submit(buttonName = nil)
if (buttonName.nil?) then
buttons.each { |button|
next if button.type != 'submit'
button.click
return
}
else
click_button_with_name(buttonName)
end
end | submit first submit button | train | https://github.com/zhimin/rwebspec/blob/aafccee2ba66d17d591d04210067035feaf2f892/lib/rwebspec-webdriver/web_browser.rb#L590-L600 | class WebBrowser
include ElementLocator
attr_accessor :context
def initialize(base_url = nil, existing_browser = nil, options = {})
default_options = {:speed => "zippy",
:visible => true,
:highlight_colour => 'yellow',
:close_others => true
}
options = default_options.merge options
@context = Context.new base_url if base_url
options[:browser] ||= "ie" if RUBY_PLATFORM =~ /mingw/
case options[:browser].to_s.downcase
when "firefox"
initialize_firefox_browser(existing_browser, base_url, options)
when "chrome"
initialize_chrome_browser(existing_browser, base_url, options)
when "safari"
initialize_safari_browser(existing_browser, base_url, options)
when "ie"
initialize_ie_browser(existing_browser, options)
when "htmlunit"
initialize_htmlunit_browser(base_url, options)
end
begin
if options[:resize_to] && options[:resize_to].class == Array
@browser.manage.window.resize_to(options[:resize_to][0], options[:resize_to][1])
end
rescue => e
puts "[ERROR] failed to resize => #{options[:resize_to]}"
end
end
def initialize_firefox_browser(existing_browser, base_url, options)
if existing_browser then
@browser = existing_browser
return
end
@browser = Selenium::WebDriver.for :firefox
@browser.navigate.to base_url
end
def initialize_chrome_browser(existing_browser, base_url, options)
if existing_browser then
@browser = existing_browser
return
end
@browser = Selenium::WebDriver.for :chrome
@browser.navigate.to base_url
end
def initialize_safari_browser(existing_browser, base_url, options)
if existing_browser then
@browser = existing_browser
return
end
@browser = Selenium::WebDriver.for :safari
@browser.navigate.to base_url
end
def initialize_htmlunit_browser(base_url, options)
require 'json'
caps = Selenium::WebDriver::Remote::Capabilities.htmlunit(:javascript_enabled => false)
client = Selenium::WebDriver::Remote::Http::Default.new
# client.proxy = Selenium::WebDriver::Proxy.new(:http => "web-proxy.qdot.qld.gov.au:3128")
@browser = Selenium::WebDriver.for(:remote, :http_client => client , :desired_capabilities => caps)
if options[:go]
@browser.navigate.to(base_url)
end
end
def initialize_ie_browser(existing_browser, options)
if existing_browser then
@browser = existing_browser
if $TESTWISE_EMULATE_TYPING && $TESTWISE_TYPING_SPEED then
@browser.set_slow_speed if $TESTWISE_TYPING_SPEED == 'slow'
@browser.set_fast_speed if $TESTWISE_TYPING_SPEED == 'fast'
else
@browser.speed = :zippy
end
return @browser
end
@browser = Selenium::WebDriver.for :ie
# if $TESTWISE_EMULATE_TYPING && $TESTWISE_TYPING_SPEED then
# @browser.set_slow_speed if $TESTWISE_TYPING_SPEED == 'slow'
# @browser.set_fast_speed if $TESTWISE_TYPING_SPEED == 'fast'
# else
# @browser.speed = :zippy
# end
# @browser.visible = options[:visible] unless $HIDE_IE
# #NOTE: close_others fails
# if RUBY_VERSION =~ /^1\.8/ && options[:close_others] then
# @browser.close_others
# else
# puts "close other browser instances not working yet in Ruby 1.9.1 version of Watir"
# end
end
# TODO reuse not working yet
def self.reuse(base_url, options)
if self.is_windows?
WebBrowser.new(base_url, nil, options)
else
WebBrowser.new(base_url, nil, options)
end
end
# for popup windows
def self.new_from_existing(underlying_browser, web_context = nil)
return WebBrowser.new(web_context ? web_context.base_url : nil, underlying_browser, {:close_others => false})
end
def find_element(* args)
@browser.send("find_element", *args)
end
def find_elements(* args)
@browser.send("find_elements", *args)
end
##
# Delegate to WebDriver
#
[:button, :cell, :checkbox, :div, :form, :frame, :h1, :h2, :h3, :h4, :h5, :h6, :hidden, :image, :li, :link, :map, :pre, :row, :radio, :select_list, :span, :table, :text_field, :paragraph, :file_field, :label].each do |method|
tag_name = method
define_method method do |* args|
if args.size == 2 then
find_element(args[0].to_sym, args[1])
end
end
end
alias td cell
alias check_box checkbox # seems watir doc is wrong, checkbox not check_box
alias tr row
# Wrapp of area to support Firefox and Watir
def area(* args)
raise "not implemented for Selenium"
end
def modal_dialog(how=nil, what=nil)
@browser.modal_dialog(how, what)
end
# This is the main method for accessing a generic element with a given attibute
# * how - symbol - how we access the element. Supports all values except :index and :xpath
# * what - string, integer or regular expression - what we are looking for,
#
# Valid values for 'how' are listed in the Watir Wiki - http://wiki.openqa.org/display/WTR/Methods+supported+by+Element
#
# returns an Watir::Element object
#
# Typical Usage
#
# element(:class, /foo/) # access the first element with class 'foo'. We can use a string in place of the regular expression
# element(:id, "11") # access the first element that matches an id
def element(how, what)
return @browser.element(how, what)
end
# this is the main method for accessing generic html elements by an attribute
#
# Returns a HTMLElements object
#
# Typical usage:
#
# elements(:class, 'test').each { |l| puts l.to_s } # iterate through all elements of a given attribute
# elements(:alt, 'foo')[1].to_s # get the first element of a given attribute
# elements(:id, 'foo').length # show how many elements are foung in the collection
#
def elements(how, what)
return @browser.elements(how, what)
end
def show_all_objects
@browser.show_all_objects
end
# Returns the specified ole object for input elements on a web page.
#
# This method is used internally by Watir and should not be used externally. It cannot be marked as private because of the way mixins and inheritance work in watir
#
# * how - symbol - the way we look for the object. Supported values are
# - :name
# - :id
# - :index
# - :value etc
# * what - string that we are looking for, ex. the name, or id tag attribute or index of the object we are looking for.
# * types - what object types we will look at.
# * value - used for objects that have one name, but many values. ex. radio lists and checkboxes
def locate_input_element(how, what, types, value=nil)
@browser.locate_input_element(how, what, types, value)
end
# This is the main method for accessing map tags - http://msdn.microsoft.com/workshop/author/dhtml/reference/objects/map.asp?frame=true
# * how - symbol - how we access the map,
# * what - string, integer or regular expression - what we are looking for,
#
# Valid values for 'how' are listed in the Watir Wiki - http://wiki.openqa.org/display/WTR/Methods+supported+by+Element
#
# returns a map object
#
# Typical Usage
#
# map(:id, /list/) # access the first map that matches list.
# map(:index,2) # access the second map on the page
# map(:title, "A Picture") # access a map using the tooltip text. See http://msdn.microsoft.com/workshop/author/dhtml/reference/properties/title_1.asp?frame=true
#
def map(how, what=nil)
@browser.map(how, what)
end
def contains_text(text)
@browser.contains_text(text);
end
# return HTML of current web page
def page_source
@browser.page_source
end
alias html_body page_source
alias html page_source
def page_title
@browser.title
end
def text(squeeze_spaces = true)
@browser.find_element(:tag_name, "body").text
end
=begin
# @deprecated
def text_with_sanitize
begin
require 'sanitize'
page_text_string = Sanitize.clean(html)
page_text_string = page_text_string.squeeze(" ") if squeeze_spaces
# remove duplicated (spaces)
return page_text_string
rescue => e
puts "failed to santize html source => text, #{e}"
return @browser.html
end
end
=end
# :links => removed
# :checkboxes => removed
# :radios => removed
# :select_lists => removed
# :buttons => removed
# :divs => removed
[:images, :text_fields, :dls, :dds, :dts, :ems, :lis, :maps, :spans, :strongs, :ps, :pres, :labels].each do |method|
define_method method do
@browser.send(method)
end
end
def links
@browser.find_elements(:tag_name, "a")
end
def checkboxes
@browser.find_elements(:xpath, "//input[@type='checkbox']")
end
def radios
@browser.find_elements(:xpath, "//input[@type='radio']")
end
def select_lists
@browser.find_elements(:tag_name, "select")
end
def buttons
button_array = @browser.find_elements(:tag_name, "button") + @browser.find_elements(:xpath, "//input[@type='submit']") + @browser.find_elements(:xpath, "//input[@type='button']")
return button_array
end
def divs
@browser.find_elements(:tag_name, "divs")
end
# current url
def current_url
@browser.current_url
end
alias url current_url
def base_url=(new_base_url)
if @context
@context.base_url = new_base_url
return
end
@context = Context.new new_base_url
end
def driver
@browser
end
def underlying_browser
@browser
end
def is_ie?
@browser.browser.to_s == "ie"
end
def is_firefox?
@browser.browser.to_s == "firefox"
end
# Close the browser window. Useful for automated test suites to reduce
# test interaction.
def close_browser
@browser.quit
sleep 1
end
alias close close_browser
#TODO determine browser type, check FireWatir support or not
def close_all_browsers
puts "[WARN] not supported yet in RWebSpec-WebDriver"
end
def self.close_all_browsers
puts "[WARN] not supported yet in RWebSpec-WebDriver"
end
def full_url(relative_url)
if @context && @context.base_url
@context.base_url + relative_url
else
relative_url
end
end
# Crashes on malformed URLs such as http:///ssshtttp:///
def begin_at(relative_url)
if relative_url =~ /\s*^http/
@browser.navigate.to relative_url
else
@browser.navigate.to full_url(relative_url)
end
end
def browser_opened?
begin
@browser != nil
rescue => e
return false
end
end
# Some browsers (i.e. IE) need to be waited on before more actions can be
# performed. Most action methods in Watir::Simple already call this before
# and after.
def wait_for_browser
# NOTE: no longer needed
end
# A convenience method to wait at both ends of an operation for the browser
# to catch up.
def wait_before_and_after
wait_for_browser
yield
wait_for_browser
end
[:focus, :close_others].each do |method|
define_method(method) do
@browser.send(method)
end
end
def forward
@browser.navigate().forward
end
alias go_forward forward
# TODO can't browse back if on invalid page
def back
@browser.navigate.back
end
alias go_back back
def refresh
@browser.navigate().refresh
end
alias refresh_page refresh
# Go to a page
# Usage:
# open_browser(:base_url => "http://www.itest2.com")
# ....
# goto_page("/purchase") # full url => http://www.itest.com/purchase
def goto_page(page)
goto_url full_url(page);
end
# Go to a URL directly
# goto_url("http://www.itest2.com/downloads")
def goto_url(url)
@browser.navigate.to url
end
# text fields
def enter_text_into_field_with_name(name, text)
the_element = find_element(:name, name)
if the_element.tag_name == "input" || the_element.tag_name == "textarea" then
the_element.clear
the_element.send_keys(text)
else
elements = find_elements(:name, name)
if elements.size == 1 then
elements[0].send_keys(text)
else
element_set = elements.select {|x| x.tag_name == "textarea" || (x.tag_name == "input" && x.attribute("text")) }
element_set[0].send_keys(text)
end
end
return true
end
alias set_form_element enter_text_into_field_with_name
alias enter_text enter_text_into_field_with_name
alias set_hidden_field set_form_element
#links
def click_link_with_id(link_id, opts = {})
if opts && opts[:index]
elements = find_elements(:id, link_id)
focus_on_element(elements[opts[:index]-1])
elements[opts[:index]-1].click
else
focus_on_element(find_element(:id, link_id))
find_element(:id, link_id).click
end
end
def focus_on_element(elem)
begin
elem.send_keys("")
rescue => e
# ignore for example, an on hover table might not be ablet to send keys to
end
end
##
# click_link_with_text("Login")
# click_link_with_text("Show", :index => 2)
def click_link_with_text(link_text, opts = {})
if opts && opts[:index]
elements = find_elements(:link_text, link_text)
elements[opts[:index]-1].click
else
find_element(:link_text, link_text).click
end
end
alias click_link click_link_with_text
# Click a button with a given HTML id
# Usage:
#   click_button_with_id("btn_submit")
#   click_button_with_id("btn_submit", :index => 2) # the second button with the same id; duplicate ids are not good practice in HTML
def click_button_with_id(id, opts = {})
if opts && opts[:index] && opts[:index].to_i() > 0
elements = find_elements(:id, id)
the_index = opts[:index].to_i() - 1
first_match = elements[the_index]
focus_on_element(first_match)
first_match.click
else
focus_on_element(find_element(:id, id))
find_element(:id, id).click
end
end
# Click a button with a given name
# Usage:
# click_button_with_name("confirm")
# click_button_with_name("confirm", :index => 2)
def click_button_with_name(name, opts={})
find_element(:name, name).click
end
# Click a button with caption
#
# TODO: Caption is same as value
#
# Usage:
# click_button_with_caption("Confirm payment")
def click_button_with_caption(caption, opts={})
all_buttons = button_elements
matching_buttons = all_buttons.select{|x| x.attribute('value') == caption}
if matching_buttons.size > 0
if opts && opts[:index]
the_index = opts[:index].to_i() - 1
puts "Call matching buttons: #{matching_buttons.inspect} => #{the_index}"
first_match = matching_buttons[the_index]
first_match.click
else
the_button = matching_buttons[0]
the_button.click
end
else
raise "No button with value: #{caption} found"
end
end
alias click_button click_button_with_caption
alias click_button_with_text click_button_with_caption
# click_button_with_caption("Confirm payment")
def click_button_with_value(value, opts={})
all_buttons = button_elements
if opts && opts[:index]
all_buttons.select{|x| x.attribute('value') == caption}[index]
else
all_buttons.each do |button|
if button.attribute('value') == value then
button.click
return
end
end
end
end
# Click an image button with image source name
#
# For an image submit button <input name="submit" type="image" src="/images/search_button.gif">
# click_button_with_image("search_button.gif")
def click_button_with_image_src_contains(image_filename)
all_buttons = button_elements
found = nil
all_buttons.select do |x|
if x["src"] =~ /#{Regexp.escape(image_filename)}/
found = x
break
end
end
raise "not image button with src: #{image_filename} found" if found.nil?
found.click
end
alias click_button_with_image click_button_with_image_src_contains
# Select a dropdown list by name
# Usage:
# select_option("country", "Australia")
def select_option(selectName, text)
Selenium::WebDriver::Support::Select.new(find_element(:name, selectName)).select_by(:text, text)
end
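# A minimal sketch of selecting by value instead of visible text, using the
# same Selenium Select support class; the name "country" and value "AU" are
# illustrative assumptions, not part of this API:
#
#   select = Selenium::WebDriver::Support::Select.new(find_element(:name, "country"))
#   select.select_by(:value, "AU")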
# Submit the form: click the first submit button, or the button with the given name
def submit(buttonName = nil)
  if buttonName.nil? then
    button_elements.each { |button|
      next if button.attribute('type') != 'submit'
      button.click
      return
    }
  else
    click_button_with_name(buttonName)
  end
end
# Check a checkbox
# Usage:
# check_checkbox("agree")
# check_checkbox("agree", "true")
def check_checkbox(checkBoxName, values=nil)
elements = find_checkboxes_by_name(checkBoxName)
if values
arys = values.is_a?(Array) ? values : [values]
arys.each { |cbx_value|
elements.each do |elem|
elem.click if elem.attribute('value') == cbx_value && !elem.selected?
end
}
else
the_checkbox = elements[0]
the_checkbox.click unless the_checkbox.selected?
end
end
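# Find all checkbox input elements with the given name; raises if none are found.
# Usage (a hedged example; "agree" is an assumed field name):
#   find_checkboxes_by_name("agree")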
def find_checkboxes_by_name(checkBoxName)
elements = find_elements(:name, checkBoxName)
elements.reject! {|x| x.tag_name != "input" || x["type"] != "checkbox"}
raise "No checkbox with name #{checkBoxName} found" if elements.empty?
return elements
end
# Uncheck a checkbox
# Usage:
# uncheck_checkbox("agree")
# uncheck_checkbox("agree", "false")
def uncheck_checkbox(checkBoxName, values = nil)
elements = find_checkboxes_by_name(checkBoxName)
if values
arys = values.is_a?(Array) ? values : [values]
arys.each { |cbx_value|
elements.each do |elem|
elem.click if elem.attribute('value') == cbx_value && elem.selected?
end
}
else
the_checkbox = elements[0]
the_checkbox.click if the_checkbox.selected?
end
end
# Click a radio button
# Usage:
# click_radio_option("country", "Australia")
def click_radio_option(radio_group, radio_option)
the_radio_button = find_element(:xpath, "//input[@type='radio' and @name='#{radio_group}' and @value='#{radio_option}']")
the_radio_button.click
end
alias click_radio_button click_radio_option
# Clear a radio button
# Usage:
#   clear_radio_option("country", "Australia")
def clear_radio_option(radio_group, radio_option)
the_radio_button = find_element(:xpath, "//input[@type='radio' and @name='#{radio_group}' and @value='#{radio_option}']")
the_radio_button.clear
end
alias clear_radio_button clear_radio_option
def element_by_id(elem_id)
@browser.find_element(:id, elem_id)
end
def element_value(elementId)
find_element(:id, elementId).attribute('value')
end
def element_source(elementId)
  elem = element_by_id(elementId)
  assert_not_nil(elem, "HTML element: #{elementId} does not exist")
  elem.attribute('innerHTML')
end
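# Select a file for a file upload field (a hedged example; field name and path are assumptions):
#   select_file_for_upload("attachment", "/tmp/photo.jpg")
# On Windows, forward slashes in the path are normalized to backslashes.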
def select_file_for_upload(file_field_name, file_path)
  is_on_windows = RUBY_PLATFORM.downcase.include?("mingw") || RUBY_PLATFORM.downcase.include?("mswin")
  normalized_file_path = is_on_windows ? file_path.gsub("/", "\\") : file_path
  # Send the path directly; clicking a file input can open the native file dialog and block the driver
  find_element(:name, file_field_name).send_keys(normalized_file_path)
end
def start_window(url = nil)
@browser.start_window(url)
end
# Attach to existing browser
#
# Usage:
# WebBrowser.attach_browser(:title, "iTest2")
# WebBrowser.attach_browser(:url, "http://www.itest2.com")
# WebBrowser.attach_browser(:url, "http://www.itest2.com", {:browser => "Firefox", :base_url => "http://www.itest2.com"})
# WebBrowser.attach_browser(:title, /agileway\.com\.au\/attachment/) # regular expression
def self.attach_browser(how, what, options={})
raise "Attach browser not implemented for Selenium, If you debug in TestWise, make sure running a test first to start browser, then you can attach."
end
# Attach to a popup window, to be removed
#
# Typical usage
# new_popup_window(:url => "http://www.google.com/a.pdf")
def new_popup_window(options, browser = "ie")
raise "not implemented"
end
# ---
# For debugging
# ---
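# Usage (a hedged sketch; the file path is an illustrative assumption):
#   dump_response                                        # prints page source to stdout
#   File.open("/tmp/page.html", "w") { |f| dump_response(f) }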
def dump_response(stream = nil)
stream.nil? ? puts(page_source) : stream.puts(page_source)
end
# A Better Popup Handler using the latest Watir version. Posted by [email protected]
#
# http://wiki.openqa.org/display/WTR/FAQ#FAQ-HowdoIattachtoapopupwindow%3F
#
def start_clicker(button, waitTime= 9, user_input=nil)
raise "Not support when using Selenium WebDriver, try alternative approach."
end
# return underlying browser driver if it is Internet Explorer, nil otherwise
def ie
  @browser.browser == :internet_explorer ? @browser : nil
end
# return underlying firefox browser object, raise error if not running using Firefox
def firefox
is_firefox? ? @browser : nil
end
def htmlunit
raise "can't call this as it is configured to use Celerity" unless RUBY_PLATFORM =~ /java/
@browser
end
# Save current web page source to file
# usage:
# save_page("/tmp/01.html")
# save_page() => # will save to "20090830112200.html"
def save_page(file_name = nil)
  file_name ||= Time.now.strftime("%Y%m%d%H%M%S") + ".html"
  puts "about to save page: #{File.expand_path(file_name)}" if $DEBUG
  File.open(file_name, "w") { |f| f.puts page_source }
end
# Verify the next page following an operation.
#
# Typical usage:
# browser.expect_page HomePage
def expect_page(page_clazz, argument = nil)
if argument
page_clazz.new(self, argument)
else
page_clazz.new(self)
end
end
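# A minimal sketch of a page class usable with expect_page; the class name
# HomePage and the welcome-text check are illustrative assumptions:
#
#   class HomePage
#     def initialize(browser, expected_text = "Welcome")
#       @browser = browser
#       raise "not on home page" unless browser.page_source.include?(expected_text)
#     end
#   end
#
#   browser.expect_page HomePage            # uses the default argument
#   browser.expect_page HomePage, "Hello"   # the argument is passed through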
# is it running in MS Windows platforms?
def self.is_windows?
RUBY_PLATFORM.downcase.include?("mswin") || RUBY_PLATFORM.downcase.include?("mingw")
end
end