repository_name | func_path_in_repository | func_name | language | func_code_string | func_documentation_string | split_name | func_code_url | called_functions | enclosing_scope
---|---|---|---|---|---|---|---|---|---|
stringclasses (316 values) | stringlengths (6-223) | stringlengths (1-134) | stringclasses (1 value) | stringlengths (57-65.5k) | stringlengths (1-46.3k) | stringclasses (1 value) | stringlengths (91-315) | sequencelengths (1-156, nullable ⌀) | stringlengths (2-1.48M)
datacamp/protowhat | protowhat/checks/check_simple.py | has_chosen | python | def has_chosen(state, correct, msgs):
ctxt = {}
exec(state.student_code, globals(), ctxt)
sel_indx = ctxt["selected_option"]
if sel_indx != correct:
state.report(Feedback(msgs[sel_indx - 1]))
else:
state.reporter.success_msg = msgs[correct - 1]
return state | Verify exercises of the type MultipleChoiceExercise
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
correct: index of correct option, where 1 is the first option.
msgs : list of feedback messages corresponding to each option.
:Example:
The following SCT is for a multiple choice exercise with 2 options, the first
of which is correct. ::
Ex().has_chosen(1, ['Correct!', 'Incorrect. Try again!']) | train | https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_simple.py#L4-L27 | [
"def report(self, feedback: Feedback):\n if feedback.highlight is None and self is not getattr(self, \"root_state\", None):\n feedback.highlight = self.student_ast\n test = Fail(feedback)\n\n return self.do_test(test)\n"
] | from protowhat.Feedback import Feedback
def success_msg(state, msg):
"""
Changes the success message to display if submission passes.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
msg : new success message to display when the submission passes
:Example:
The following SCT changes the success message::
Ex().success_msg("You did it!")
"""
state.reporter.success_msg = msg
return state
|
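To make the 1-based indexing in ``has_chosen`` concrete: ``correct`` counts options from 1, and feedback is looked up with ``selected_option - 1``. A hedged sketch for a hypothetical three-option exercise whose second option is right::

    Ex().has_chosen(
        correct=2,
        msgs=[
            'Incorrect: option 1 picks the wrong column.',
            'Correct!',
            'Almost: option 3 is close, but re-read the question.'
        ]
    )

If the student submission sets ``selected_option = 3``, the third message is reported as failure feedback; choosing option 2 makes it the success message instead.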
datacamp/protowhat | protowhat/checks/check_logic.py | multi | python | def multi(state, *tests):
for test in iter_tests(tests):
# assume test is function needing a state argument
# partial state so reporter can test
state.do_test(partial(test, state))
# return original state, so can be chained
return state | Run multiple subtests. Return original state (for chaining).
This function can be thought of as an AND statement, since all tests it runs must pass
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
tests: one or more sub-SCTs to run.
:Example:
The SCT below checks two has_code cases.. ::
Ex().multi(has_code('SELECT'), has_code('WHERE'))
The SCT below uses ``multi`` to 'branch out' to check that
the SELECT statement has both a WHERE and LIMIT clause.. ::
Ex().check_node('SelectStmt', 0).multi(
check_edge('where_clause'),
check_edge('limit_clause')
) | train | https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_logic.py#L8-L36 | [
"def iter_tests(tests):\n for arg in tests:\n if arg is None:\n continue\n\n # when input is a single test, make iterable\n if callable(arg):\n arg = [arg]\n\n for test in arg:\n yield test\n"
] | from protowhat.Feedback import Feedback
from protowhat.Test import TestFail
from functools import partial
from protowhat.utils import legacy_signature
@legacy_signature(incorrect_msg='msg')
def check_not(state, *tests, msg):
"""Run multiple subtests that should fail. If all subtests fail, returns original state (for chaining)
- This function is currently only tested to work with ``has_code()`` in the subtests.
- This function can be thought of as a ``NOT(x OR y OR ...)`` statement, since all tests it runs must fail.
- This function can be considered a direct counterpart of ``multi``.
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
*tests: one or more sub-SCTs to run
msg: feedback message that is shown in case not all tests specified in ``*tests`` fail.
:Example:
The SCT below runs two has_code cases.. ::
Ex().check_not(
has_code('INNER'),
has_code('OUTER'),
incorrect_msg="Don't use `INNER` or `OUTER`!"
)
If students use ``INNER (JOIN)`` or ``OUTER (JOIN)`` in their code, this test will fail.
"""
for test in iter_tests(tests):
try:
test(state)
except TestFail:
# it fails, as expected, off to next one
continue
return state.report(Feedback(msg))
# return original state, so can be chained
return state
def check_or(state, *tests):
"""Test whether at least one SCT passes.
If all of the tests fail, the feedback of the first test will be presented to the student.
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
tests: one or more sub-SCTs to run
:Example:
The SCT below tests that the student typed either 'SELECT' or 'WHERE' (or both).. ::
Ex().check_or(
has_code('SELECT'),
has_code('WHERE')
)
The SCT below checks that a SELECT statement has at least a WHERE or LIMIT clause.. ::
Ex().check_node('SelectStmt', 0).check_or(
check_edge('where_clause'),
check_edge('limit_clause')
)
"""
success = False
first_feedback = None
for test in iter_tests(tests):
try:
multi(state, test)
success = True
except TestFail as e:
if not first_feedback:
first_feedback = e.feedback
if success:
return state # todo: add test
state.report(first_feedback)
def check_correct(state, check, diagnose):
"""Allows feedback from a diagnostic SCT, only if a check SCT fails.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
check: An sct chain that must succeed.
diagnose: An sct chain to run if the check fails.
:Example:
The SCT below tests whether the student's query result is correct, before running diagnostic SCTs.. ::
Ex().check_correct(
check_result(),
check_node('SelectStmt')
)
"""
feedback = None
try:
multi(state, check)
except TestFail as e:
feedback = e.feedback
# todo: let if from except wrap try-except
# only once teach uses force_diagnose
try:
multi(state, diagnose)
except TestFail as e:
if feedback is not None or state.force_diagnose:
feedback = e.feedback
if feedback is not None:
state.report(feedback)
return state # todo: add test
def iter_tests(tests):
for arg in tests:
if arg is None:
continue
# when input is a single test, make iterable
if callable(arg):
arg = [arg]
for test in arg:
yield test
def disable_highlighting(state):
"""Disable highlighting in the remainder of the SCT chain.
Include this function if you want to avoid that pythonwhat marks which part of the student submission is incorrect.
"""
return state.to_child(highlighting_disabled=True)
def fail(state, msg="fail"):
"""Always fails the SCT, with an optional msg.
This function takes a single argument, ``msg``, that is the feedback given to the student.
Note that this would be a terrible idea for grading submissions, but may be useful while writing SCTs.
For example, failing a test will highlight the code as if the previous test/check had failed.
"""
_msg = state.build_message(msg)
state.report(Feedback(_msg, state))
return state
|
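As ``iter_tests`` (shown in the enclosing scope above) skips ``None`` and flattens lists, ``multi`` accepts a mix of single SCTs, lists of SCTs, and ``None``. A sketch of what that allows; ``strict`` is a hypothetical authoring-time flag, not part of protowhat::

    extra_checks = [has_code('FROM'), has_code('WHERE')] if strict else None

    Ex().multi(
        has_code('SELECT'),  # a single callable is wrapped in a list
        extra_checks         # a list is flattened; None is skipped entirely
    )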
datacamp/protowhat | protowhat/checks/check_logic.py | check_not | python | def check_not(state, *tests, msg):
for test in iter_tests(tests):
try:
test(state)
except TestFail:
# it fails, as expected, off to next one
continue
return state.report(Feedback(msg))
# return original state, so can be chained
return state | Run multiple subtests that should fail. If all subtests fail, returns original state (for chaining)
- This function is currently only tested to work with ``has_code()`` in the subtests.
- This function can be thought of as a ``NOT(x OR y OR ...)`` statement, since all tests it runs must fail.
- This function can be considered a direct counterpart of ``multi``.
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
*tests: one or more sub-SCTs to run
msg: feedback message that is shown in case not all tests specified in ``*tests`` fail.
:Example:
The SCT below runs two has_code cases.. ::
Ex().check_not(
has_code('INNER'),
has_code('OUTER'),
incorrect_msg="Don't use `INNER` or `OUTER`!"
)
If students use ``INNER (JOIN)`` or ``OUTER (JOIN)`` in their code, this test will fail. | train | https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_logic.py#L40-L74 | [
"def iter_tests(tests):\n for arg in tests:\n if arg is None:\n continue\n\n # when input is a single test, make iterable\n if callable(arg):\n arg = [arg]\n\n for test in arg:\n yield test\n"
] | from protowhat.Feedback import Feedback
from protowhat.Test import TestFail
from functools import partial
from protowhat.utils import legacy_signature
def multi(state, *tests):
"""Run multiple subtests. Return original state (for chaining).
This function can be thought of as an AND statement, since all tests it runs must pass
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
tests: one or more sub-SCTs to run.
:Example:
The SCT below checks two has_code cases.. ::
Ex().multi(has_code('SELECT'), has_code('WHERE'))
The SCT below uses ``multi`` to 'branch out' to check that
the SELECT statement has both a WHERE and LIMIT clause.. ::
Ex().check_node('SelectStmt', 0).multi(
check_edge('where_clause'),
check_edge('limit_clause')
)
"""
for test in iter_tests(tests):
# assume test is function needing a state argument
# partial state so reporter can test
state.do_test(partial(test, state))
# return original state, so can be chained
return state
@legacy_signature(incorrect_msg='msg')
def check_or(state, *tests):
"""Test whether at least one SCT passes.
If all of the tests fail, the feedback of the first test will be presented to the student.
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
tests: one or more sub-SCTs to run
:Example:
The SCT below tests that the student typed either 'SELECT' or 'WHERE' (or both).. ::
Ex().check_or(
has_code('SELECT'),
has_code('WHERE')
)
The SCT below checks that a SELECT statement has at least a WHERE or LIMIT clause.. ::
Ex().check_node('SelectStmt', 0).check_or(
check_edge('where_clause'),
check_edge('limit_clause')
)
"""
success = False
first_feedback = None
for test in iter_tests(tests):
try:
multi(state, test)
success = True
except TestFail as e:
if not first_feedback:
first_feedback = e.feedback
if success:
return state # todo: add test
state.report(first_feedback)
def check_correct(state, check, diagnose):
"""Allows feedback from a diagnostic SCT, only if a check SCT fails.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
check: An sct chain that must succeed.
diagnose: An sct chain to run if the check fails.
:Example:
The SCT below tests whether the student's query result is correct, before running diagnostic SCTs.. ::
Ex().check_correct(
check_result(),
check_node('SelectStmt')
)
"""
feedback = None
try:
multi(state, check)
except TestFail as e:
feedback = e.feedback
# todo: let if from except wrap try-except
# only once teach uses force_diagnose
try:
multi(state, diagnose)
except TestFail as e:
if feedback is not None or state.force_diagnose:
feedback = e.feedback
if feedback is not None:
state.report(feedback)
return state # todo: add test
def iter_tests(tests):
for arg in tests:
if arg is None:
continue
# when input is a single test, make iterable
if callable(arg):
arg = [arg]
for test in arg:
yield test
def disable_highlighting(state):
"""Disable highlighting in the remainder of the SCT chain.
Include this function if you want to avoid that pythonwhat marks which part of the student submission is incorrect.
"""
return state.to_child(highlighting_disabled=True)
def fail(state, msg="fail"):
"""Always fails the SCT, with an optional msg.
This function takes a single argument, ``msg``, that is the feedback given to the student.
Note that this would be a terrible idea for grading submissions, but may be useful while writing SCTs.
For example, failing a test will highlight the code as if the previous test/check had failed.
"""
_msg = state.build_message(msg)
state.report(Feedback(_msg, state))
return state
|
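Since ``check_not`` reports ``msg`` as soon as any subtest *passes*, it is a convenient guard against forbidden constructs. A sketch in the spirit of the docstring example (the keywords are illustrative)::

    Ex().check_not(
        has_code('DELETE'),
        has_code('DROP'),
        msg="Don't use `DELETE` or `DROP` in this exercise!"
    )

Note that ``msg`` is keyword-only; the ``legacy_signature`` decorator maps the older ``incorrect_msg`` name onto it, which is why the docstring example can pass ``incorrect_msg=``.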
datacamp/protowhat | protowhat/checks/check_logic.py | check_or | python | def check_or(state, *tests):
success = False
first_feedback = None
for test in iter_tests(tests):
try:
multi(state, test)
success = True
except TestFail as e:
if not first_feedback:
first_feedback = e.feedback
if success:
return state # todo: add test
state.report(first_feedback) | Test whether at least one SCT passes.
If all of the tests fail, the feedback of the first test will be presented to the student.
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
tests: one or more sub-SCTs to run
:Example:
The SCT below tests that the student typed either 'SELECT' or 'WHERE' (or both).. ::
Ex().check_or(
has_code('SELECT'),
has_code('WHERE')
)
The SCT below checks that a SELECT statement has at least a WHERE or LIMIT clause.. ::
Ex().check_node('SelectStmt', 0).check_or(
check_edge('where_clause'),
check_edge('limit_clause')
) | train | https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_logic.py#L77-L114 | [
"def multi(state, *tests):\n \"\"\"Run multiple subtests. Return original state (for chaining).\n\n This function could be thought as an AND statement, since all tests it runs must pass\n\n Args:\n state: State instance describing student and solution code, can be omitted if used with Ex()\n tests: one or more sub-SCTs to run.\n\n :Example:\n The SCT below checks two has_code cases.. ::\n\n Ex().multi(has_code('SELECT'), has_code('WHERE'))\n\n The SCT below uses ``multi`` to 'branch out' to check that\n the SELECT statement has both a WHERE and LIMIT clause.. ::\n\n Ex().check_node('SelectStmt', 0).multi(\n check_edge('where_clause'),\n check_edge('limit_clause')\n )\n \"\"\"\n for test in iter_tests(tests):\n # assume test is function needing a state argument\n # partial state so reporter can test\n state.do_test(partial(test, state))\n\n # return original state, so can be chained\n return state\n",
"def iter_tests(tests):\n for arg in tests:\n if arg is None:\n continue\n\n # when input is a single test, make iterable\n if callable(arg):\n arg = [arg]\n\n for test in arg:\n yield test\n"
] | from protowhat.Feedback import Feedback
from protowhat.Test import TestFail
from functools import partial
from protowhat.utils import legacy_signature
def multi(state, *tests):
"""Run multiple subtests. Return original state (for chaining).
This function can be thought of as an AND statement, since all tests it runs must pass
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
tests: one or more sub-SCTs to run.
:Example:
The SCT below checks two has_code cases.. ::
Ex().multi(has_code('SELECT'), has_code('WHERE'))
The SCT below uses ``multi`` to 'branch out' to check that
the SELECT statement has both a WHERE and LIMIT clause.. ::
Ex().check_node('SelectStmt', 0).multi(
check_edge('where_clause'),
check_edge('limit_clause')
)
"""
for test in iter_tests(tests):
# assume test is function needing a state argument
# partial state so reporter can test
state.do_test(partial(test, state))
# return original state, so can be chained
return state
@legacy_signature(incorrect_msg='msg')
def check_not(state, *tests, msg):
"""Run multiple subtests that should fail. If all subtests fail, returns original state (for chaining)
- This function is currently only tested to work with ``has_code()`` in the subtests.
- This function can be thought of as a ``NOT(x OR y OR ...)`` statement, since all tests it runs must fail.
- This function can be considered a direct counterpart of ``multi``.
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
*tests: one or more sub-SCTs to run
msg: feedback message that is shown in case not all tests specified in ``*tests`` fail.
:Example:
The SCT below runs two has_code cases.. ::
Ex().check_not(
has_code('INNER'),
has_code('OUTER'),
incorrect_msg="Don't use `INNER` or `OUTER`!"
)
If students use ``INNER (JOIN)`` or ``OUTER (JOIN)`` in their code, this test will fail.
"""
for test in iter_tests(tests):
try:
test(state)
except TestFail:
# it fails, as expected, off to next one
continue
return state.report(Feedback(msg))
# return original state, so can be chained
return state
def check_correct(state, check, diagnose):
"""Allows feedback from a diagnostic SCT, only if a check SCT fails.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
check: An sct chain that must succeed.
diagnose: An sct chain to run if the check fails.
:Example:
The SCT below tests whether the student's query result is correct, before running diagnostic SCTs.. ::
Ex().check_correct(
check_result(),
check_node('SelectStmt')
)
"""
feedback = None
try:
multi(state, check)
except TestFail as e:
feedback = e.feedback
# todo: let if from except wrap try-except
# only once teach uses force_diagnose
try:
multi(state, diagnose)
except TestFail as e:
if feedback is not None or state.force_diagnose:
feedback = e.feedback
if feedback is not None:
state.report(feedback)
return state # todo: add test
def iter_tests(tests):
for arg in tests:
if arg is None:
continue
# when input is a single test, make iterable
if callable(arg):
arg = [arg]
for test in arg:
yield test
def disable_highlighting(state):
"""Disable highlighting in the remainder of the SCT chain.
Include this function if you want to avoid that pythonwhat marks which part of the student submission is incorrect.
"""
return state.to_child(highlighting_disabled=True)
def fail(state, msg="fail"):
"""Always fails the SCT, with an optional msg.
This function takes a single argument, ``msg``, that is the feedback given to the student.
Note that this would be a terrible idea for grading submissions, but may be useful while writing SCTs.
For example, failing a test will highlight the code as if the previous test/check had failed.
"""
_msg = state.build_message(msg)
state.report(Feedback(_msg, state))
return state
|
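Because ``check_or`` surfaces the feedback of the *first* failing subtest when all subtests fail, it pays to order subtests from most to least informative. A sketch (the messages are illustrative)::

    Ex().check_or(
        has_code('LIMIT', incorrect_msg='Did you limit the number of rows?'),
        has_code('FETCH FIRST')
    )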
datacamp/protowhat | protowhat/checks/check_logic.py | check_correct | python | def check_correct(state, check, diagnose):
feedback = None
try:
multi(state, check)
except TestFail as e:
feedback = e.feedback
# todo: let if from except wrap try-except
# only once teach uses force_diagnose
try:
multi(state, diagnose)
except TestFail as e:
if feedback is not None or state.force_diagnose:
feedback = e.feedback
if feedback is not None:
state.report(feedback)
return state | Allows feedback from a diagnostic SCT, only if a check SCT fails.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
check: An sct chain that must succeed.
diagnose: An sct chain to run if the check fails.
:Example:
The SCT below tests whether the student's query result is correct, before running diagnostic SCTs.. ::
Ex().check_correct(
check_result(),
check_node('SelectStmt')
) | train | https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_logic.py#L117-L151 | [
"def multi(state, *tests):\n \"\"\"Run multiple subtests. Return original state (for chaining).\n\n This function could be thought as an AND statement, since all tests it runs must pass\n\n Args:\n state: State instance describing student and solution code, can be omitted if used with Ex()\n tests: one or more sub-SCTs to run.\n\n :Example:\n The SCT below checks two has_code cases.. ::\n\n Ex().multi(has_code('SELECT'), has_code('WHERE'))\n\n The SCT below uses ``multi`` to 'branch out' to check that\n the SELECT statement has both a WHERE and LIMIT clause.. ::\n\n Ex().check_node('SelectStmt', 0).multi(\n check_edge('where_clause'),\n check_edge('limit_clause')\n )\n \"\"\"\n for test in iter_tests(tests):\n # assume test is function needing a state argument\n # partial state so reporter can test\n state.do_test(partial(test, state))\n\n # return original state, so can be chained\n return state\n"
] | from protowhat.Feedback import Feedback
from protowhat.Test import TestFail
from functools import partial
from protowhat.utils import legacy_signature
def multi(state, *tests):
"""Run multiple subtests. Return original state (for chaining).
This function can be thought of as an AND statement, since all tests it runs must pass
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
tests: one or more sub-SCTs to run.
:Example:
The SCT below checks two has_code cases.. ::
Ex().multi(has_code('SELECT'), has_code('WHERE'))
The SCT below uses ``multi`` to 'branch out' to check that
the SELECT statement has both a WHERE and LIMIT clause.. ::
Ex().check_node('SelectStmt', 0).multi(
check_edge('where_clause'),
check_edge('limit_clause')
)
"""
for test in iter_tests(tests):
# assume test is function needing a state argument
# partial state so reporter can test
state.do_test(partial(test, state))
# return original state, so can be chained
return state
@legacy_signature(incorrect_msg='msg')
def check_not(state, *tests, msg):
"""Run multiple subtests that should fail. If all subtests fail, returns original state (for chaining)
- This function is currently only tested to work with ``has_code()`` in the subtests.
- This function can be thought of as a ``NOT(x OR y OR ...)`` statement, since all tests it runs must fail.
- This function can be considered a direct counterpart of ``multi``.
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
*tests: one or more sub-SCTs to run
msg: feedback message that is shown in case not all tests specified in ``*tests`` fail.
:Example:
The SCT below runs two has_code cases.. ::
Ex().check_not(
has_code('INNER'),
has_code('OUTER'),
incorrect_msg="Don't use `INNER` or `OUTER`!"
)
If students use ``INNER (JOIN)`` or ``OUTER (JOIN)`` in their code, this test will fail.
"""
for test in iter_tests(tests):
try:
test(state)
except TestFail:
# it fails, as expected, off to next one
continue
return state.report(Feedback(msg))
# return original state, so can be chained
return state
def check_or(state, *tests):
"""Test whether at least one SCT passes.
If all of the tests fail, the feedback of the first test will be presented to the student.
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
tests: one or more sub-SCTs to run
:Example:
The SCT below tests that the student typed either 'SELECT' or 'WHERE' (or both).. ::
Ex().check_or(
has_code('SELECT'),
has_code('WHERE')
)
The SCT below checks that a SELECT statement has at least a WHERE or LIMIT clause.. ::
Ex().check_node('SelectStmt', 0).check_or(
check_edge('where_clause'),
check_edge('limit_clause')
)
"""
success = False
first_feedback = None
for test in iter_tests(tests):
try:
multi(state, test)
success = True
except TestFail as e:
if not first_feedback:
first_feedback = e.feedback
if success:
return state # todo: add test
state.report(first_feedback)
# todo: add test
def iter_tests(tests):
for arg in tests:
if arg is None:
continue
# when input is a single test, make iterable
if callable(arg):
arg = [arg]
for test in arg:
yield test
def disable_highlighting(state):
"""Disable highlighting in the remainder of the SCT chain.
Include this function if you want to avoid that pythonwhat marks which part of the student submission is incorrect.
"""
return state.to_child(highlighting_disabled=True)
def fail(state, msg="fail"):
"""Always fails the SCT, with an optional msg.
This function takes a single argument, ``msg``, that is the feedback given to the student.
Note that this would be a terrible idea for grading submissions, but may be useful while writing SCTs.
For example, failing a test will highlight the code as if the previous test/check had failed.
"""
_msg = state.build_message(msg)
state.report(Feedback(_msg, state))
return state
|
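Reading the ``check_correct`` body above: feedback from ``diagnose`` is only reported when ``check`` has failed (or when ``force_diagnose`` is set on the state), so a passing result silences nit-picky diagnostics. A hedged sketch, assuming the ``has_equal_ast``/``check_node``/``check_edge`` checks from check_funcs.py are available as chainable SCTs::

    Ex().check_correct(
        has_equal_ast(),
        check_node('SelectStmt', 0).check_edge('where_clause')
    )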
datacamp/protowhat | protowhat/checks/check_logic.py | fail | python | def fail(state, msg="fail"):
_msg = state.build_message(msg)
state.report(Feedback(_msg, state))
return state | Always fails the SCT, with an optional msg.
This function takes a single argument, ``msg``, that is the feedback given to the student.
Note that this would be a terrible idea for grading submissions, but may be useful while writing SCTs.
For example, failing a test will highlight the code as if the previous test/check had failed. | train | https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_logic.py#L175-L185 | null | from protowhat.Feedback import Feedback
from protowhat.Test import TestFail
from functools import partial
from protowhat.utils import legacy_signature
def multi(state, *tests):
"""Run multiple subtests. Return original state (for chaining).
This function can be thought of as an AND statement, since all tests it runs must pass
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
tests: one or more sub-SCTs to run.
:Example:
The SCT below checks two has_code cases.. ::
Ex().multi(has_code('SELECT'), has_code('WHERE'))
The SCT below uses ``multi`` to 'branch out' to check that
the SELECT statement has both a WHERE and LIMIT clause.. ::
Ex().check_node('SelectStmt', 0).multi(
check_edge('where_clause'),
check_edge('limit_clause')
)
"""
for test in iter_tests(tests):
# assume test is function needing a state argument
# partial state so reporter can test
state.do_test(partial(test, state))
# return original state, so can be chained
return state
@legacy_signature(incorrect_msg='msg')
def check_not(state, *tests, msg):
"""Run multiple subtests that should fail. If all subtests fail, returns original state (for chaining)
- This function is currently only tested to work with ``has_code()`` in the subtests.
- This function can be thought of as a ``NOT(x OR y OR ...)`` statement, since all tests it runs must fail.
- This function can be considered a direct counterpart of ``multi``.
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
*tests: one or more sub-SCTs to run
msg: feedback message that is shown in case not all tests specified in ``*tests`` fail.
:Example:
The SCT below runs two has_code cases.. ::
Ex().check_not(
has_code('INNER'),
has_code('OUTER'),
incorrect_msg="Don't use `INNER` or `OUTER`!"
)
If students use ``INNER (JOIN)`` or ``OUTER (JOIN)`` in their code, this test will fail.
"""
for test in iter_tests(tests):
try:
test(state)
except TestFail:
# it fails, as expected, off to next one
continue
return state.report(Feedback(msg))
# return original state, so can be chained
return state
def check_or(state, *tests):
"""Test whether at least one SCT passes.
If all of the tests fail, the feedback of the first test will be presented to the student.
Args:
state: State instance describing student and solution code, can be omitted if used with Ex()
tests: one or more sub-SCTs to run
:Example:
The SCT below tests that the student typed either 'SELECT' or 'WHERE' (or both).. ::
Ex().check_or(
has_code('SELECT'),
has_code('WHERE')
)
The SCT below checks that a SELECT statement has at least a WHERE or LIMIT clause.. ::
Ex().check_node('SelectStmt', 0).check_or(
check_edge('where_clause'),
check_edge('limit_clause')
)
"""
success = False
first_feedback = None
for test in iter_tests(tests):
try:
multi(state, test)
success = True
except TestFail as e:
if not first_feedback:
first_feedback = e.feedback
if success:
return state # todo: add test
state.report(first_feedback)
def check_correct(state, check, diagnose):
"""Allows feedback from a diagnostic SCT, only if a check SCT fails.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
check: An sct chain that must succeed.
diagnose: An sct chain to run if the check fails.
:Example:
The SCT below tests whether the student's query result is correct, before running diagnostic SCTs.. ::
Ex().check_correct(
check_result(),
check_node('SelectStmt')
)
"""
feedback = None
try:
multi(state, check)
except TestFail as e:
feedback = e.feedback
# todo: let if from except wrap try-except
# only once teach uses force_diagnose
try:
multi(state, diagnose)
except TestFail as e:
if feedback is not None or state.force_diagnose:
feedback = e.feedback
if feedback is not None:
state.report(feedback)
return state # todo: add test
def iter_tests(tests):
for arg in tests:
if arg is None:
continue
# when input is a single test, make iterable
if callable(arg):
arg = [arg]
for test in arg:
yield test
def disable_highlighting(state):
"""Disable highlighting in the remainder of the SCT chain.
Include this function if you want to avoid that pythonwhat marks which part of the student submission is incorrect.
"""
return state.to_child(highlighting_disabled=True)
|
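As the ``fail`` docstring suggests, it is mainly an authoring aid: dropped at the end of a chain, it confirms the chain reaches that point and shows what would be highlighted. A sketch::

    # temporary while developing the SCT: always fails with this message
    Ex().check_node('SelectStmt', 0).fail(msg='reached the SelectStmt check')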
datacamp/protowhat | protowhat/checks/check_funcs.py | check_node | python | def check_node(
state,
name,
index=0,
missing_msg="Check the {ast_path}. Could not find the {index}{node_name}.",
priority=None,
):
df = partial(state.ast_dispatcher, name, priority=priority)
sol_stmt_list = df(state.solution_ast)
try:
sol_stmt = sol_stmt_list[index]
except IndexError:
raise IndexError("Can't get %s statement at index %s" % (name, index))
stu_stmt_list = df(state.student_ast)
try:
stu_stmt = stu_stmt_list[index]
except IndexError:
# use speaker on ast dialect module to get message, or fall back to generic
ast_path = state.get_ast_path() or "highlighted code"
_msg = state.ast_dispatcher.describe(
sol_stmt, missing_msg, index=index, ast_path=ast_path
)
if _msg is None:
_msg = MSG_CHECK_FALLBACK
state.report(Feedback(_msg))
action = {
"type": "check_node",
"kwargs": {"name": name, "index": index},
"node": stu_stmt,
}
return state.to_child(
student_ast=stu_stmt, solution_ast=sol_stmt, history=state.history + (action,)
) | Select a node from abstract syntax tree (AST), using its name and index position.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
name : the name of the abstract syntax tree node to find.
index: the position of that node (see below for details).
missing_msg: feedback message if node is not in student AST.
priority: the priority level of the node being searched for. This determines whether to
descend into other AST nodes during the search. Higher priority nodes descend
into lower priority. Currently, the only important part of priority is that
setting a very high priority (e.g. 99) will search every node.
:Example:
If both the student and solution code are.. ::
SELECT a FROM b; SELECT x FROM y;
then we can focus on the first select with::
# approach 1: with manually created State instance
state = State(*args, **kwargs)
new_state = check_node(state, 'SelectStmt', 0)
# approach 2: with Ex and chaining
new_state = Ex().check_node('SelectStmt', 0) | train | https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_funcs.py#L37-L101 | null | from functools import partial, wraps
from protowhat.Feedback import Feedback
MSG_CHECK_FALLBACK = "Your submission is incorrect. Try again!"
def requires_ast(f):
@wraps(f)
def wrapper(*args, **kwargs):
state = kwargs.get("state", args[0] if len(args) else None)
state_ast = [state.student_ast, state.solution_ast]
# fail if no ast parser in use
if any(ast is None for ast in state_ast):
raise TypeError(
"Trying to use ast, but it is None. Are you using a parser? {} {}".format(
args, kwargs
)
)
# check whether the parser passed or failed for some code
# if safe_parsing is enabled in the Dispatcher (otherwise an exception would be raised earlier)
ParseError = state.ast_dispatcher.ParseError
parse_fail = any(isinstance(ast, ParseError) for ast in state_ast)
if parse_fail:
return state # skip test
else:
return f(*args, **kwargs) # proceed with test
return wrapper
@requires_ast
@requires_ast
def check_edge(
state,
name,
index=0,
missing_msg="Check the {ast_path}. Could not find the {index}{field_name}.",
):
"""Select an attribute from an abstract syntax tree (AST) node, using the attribute name.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
name: the name of the attribute to select from current AST node.
index: entry to get from a list field. If too few entries, will fail with missing_msg.
missing_msg: feedback message if attribute is not in student AST.
:Example:
If both the student and solution code are.. ::
SELECT a FROM b; SELECT x FROM y;
then we can get the from_clause using ::
# approach 1: with manually created State instance -----
state = State(*args, **kwargs)
select = check_node(state, 'SelectStmt', 0)
clause = check_edge(select, 'from_clause')
# approach 2: with Ex and chaining ---------------------
select = Ex().check_node('SelectStmt', 0) # get first select statement
clause = select.check_edge('from_clause', None) # get from_clause (a list)
clause2 = select.check_edge('from_clause', 0) # get first entry in from_clause
"""
try:
sol_attr = getattr(state.solution_ast, name)
if sol_attr and isinstance(sol_attr, list) and index is not None:
sol_attr = sol_attr[index]
except IndexError:
raise IndexError("Can't get %s attribute" % name)
# use speaker on ast dialect module to get message, or fall back to generic
ast_path = state.get_ast_path() or "highlighted code"
_msg = state.ast_dispatcher.describe(
state.student_ast, missing_msg, field=name, index=index, ast_path=ast_path
)
if _msg is None:
_msg = MSG_CHECK_FALLBACK
try:
stu_attr = getattr(state.student_ast, name)
if stu_attr and isinstance(stu_attr, list) and index is not None:
stu_attr = stu_attr[index]
except:
state.report(Feedback(_msg))
# fail if attribute exists, but is none only for student
if stu_attr is None and sol_attr is not None:
state.report(Feedback(_msg))
action = {"type": "check_edge", "kwargs": {"name": name, "index": index}}
return state.to_child(
student_ast=stu_attr, solution_ast=sol_attr, history=state.history + (action,)
)
import re
def has_code(
state,
text,
incorrect_msg="Check the {ast_path}. The checker expected to find {text}.",
fixed=False,
):
"""Test whether the student code contains text.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
text : text that student code must contain. Can be a regex pattern or a simple string.
incorrect_msg: feedback message if text is not in student code.
fixed: whether to match text exactly, rather than using regular expressions.
Note:
Functions like ``check_node`` focus on certain parts of code.
Using these functions followed by ``has_code`` will only look
in the code being focused on.
:Example:
If the student code is.. ::
SELECT a FROM b WHERE id < 100
Then the first test below would (unfortunately) pass, but the second would fail..::
# contained in student code
Ex().has_code(text="id < 10")
# the $ means that you are matching the end of a line
Ex().has_code(text="id < 10$")
By setting ``fixed = True``, you can search for fixed strings::
# without fixed = True, text is a regex: '.*' matches any characters
Ex().has_code(text="SELECT .* FROM b") # passes
Ex().has_code(text="SELECT \\\\* FROM b") # fails
Ex().has_code(text="SELECT * FROM b", fixed=True) # fails
You can check only the code corresponding to the WHERE clause, using ::
where = Ex().check_node('SelectStmt', 0).check_edge('where_clause')
where.has_code(text = "id < 10")
"""
stu_ast = state.student_ast
stu_code = state.student_code
# fallback on using complete student code if no ast
ParseError = state.ast_dispatcher.ParseError
def get_text(ast, code):
if isinstance(ast, ParseError):
return code
try:
return ast.get_text(code)
except:
return code
stu_text = get_text(stu_ast, stu_code)
_msg = incorrect_msg.format(
ast_path=state.get_ast_path() or "highlighted code", text=text
)
# either simple text matching or regex test
res = text in stu_text if fixed else re.search(text, stu_text)
if not res:
state.report(Feedback(_msg))
return state
@requires_ast
def has_equal_ast(
state,
incorrect_msg="Check the {ast_path}. {extra}",
sql=None,
start=["expression", "subquery", "sql_script"][0],
exact=None,
):
"""Test whether the student and solution code have identical AST representations
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
incorrect_msg: feedback message if student and solution ASTs don't match
sql : optional code to use instead of the solution ast that is zoomed in on.
start: if ``sql`` arg is used, the parser rule to parse the sql code.
One of 'expression' (the default), 'subquery', or 'sql_script'.
exact: whether to require an exact match (True), or only that the
student AST contains the solution AST. If not specified, this
defaults to ``True`` if ``sql`` is not specified, and to ``False``
if ``sql`` is specified. You can always specify it manually.
:Example:
Example 1 - Suppose the solution code is ::
SELECT * FROM cities
and you want to verify whether the `FROM` part is correct: ::
Ex().check_node('SelectStmt').from_clause().has_equal_ast()
Example 2 - Suppose the solution code is ::
SELECT * FROM b WHERE id > 1 AND name = 'filip'
Then the following SCT makes sure ``id > 1`` was used somewhere in the WHERE clause.::
Ex().check_node('SelectStmt') \\
.check_edge('where_clause') \\
.has_equal_ast(sql = 'id > 1')
"""
ast = state.ast_dispatcher.ast_mod
sol_ast = state.solution_ast if sql is None else ast.parse(sql, start)
# if sql is set, exact defaults to False.
# if sql not set, exact defaults to True.
if exact is None:
exact = sql is None
stu_rep = repr(state.student_ast)
sol_rep = repr(sol_ast)
def get_str(ast, code, sql):
if sql:
return sql
if isinstance(ast, str):
return ast
try:
return ast.get_text(code)
except:
return None
sol_str = get_str(state.solution_ast, state.solution_code, sql)
_msg = incorrect_msg.format(
ast_path=state.get_ast_path() or "highlighted code",
extra="The checker expected to find `{}` in there.".format(sol_str)
if sol_str
else "Something is missing.",
)
if (exact and (sol_rep != stu_rep)) or (not exact and (sol_rep not in stu_rep)):
state.report(Feedback(_msg))
return state
def has_parsed_ast(state):
asts = [state.student_ast, state.solution_ast]
if any(isinstance(c, state.ast_dispatcher.ParseError) for c in asts):
state.report(Feedback("AST did not parse"))
return state
|
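Extending the docstring example: with ``SELECT a FROM b; SELECT x FROM y;`` as code, ``index`` picks among the matched nodes (0-based), and a very high ``priority`` makes the dispatcher search every node. A sketch; ``'SubQuery'`` is an illustrative node name, not necessarily one your dialect defines::

    # focus on the second SELECT statement
    Ex().check_node('SelectStmt', 1)

    # search every node type for a match (99 is the 'search everything'
    # level mentioned in the docstring)
    Ex().check_node('SubQuery', 0, priority=99)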
datacamp/protowhat | protowhat/checks/check_funcs.py | check_edge | python | def check_edge(
state,
name,
index=0,
missing_msg="Check the {ast_path}. Could not find the {index}{field_name}.",
):
try:
sol_attr = getattr(state.solution_ast, name)
if sol_attr and isinstance(sol_attr, list) and index is not None:
sol_attr = sol_attr[index]
except IndexError:
raise IndexError("Can't get %s attribute" % name)
# use speaker on ast dialect module to get message, or fall back to generic
ast_path = state.get_ast_path() or "highlighted code"
_msg = state.ast_dispatcher.describe(
state.student_ast, missing_msg, field=name, index=index, ast_path=ast_path
)
if _msg is None:
_msg = MSG_CHECK_FALLBACK
try:
stu_attr = getattr(state.student_ast, name)
if stu_attr and isinstance(stu_attr, list) and index is not None:
stu_attr = stu_attr[index]
except:
state.report(Feedback(_msg))
# fail if attribute exists, but is none only for student
if stu_attr is None and sol_attr is not None:
state.report(Feedback(_msg))
action = {"type": "check_edge", "kwargs": {"name": name, "index": index}}
return state.to_child(
student_ast=stu_attr, solution_ast=sol_attr, history=state.history + (action,)
) | Select an attribute from an abstract syntax tree (AST) node, using the attribute name.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
name: the name of the attribute to select from current AST node.
index: entry to get from a list field. If too few entries, will fail with missing_msg.
missing_msg: feedback message if attribute is not in student AST.
:Example:
If both the student and solution code are.. ::
SELECT a FROM b; SELECT x FROM y;
then we can get the from_clause using ::
# approach 1: with manually created State instance -----
state = State(*args, **kwargs)
select = check_node(state, 'SelectStmt', 0)
clause = check_edge(select, 'from_clause')
# approach 2: with Ex and chaining ---------------------
select = Ex().check_node('SelectStmt', 0) # get first select statement
clause = select.check_edge('from_clause', None) # get from_clause (a list)
clause2 = select.check_edge('from_clause', 0) # get first entry in from_clause | train | https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_funcs.py#L105-L166 | null | from functools import partial, wraps
from protowhat.Feedback import Feedback
MSG_CHECK_FALLBACK = "Your submission is incorrect. Try again!"
def requires_ast(f):
@wraps(f)
def wrapper(*args, **kwargs):
state = kwargs.get("state", args[0] if len(args) else None)
state_ast = [state.student_ast, state.solution_ast]
# fail if no ast parser in use
if any(ast is None for ast in state_ast):
raise TypeError(
"Trying to use ast, but it is None. Are you using a parser? {} {}".format(
args, kwargs
)
)
# check whether the parser passed or failed for some code
# if safe_parsing is enabled in the Dispatcher (otherwise an exception would be raised earlier)
ParseError = state.ast_dispatcher.ParseError
parse_fail = any(isinstance(ast, ParseError) for ast in state_ast)
if parse_fail:
return state # skip test
else:
return f(*args, **kwargs) # proceed with test
return wrapper
@requires_ast
def check_node(
state,
name,
index=0,
missing_msg="Check the {ast_path}. Could not find the {index}{node_name}.",
priority=None,
):
"""Select a node from abstract syntax tree (AST), using its name and index position.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
name : the name of the abstract syntax tree node to find.
index: the position of that node (see below for details).
missing_msg: feedback message if node is not in student AST.
priority: the priority level of the node being searched for. This determines whether to
descend into other AST nodes during the search. Higher priority nodes descend
into lower priority. Currently, the only important part of priority is that
setting a very high priority (e.g. 99) will search every node.
:Example:
If both the student and solution code are.. ::
SELECT a FROM b; SELECT x FROM y;
then we can focus on the first select with::
# approach 1: with manually created State instance
state = State(*args, **kwargs)
new_state = check_node(state, 'SelectStmt', 0)
# approach 2: with Ex and chaining
new_state = Ex().check_node('SelectStmt', 0)
"""
df = partial(state.ast_dispatcher, name, priority=priority)
sol_stmt_list = df(state.solution_ast)
try:
sol_stmt = sol_stmt_list[index]
except IndexError:
raise IndexError("Can't get %s statement at index %s" % (name, index))
stu_stmt_list = df(state.student_ast)
try:
stu_stmt = stu_stmt_list[index]
except IndexError:
# use speaker on ast dialect module to get message, or fall back to generic
ast_path = state.get_ast_path() or "highlighted code"
_msg = state.ast_dispatcher.describe(
sol_stmt, missing_msg, index=index, ast_path=ast_path
)
if _msg is None:
_msg = MSG_CHECK_FALLBACK
state.report(Feedback(_msg))
action = {
"type": "check_node",
"kwargs": {"name": name, "index": index},
"node": stu_stmt,
}
return state.to_child(
student_ast=stu_stmt, solution_ast=sol_stmt, history=state.history + (action,)
)
@requires_ast
import re
def has_code(
state,
text,
incorrect_msg="Check the {ast_path}. The checker expected to find {text}.",
fixed=False,
):
"""Test whether the student code contains text.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
text : text that student code must contain. Can be a regex pattern or a simple string.
incorrect_msg: feedback message if text is not in student code.
fixed: whether to match text exactly, rather than using regular expressions.
Note:
Functions like ``check_node`` focus on certain parts of code.
Using these functions followed by ``has_code`` will only look
in the code being focused on.
:Example:
If the student code is.. ::
SELECT a FROM b WHERE id < 100
Then the first test below would (unfortunately) pass, but the second would fail..::
# contained in student code
Ex().has_code(text="id < 10")
# the $ means that you are matching the end of a line
Ex().has_code(text="id < 10$")
By setting ``fixed = True``, you can search for fixed strings::
# without fixed = True, text is a regex: '.*' matches any characters
Ex().has_code(text="SELECT .* FROM b") # passes
Ex().has_code(text="SELECT \\\\* FROM b") # fails
Ex().has_code(text="SELECT * FROM b", fixed=True) # fails
You can check only the code corresponding to the WHERE clause, using ::
where = Ex().check_node('SelectStmt', 0).check_edge('where_clause')
where.has_code(text = "id < 10")
"""
stu_ast = state.student_ast
stu_code = state.student_code
# fallback on using complete student code if no ast
ParseError = state.ast_dispatcher.ParseError
def get_text(ast, code):
if isinstance(ast, ParseError):
return code
try:
return ast.get_text(code)
except:
return code
stu_text = get_text(stu_ast, stu_code)
_msg = incorrect_msg.format(
ast_path=state.get_ast_path() or "highlighted code", text=text
)
# either simple text matching or regex test
res = text in stu_text if fixed else re.search(text, stu_text)
if not res:
state.report(Feedback(_msg))
return state
@requires_ast
def has_equal_ast(
state,
incorrect_msg="Check the {ast_path}. {extra}",
sql=None,
start=["expression", "subquery", "sql_script"][0],
exact=None,
):
"""Test whether the student and solution code have identical AST representations
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
incorrect_msg: feedback message if student and solution ASTs don't match
sql : optional code to use instead of the solution ast that is zoomed in on.
start: if ``sql`` arg is used, the parser rule to parse the sql code.
One of 'expression' (the default), 'subquery', or 'sql_script'.
exact: whether to require an exact match (True), or only that the
student AST contains the solution AST. If not specified, this
defaults to ``True`` if ``sql`` is not specified, and to ``False``
if ``sql`` is specified. You can always specify it manually.
:Example:
Example 1 - Suppose the solution code is ::
SELECT * FROM cities
and you want to verify whether the `FROM` part is correct: ::
Ex().check_node('SelectStmt').from_clause().has_equal_ast()
Example 2 - Suppose the solution code is ::
SELECT * FROM b WHERE id > 1 AND name = 'filip'
Then the following SCT makes sure ``id > 1`` was used somewhere in the WHERE clause.::
Ex().check_node('SelectStmt') \\
.check_edge('where_clause') \\
.has_equal_ast(sql = 'id > 1')
"""
ast = state.ast_dispatcher.ast_mod
sol_ast = state.solution_ast if sql is None else ast.parse(sql, start)
# if sql is set, exact defaults to False.
# if sql not set, exact defaults to True.
if exact is None:
exact = sql is None
stu_rep = repr(state.student_ast)
sol_rep = repr(sol_ast)
def get_str(ast, code, sql):
if sql:
return sql
if isinstance(ast, str):
return ast
try:
return ast.get_text(code)
except:
return None
sol_str = get_str(state.solution_ast, state.solution_code, sql)
_msg = incorrect_msg.format(
ast_path=state.get_ast_path() or "highlighted code",
extra="The checker expected to find `{}` in there.".format(sol_str)
if sol_str
else "Something is missing.",
)
if (exact and (sol_rep != stu_rep)) or (not exact and (sol_rep not in stu_rep)):
state.report(Feedback(_msg))
return state
def has_parsed_ast(state):
asts = [state.student_ast, state.solution_ast]
if any(isinstance(c, state.ast_dispatcher.ParseError) for c in asts):
state.report(Feedback("AST did not parse"))
return state
|
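Putting ``check_node`` and ``check_edge`` together as the docstrings do: a sketch that zooms from a statement to one of its clauses and then asserts on it, assuming the usual ``Ex()`` setup::

    Ex().check_node('SelectStmt', 0).check_edge('from_clause').has_equal_ast()

Passing ``index=None`` to ``check_edge`` keeps a list-valued field whole instead of selecting one entry, as the ``clause``/``clause2`` lines in the docstring show.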
datacamp/protowhat | protowhat/checks/check_funcs.py | has_code | python | def has_code(
state,
text,
incorrect_msg="Check the {ast_path}. The checker expected to find {text}.",
fixed=False,
):
stu_ast = state.student_ast
stu_code = state.student_code
# fallback on using complete student code if no ast
ParseError = state.ast_dispatcher.ParseError
def get_text(ast, code):
if isinstance(ast, ParseError):
return code
try:
return ast.get_text(code)
except:
return code
stu_text = get_text(stu_ast, stu_code)
_msg = incorrect_msg.format(
ast_path=state.get_ast_path() or "highlighted code", text=text
)
# either simple text matching or regex test
res = text in stu_text if fixed else re.search(text, stu_text)
if not res:
state.report(Feedback(_msg))
return state | Test whether the student code contains text.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
text : text that student code must contain. Can be a regex pattern or a simple string.
incorrect_msg: feedback message if text is not in student code.
fixed: whether to match text exactly, rather than using regular expressions.
Note:
Functions like ``check_node`` focus on certain parts of code.
Using these functions followed by ``has_code`` will only look
in the code being focused on.
:Example:
If the student code is.. ::
SELECT a FROM b WHERE id < 100
Then the first test below would (unfortunately) pass, but the second would fail..::
# contained in student code
Ex().has_code(text="id < 10")
# the $ means that you are matching the end of a line
Ex().has_code(text="id < 10$")
By setting ``fixed = True``, you can search for fixed strings::
# without fixed = True, text is a regex: '.*' matches any characters
Ex().has_code(text="SELECT .* FROM b") # passes
Ex().has_code(text="SELECT \\\\* FROM b") # fails
Ex().has_code(text="SELECT * FROM b", fixed=True) # fails
You can check only the code corresponding to the WHERE clause, using ::
where = Ex().check_node('SelectStmt', 0).check_edge('where_clause')
where.has_code(text = "id < 10") | train | https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_funcs.py#L172-L243 | [
"def get_text(ast, code):\n if isinstance(ast, ParseError):\n return code\n try:\n return ast.get_text(code)\n except:\n return code\n"
] | from functools import partial, wraps
from protowhat.Feedback import Feedback
MSG_CHECK_FALLBACK = "Your submission is incorrect. Try again!"
def requires_ast(f):
@wraps(f)
def wrapper(*args, **kwargs):
state = kwargs.get("state", args[0] if len(args) else None)
state_ast = [state.student_ast, state.solution_ast]
# fail if no ast parser in use
if any(ast is None for ast in state_ast):
raise TypeError(
"Trying to use ast, but it is None. Are you using a parser? {} {}".format(
args, kwargs
)
)
# check whether the parser passed or failed for some code
# if safe_parsing is enabled in the Dispatcher (otherwise an exception would be raised earlier)
ParseError = state.ast_dispatcher.ParseError
parse_fail = any(isinstance(ast, ParseError) for ast in state_ast)
if parse_fail:
return state # skip test
else:
return f(*args, **kwargs) # proceed with test
return wrapper
@requires_ast
def check_node(
state,
name,
index=0,
missing_msg="Check the {ast_path}. Could not find the {index}{node_name}.",
priority=None,
):
"""Select a node from abstract syntax tree (AST), using its name and index position.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
name : the name of the abstract syntax tree node to find.
index: the position of that node (see below for details).
missing_msg: feedback message if node is not in student AST.
priority: the priority level of the node being searched for. This determines whether to
descend into other AST nodes during the search. Higher priority nodes descend
into lower priority. Currently, the only important part of priority is that
setting a very high priority (e.g. 99) will search every node.
:Example:
If both the student and solution code are.. ::
SELECT a FROM b; SELECT x FROM y;
then we can focus on the first select with::
# approach 1: with manually created State instance
state = State(*args, **kwargs)
new_state = check_node(state, 'SelectStmt', 0)
# approach 2: with Ex and chaining
new_state = Ex().check_node('SelectStmt', 0)
"""
df = partial(state.ast_dispatcher, name, priority=priority)
sol_stmt_list = df(state.solution_ast)
try:
sol_stmt = sol_stmt_list[index]
except IndexError:
raise IndexError("Can't get %s statement at index %s" % (name, index))
stu_stmt_list = df(state.student_ast)
try:
stu_stmt = stu_stmt_list[index]
except IndexError:
# use speaker on ast dialect module to get message, or fall back to generic
ast_path = state.get_ast_path() or "highlighted code"
_msg = state.ast_dispatcher.describe(
sol_stmt, missing_msg, index=index, ast_path=ast_path
)
if _msg is None:
_msg = MSG_CHECK_FALLBACK
state.report(Feedback(_msg))
action = {
"type": "check_node",
"kwargs": {"name": name, "index": index},
"node": stu_stmt,
}
return state.to_child(
student_ast=stu_stmt, solution_ast=sol_stmt, history=state.history + (action,)
)
@requires_ast
def check_edge(
state,
name,
index=0,
missing_msg="Check the {ast_path}. Could not find the {index}{field_name}.",
):
"""Select an attribute from an abstract syntax tree (AST) node, using the attribute name.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
name: the name of the attribute to select from current AST node.
index: entry to get from a list field. If too few entries, will fail with missing_msg.
missing_msg: feedback message if attribute is not in student AST.
:Example:
If both the student and solution code are.. ::
SELECT a FROM b; SELECT x FROM y;
then we can get the from_clause using ::
# approach 1: with manually created State instance -----
state = State(*args, **kwargs)
select = check_node(state, 'SelectStmt', 0)
clause = check_edge(select, 'from_clause')
# approach 2: with Ex and chaining ---------------------
select = Ex().check_node('SelectStmt', 0) # get first select statement
clause = select.check_edge('from_clause', None) # get from_clause (a list)
clause2 = select.check_edge('from_clause', 0) # get first entry in from_clause
"""
try:
sol_attr = getattr(state.solution_ast, name)
if sol_attr and isinstance(sol_attr, list) and index is not None:
sol_attr = sol_attr[index]
except IndexError:
raise IndexError("Can't get %s attribute" % name)
# use speaker on ast dialect module to get message, or fall back to generic
ast_path = state.get_ast_path() or "highlighted code"
_msg = state.ast_dispatcher.describe(
state.student_ast, missing_msg, field=name, index=index, ast_path=ast_path
)
if _msg is None:
_msg = MSG_CHECK_FALLBACK
try:
stu_attr = getattr(state.student_ast, name)
if stu_attr and isinstance(stu_attr, list) and index is not None:
stu_attr = stu_attr[index]
except:
state.report(Feedback(_msg))
# fail if attribute exists, but is none only for student
if stu_attr is None and sol_attr is not None:
state.report(Feedback(_msg))
action = {"type": "check_edge", "kwargs": {"name": name, "index": index}}
return state.to_child(
student_ast=stu_attr, solution_ast=sol_attr, history=state.history + (action,)
)
import re
@requires_ast
def has_equal_ast(
state,
incorrect_msg="Check the {ast_path}. {extra}",
sql=None,
start=["expression", "subquery", "sql_script"][0],
exact=None,
):
"""Test whether the student and solution code have identical AST representations
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
incorrect_msg: feedback message if student and solution ASTs don't match
sql : optional code to use instead of the solution ast that is zoomed in on.
start: if ``sql`` arg is used, the parser rule to parse the sql code.
One of 'expression' (the default), 'subquery', or 'sql_script'.
exact: whether to require an exact match (True), or only that the
student AST contains the solution AST. If not specified, this
defaults to ``True`` if ``sql`` is not specified, and to ``False``
if ``sql`` is specified. You can always specify it manually.
:Example:
Example 1 - Suppose the solution code is ::
SELECT * FROM cities
and you want to verify whether the `FROM` part is correct: ::
Ex().check_node('SelectStmt').from_clause().has_equal_ast()
Example 2 - Suppose the solution code is ::
SELECT * FROM b WHERE id > 1 AND name = 'filip'
Then the following SCT makes sure ``id > 1`` was used somewhere in the WHERE clause.::
Ex().check_node('SelectStmt') \\
.check_edge('where_clause') \\
.has_equal_ast(sql = 'id > 1')
"""
ast = state.ast_dispatcher.ast_mod
sol_ast = state.solution_ast if sql is None else ast.parse(sql, start)
# if sql is set, exact defaults to False.
# if sql not set, exact defaults to True.
if exact is None:
exact = sql is None
stu_rep = repr(state.student_ast)
sol_rep = repr(sol_ast)
def get_str(ast, code, sql):
if sql:
return sql
if isinstance(ast, str):
return ast
try:
return ast.get_text(code)
except:
return None
sol_str = get_str(state.solution_ast, state.solution_code, sql)
_msg = incorrect_msg.format(
ast_path=state.get_ast_path() or "highlighted code",
extra="The checker expected to find `{}` in there.".format(sol_str)
if sol_str
else "Something is missing.",
)
if (exact and (sol_rep != stu_rep)) or (not exact and (sol_rep not in stu_rep)):
state.report(Feedback(_msg))
return state
def has_parsed_ast(state):
asts = [state.student_ast, state.solution_ast]
if any(isinstance(c, state.ast_dispatcher.ParseError) for c in asts):
state.report(Feedback("AST did not parse"))
return state
|
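Note: the exact/containment decision inside has_equal_ast above reduces to comparing repr() strings. A minimal standalone sketch of just that check; the repr strings below are invented stand-ins for real parsed-AST reprs, not actual protowhat output:

# Sketch of has_equal_ast's core comparison (hypothetical node reprs).
def asts_match(stu_rep, sol_rep, exact):
    # exact: the two representations must be identical;
    # otherwise the solution fragment only needs to occur in the student's.
    return stu_rep == sol_rep if exact else sol_rep in stu_rep

stu = "SelectStmt(where_clause=BinaryExpr('id > 1'))"
sol = "BinaryExpr('id > 1')"
assert asts_match(stu, sol, exact=False)     # containment check passes
assert not asts_match(stu, sol, exact=True)  # exact comparison fails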
datacamp/protowhat | protowhat/checks/check_funcs.py | has_equal_ast | python | def has_equal_ast(
state,
incorrect_msg="Check the {ast_path}. {extra}",
sql=None,
start=["expression", "subquery", "sql_script"][0],
exact=None,
):
ast = state.ast_dispatcher.ast_mod
sol_ast = state.solution_ast if sql is None else ast.parse(sql, start)
# if sql is set, exact defaults to False.
# if sql not set, exact defaults to True.
if exact is None:
exact = sql is None
stu_rep = repr(state.student_ast)
sol_rep = repr(sol_ast)
def get_str(ast, code, sql):
if sql:
return sql
if isinstance(ast, str):
return ast
try:
return ast.get_text(code)
except:
return None
sol_str = get_str(state.solution_ast, state.solution_code, sql)
_msg = incorrect_msg.format(
ast_path=state.get_ast_path() or "highlighted code",
extra="The checker expected to find `{}` in there.".format(sol_str)
if sol_str
else "Something is missing.",
)
if (exact and (sol_rep != stu_rep)) or (not exact and (sol_rep not in stu_rep)):
state.report(Feedback(_msg))
return state | Test whether the student and solution code have identical AST representations
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
incorrect_msg: feedback message if student and solution ASTs don't match
sql : optional code to use instead of the solution ast that is zoomed in on.
start: if ``sql`` arg is used, the parser rule to parse the sql code.
One of 'expression' (the default), 'subquery', or 'sql_script'.
exact: whether to require an exact match (True), or only that the
student AST contains the solution AST. If not specified, this
defaults to ``True`` if ``sql`` is not specified, and to ``False``
if ``sql`` is specified. You can always specify it manually.
:Example:
Example 1 - Suppose the solution code is ::
SELECT * FROM cities
and you want to verify whether the `FROM` part is correct: ::
Ex().check_node('SelectStmt').from_clause().has_equal_ast()
Example 2 - Suppose the solution code is ::
SELECT * FROM b WHERE id > 1 AND name = 'filip'
Then the following SCT makes sure ``id > 1`` was used somewhere in the WHERE clause.::
Ex().check_node('SelectStmt') \\
.check_edge('where_clause') \\
.has_equal_ast(sql = 'id > 1') | train | https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_funcs.py#L247-L319 | [
"def get_str(ast, code, sql):\n if sql:\n return sql\n if isinstance(ast, str):\n return ast\n try:\n return ast.get_text(code)\n except:\n return None\n"
] | from functools import partial, wraps
from protowhat.Feedback import Feedback
MSG_CHECK_FALLBACK = "Your submission is incorrect. Try again!"
def requires_ast(f):
@wraps(f)
def wrapper(*args, **kwargs):
state = kwargs.get("state", args[0] if len(args) else None)
state_ast = [state.student_ast, state.solution_ast]
# fail if no ast parser in use
if any(ast is None for ast in state_ast):
raise TypeError(
"Trying to use ast, but it is None. Are you using a parser? {} {}".format(
args, kwargs
)
)
# check whether the parser passed or failed for some code
# if safe_parsing is enabled in the Dispatcher (otherwise an exception would be raised earlier)
ParseError = state.ast_dispatcher.ParseError
parse_fail = any(isinstance(ast, ParseError) for ast in state_ast)
if parse_fail:
return state # skip test
else:
return f(*args, **kwargs) # proceed with test
return wrapper
@requires_ast
def check_node(
state,
name,
index=0,
missing_msg="Check the {ast_path}. Could not find the {index}{node_name}.",
priority=None,
):
"""Select a node from abstract syntax tree (AST), using its name and index position.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
name : the name of the abstract syntax tree node to find.
index: the position of that node (see below for details).
missing_msg: feedback message if node is not in student AST.
priority: the priority level of the node being searched for. This determines whether to
descend into other AST nodes during the search. Higher priority nodes descend
into lower priority. Currently, the only important part of priority is that
setting a very high priority (e.g. 99) will search every node.
:Example:
If both the student and solution code are.. ::
SELECT a FROM b; SELECT x FROM y;
then we can focus on the first select with::
# approach 1: with manually created State instance
state = State(*args, **kwargs)
new_state = check_node(state, 'SelectStmt', 0)
# approach 2: with Ex and chaining
new_state = Ex().check_node('SelectStmt', 0)
"""
df = partial(state.ast_dispatcher, name, priority=priority)
sol_stmt_list = df(state.solution_ast)
try:
sol_stmt = sol_stmt_list[index]
except IndexError:
raise IndexError("Can't get %s statement at index %s" % (name, index))
stu_stmt_list = df(state.student_ast)
try:
stu_stmt = stu_stmt_list[index]
except IndexError:
# use speaker on ast dialect module to get message, or fall back to generic
ast_path = state.get_ast_path() or "highlighted code"
_msg = state.ast_dispatcher.describe(
sol_stmt, missing_msg, index=index, ast_path=ast_path
)
if _msg is None:
_msg = MSG_CHECK_FALLBACK
state.report(Feedback(_msg))
action = {
"type": "check_node",
"kwargs": {"name": name, "index": index},
"node": stu_stmt,
}
return state.to_child(
student_ast=stu_stmt, solution_ast=sol_stmt, history=state.history + (action,)
)
@requires_ast
def check_edge(
state,
name,
index=0,
missing_msg="Check the {ast_path}. Could not find the {index}{field_name}.",
):
"""Select an attribute from an abstract syntax tree (AST) node, using the attribute name.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
name: the name of the attribute to select from current AST node.
        index: entry to get from a list field. If too few entries, will fail with missing_msg.
missing_msg: feedback message if attribute is not in student AST.
:Example:
If both the student and solution code are.. ::
SELECT a FROM b; SELECT x FROM y;
then we can get the from_clause using ::
# approach 1: with manually created State instance -----
state = State(*args, **kwargs)
select = check_node(state, 'SelectStmt', 0)
clause = check_edge(select, 'from_clause')
# approach 2: with Ex and chaining ---------------------
select = Ex().check_node('SelectStmt', 0) # get first select statement
clause = select.check_edge('from_clause', None) # get from_clause (a list)
clause2 = select.check_edge('from_clause', 0) # get first entry in from_clause
"""
try:
sol_attr = getattr(state.solution_ast, name)
if sol_attr and isinstance(sol_attr, list) and index is not None:
sol_attr = sol_attr[index]
except IndexError:
raise IndexError("Can't get %s attribute" % name)
# use speaker on ast dialect module to get message, or fall back to generic
ast_path = state.get_ast_path() or "highlighted code"
_msg = state.ast_dispatcher.describe(
state.student_ast, missing_msg, field=name, index=index, ast_path=ast_path
)
if _msg is None:
_msg = MSG_CHECK_FALLBACK
try:
stu_attr = getattr(state.student_ast, name)
if stu_attr and isinstance(stu_attr, list) and index is not None:
stu_attr = stu_attr[index]
except:
state.report(Feedback(_msg))
# fail if attribute exists, but is none only for student
if stu_attr is None and sol_attr is not None:
state.report(Feedback(_msg))
action = {"type": "check_edge", "kwargs": {"name": name, "index": index}}
return state.to_child(
student_ast=stu_attr, solution_ast=sol_attr, history=state.history + (action,)
)
import re
def has_code(
state,
text,
incorrect_msg="Check the {ast_path}. The checker expected to find {text}.",
fixed=False,
):
"""Test whether the student code contains text.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
text : text that student code must contain. Can be a regex pattern or a simple string.
incorrect_msg: feedback message if text is not in student code.
fixed: whether to match text exactly, rather than using regular expressions.
Note:
Functions like ``check_node`` focus on certain parts of code.
Using these functions followed by ``has_code`` will only look
in the code being focused on.
:Example:
If the student code is.. ::
SELECT a FROM b WHERE id < 100
        Then the first test below would (unfortunately) pass, but the second would fail.. ::
# contained in student code
Ex().has_code(text="id < 10")
# the $ means that you are matching the end of a line
Ex().has_code(text="id < 10$")
By setting ``fixed = True``, you can search for fixed strings::
# without fixed = True, '*' matches any character
Ex().has_code(text="SELECT * FROM b") # passes
Ex().has_code(text="SELECT \\\\* FROM b") # fails
Ex().has_code(text="SELECT * FROM b", fixed=True) # fails
You can check only the code corresponding to the WHERE clause, using ::
where = Ex().check_node('SelectStmt', 0).check_edge('where_clause')
        where.has_code(text = "id < 10")
"""
stu_ast = state.student_ast
stu_code = state.student_code
# fallback on using complete student code if no ast
ParseError = state.ast_dispatcher.ParseError
def get_text(ast, code):
if isinstance(ast, ParseError):
return code
try:
return ast.get_text(code)
except:
return code
stu_text = get_text(stu_ast, stu_code)
_msg = incorrect_msg.format(
ast_path=state.get_ast_path() or "highlighted code", text=text
)
# either simple text matching or regex test
res = text in stu_text if fixed else re.search(text, stu_text)
if not res:
state.report(Feedback(_msg))
return state
@requires_ast
def has_parsed_ast(state):
asts = [state.student_ast, state.solution_ast]
if any(isinstance(c, state.ast_dispatcher.ParseError) for c in asts):
state.report(Feedback("AST did not parse"))
return state
|
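Note: the fixed/regex switch in has_code above is a one-line decision. This sketch reproduces it outside protowhat, with plain strings instead of a State object:

import re

# Core matching logic of has_code: plain substring test when fixed=True,
# regular-expression search otherwise.
def code_matches(text, student_code, fixed=False):
    return text in student_code if fixed else bool(re.search(text, student_code))

code = "SELECT a FROM b WHERE id < 100"
assert code_matches("id < 10", code)                          # regex hit inside 'id < 100'
assert not code_matches("id < 10$", code)                     # $ anchors to the end
assert not code_matches("SELECT * FROM b", code, fixed=True)  # literal '*' not in code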
MycroftAI/mycroft-skills-manager | msm/skill_repo.py | SkillRepo.get_skill_data | python | def get_skill_data(self):
path_to_sha = {
folder: sha for folder, sha in self.get_shas()
}
modules = self.read_file('.gitmodules').split('[submodule "')
for i, module in enumerate(modules):
if not module:
continue
try:
name = module.split('"]')[0].strip()
path = module.split('path = ')[1].split('\n')[0].strip()
url = module.split('url = ')[1].strip()
sha = path_to_sha.get(path, '')
yield name, path, url, sha
except (ValueError, IndexError) as e:
LOG.warning('Failed to parse submodule "{}" #{}:{}'.format(
locals().get('name', ''), i, e
)) | generates tuples of name, path, url, sha | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/skill_repo.py#L95-L113 | [
"def read_file(self, filename):\n with open(join(self.path, filename)) as f:\n return f.read()\n",
"def get_shas(self):\n git = Git(self.path)\n with git_to_msm_exceptions():\n shas = git.ls_tree('origin/' + self.branch)\n for line in shas.split('\\n'):\n size, typ, sha, folder = line.split()\n if typ != 'commit':\n continue\n yield folder, sha\n"
] | class SkillRepo(object):
def __init__(self, path=None, url=None, branch=None):
self.path = path or "/opt/mycroft/.skills-repo"
self.url = url or "https://github.com/MycroftAI/mycroft-skills"
self.branch = branch or "19.02"
self.repo_info = {}
self.skills_meta_info = load_skills_data(self.branch)
def read_file(self, filename):
with open(join(self.path, filename)) as f:
return f.read()
def __prepare_repo(self):
if not exists(dirname(self.path)):
makedirs(dirname(self.path))
if not isdir(self.path):
Repo.clone_from(self.url, self.path)
git = Git(self.path)
git.config('remote.origin.url', self.url)
git.fetch()
try:
git.checkout(self.branch)
git.reset('origin/' + self.branch, hard=True)
except GitCommandError:
raise MsmException('Invalid branch: ' + self.branch)
def update(self):
try:
self.__prepare_repo()
except GitError as e:
LOG.warning('Could not prepare repo ({}), '
' Creating temporary repo'.format(repr(e)))
original_path = self.path
self.path = '/tmp/.skills-repo'
try:
with git_to_msm_exceptions():
self.__prepare_repo()
except Exception:
LOG.warning('Could not use temporary repo either ({}), '
' trying to use existing one without '
'update'.format(repr(e)))
self.path = original_path # Restore path to previous value
raise
def get_shas(self):
git = Git(self.path)
with git_to_msm_exceptions():
shas = git.ls_tree('origin/' + self.branch)
for line in shas.split('\n'):
size, typ, sha, folder = line.split()
if typ != 'commit':
continue
yield folder, sha
def get_default_skill_names(self):
for defaults_file in glob(join(self.path, 'DEFAULT-SKILLS*')):
with open(defaults_file) as f:
skills = list(filter(
lambda x: x and not x.startswith('#'),
map(str.strip, f.read().split('\n'))
))
platform = basename(defaults_file).replace('DEFAULT-SKILLS', '')
platform = platform.replace('.', '') or 'default'
yield platform, skills
|
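Note: get_skill_data above parses .gitmodules with plain string splits. A self-contained sketch of that parsing on an inline sample; the submodule names and URLs here are made up:

# .gitmodules parsing in the style of get_skill_data, on an inline sample.
GITMODULES = (
    '[submodule "skill-hello"]\n'
    '\tpath = skill-hello\n'
    '\turl = https://example.com/skill-hello\n'
    '[submodule "skill-weather"]\n'
    '\tpath = skill-weather\n'
    '\turl = https://example.com/skill-weather\n'
)

for module in GITMODULES.split('[submodule "'):
    if not module:
        continue  # skip the empty chunk before the first submodule
    name = module.split('"]')[0].strip()
    path = module.split('path = ')[1].split('\n')[0].strip()
    url = module.split('url = ')[1].strip()
    print(name, path, url)
# skill-hello skill-hello https://example.com/skill-hello
# skill-weather skill-weather https://example.com/skill-weather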
MycroftAI/mycroft-skills-manager | msm/mycroft_skills_manager.py | MycroftSkillsManager.curate_skills_data | python | def curate_skills_data(self, skills_data):
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
local_skill_names = [s.name for s in local_skills]
skills_data_skills = [s['name'] for s in skills_data['skills']]
# Check for skills that aren't in the list
for skill in local_skills:
if skill.name not in skills_data_skills:
if skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
entry = build_skill_entry(skill.name, origin, False)
skills_data['skills'].append(entry)
        # Check for skills in the list that don't exist in the filesystem
remove_list = []
for s in skills_data.get('skills', []):
if (s['name'] not in local_skill_names and
s['installation'] == 'installed'):
remove_list.append(s)
for skill in remove_list:
skills_data['skills'].remove(skill)
return skills_data | Sync skills_data with actual skills on disk. | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/mycroft_skills_manager.py#L118-L145 | [
"def build_skill_entry(name, origin, beta) -> dict:\n \"\"\" Create a new skill entry\n\n Arguments:\n name: skill name\n origin: the source of the installation\n beta: Boolean indicating wether the skill is in beta\n Returns:\n populated skills entry\n \"\"\"\n return {\n 'name': name,\n 'origin': origin,\n 'beta': beta,\n 'status': 'active',\n 'installed': 0,\n 'updated': 0,\n 'installation': 'installed'\n }\n",
"def list_defaults(self):\n skill_groups = self.list_all_defaults()\n\n if self.platform not in skill_groups:\n LOG.error('Unknown platform:' + self.platform)\n return skill_groups.get(self.platform,\n skill_groups.get('default', []))\n",
"def list(self):\n \"\"\"\n Load a list of SkillEntry objects from both local and\n remote skills\n\n It is necessary to load both local and remote skills at\n the same time to correctly associate local skills with the name\n in the repo and remote skills with any custom path that they\n have been downloaded to\n \"\"\"\n try:\n self.repo.update()\n except GitException as e:\n if not isdir(self.repo.path):\n raise\n LOG.warning('Failed to update repo: {}'.format(repr(e)))\n remote_skill_list = (\n SkillEntry(\n name, SkillEntry.create_path(self.skills_dir, url, name),\n url, sha if self.versioned else '', msm=self\n )\n for name, path, url, sha in self.repo.get_skill_data()\n )\n remote_skills = {\n skill.id: skill for skill in remote_skill_list\n }\n all_skills = []\n for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):\n skill = SkillEntry.from_folder(dirname(skill_file), msm=self)\n if skill.id in remote_skills:\n skill.attach(remote_skills.pop(skill.id))\n all_skills.append(skill)\n all_skills += list(remote_skills.values())\n return all_skills\n"
] | class MycroftSkillsManager(object):
SKILL_GROUPS = {'default', 'mycroft_mark_1', 'picroft', 'kde'}
DEFAULT_SKILLS_DIR = "/opt/mycroft/skills"
def __init__(self, platform='default', skills_dir=None, repo=None,
versioned=True):
self.platform = platform
self.skills_dir = (
expanduser(skills_dir or '') or self.DEFAULT_SKILLS_DIR
)
self.repo = repo or SkillRepo()
self.versioned = versioned
self.lock = MsmProcessLock()
self.skills_data = None
self.saving_handled = False
self.skills_data_hash = ''
with self.lock:
self.sync_skills_data()
def __upgrade_skills_data(self, skills_data):
new = {}
if skills_data.get('version', 0) == 0:
new['blacklist'] = []
new['version'] = 1
new['skills'] = []
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
for skill in local_skills:
if 'origin' in skills_data.get(skill.name, {}):
origin = skills_data[skill.name]['origin']
elif skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
beta = skills_data.get(skill.name, {}).get('beta', False)
entry = build_skill_entry(skill.name, origin, beta)
entry['installed'] = \
skills_data.get(skill.name, {}).get('installed') or 0
if isinstance(entry['installed'], bool):
entry['installed'] = 0
entry['update'] = \
skills_data.get(skill.name, {}).get('updated') or 0
new['skills'].append(entry)
new['upgraded'] = True
return new
def load_skills_data(self) -> dict:
skills_data = load_skills_data()
if skills_data.get('version', 0) < CURRENT_SKILLS_DATA_VERSION:
skills_data = self.__upgrade_skills_data(skills_data)
else:
skills_data = self.curate_skills_data(skills_data)
return skills_data
def sync_skills_data(self):
""" Update internal skill_data_structure from disk. """
self.skills_data = self.load_skills_data()
if 'upgraded' in self.skills_data:
self.skills_data.pop('upgraded')
else:
self.skills_data_hash = skills_data_hash(self.skills_data)
def write_skills_data(self, data=None):
""" Write skills data hash if it has been modified. """
data = data or self.skills_data
if skills_data_hash(data) != self.skills_data_hash:
write_skills_data(data)
self.skills_data_hash = skills_data_hash(data)
@save_skills_data
def install(self, param, author=None, constraints=None, origin=''):
"""Install by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
entry = build_skill_entry(skill.name, origin, skill.is_beta)
try:
skill.install(constraints)
entry['installed'] = time.time()
entry['installation'] = 'installed'
entry['status'] = 'active'
entry['beta'] = skill.is_beta
except AlreadyInstalled:
entry = None
raise
except MsmException as e:
entry['installation'] = 'failed'
entry['status'] = 'error'
entry['failure_message'] = repr(e)
raise
finally:
# Store the entry in the list
if entry:
self.skills_data['skills'].append(entry)
@save_skills_data
def remove(self, param, author=None):
"""Remove by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill.remove()
skills = [s for s in self.skills_data['skills']
if s['name'] != skill.name]
self.skills_data['skills'] = skills
return
def update_all(self):
local_skills = [skill for skill in self.list() if skill.is_local]
def update_skill(skill):
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
if entry:
entry['updated'] = time.time()
return self.apply(update_skill, local_skills)
@save_skills_data
def update(self, skill=None, author=None):
"""Update all downloaded skills or one specified skill."""
if skill is None:
return self.update_all()
else:
if isinstance(skill, str):
skill = self.find_skill(skill, author)
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
                # On a successful update, refresh the 'updated' timestamp
if entry:
entry['updated'] = time.time()
@save_skills_data
def apply(self, func, skills):
"""Run a function on all skills in parallel"""
def run_item(skill):
try:
func(skill)
return True
except MsmException as e:
LOG.error('Error running {} on {}: {}'.format(
func.__name__, skill.name, repr(e)
))
return False
except:
LOG.exception('Error running {} on {}:'.format(
func.__name__, skill.name
))
with ThreadPool(20) as tp:
return tp.map(run_item, skills)
@save_skills_data
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(install_or_update_skill, self.list_defaults())
def list_all_defaults(self): # type: () -> Dict[str, List[SkillEntry]]
"""Returns {'skill_group': [SkillEntry('name')]}"""
skills = self.list()
name_to_skill = {skill.name: skill for skill in skills}
defaults = {group: [] for group in self.SKILL_GROUPS}
for section_name, skill_names in self.repo.get_default_skill_names():
section_skills = []
for skill_name in skill_names:
if skill_name in name_to_skill:
section_skills.append(name_to_skill[skill_name])
else:
LOG.warning('No such default skill: ' + skill_name)
defaults[section_name] = section_skills
return defaults
def list_defaults(self):
skill_groups = self.list_all_defaults()
if self.platform not in skill_groups:
LOG.error('Unknown platform:' + self.platform)
return skill_groups.get(self.platform,
skill_groups.get('default', []))
def list(self):
"""
Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to
"""
try:
self.repo.update()
except GitException as e:
if not isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
remote_skill_list = (
SkillEntry(
name, SkillEntry.create_path(self.skills_dir, url, name),
url, sha if self.versioned else '', msm=self
)
for name, path, url, sha in self.repo.get_skill_data()
)
remote_skills = {
skill.id: skill for skill in remote_skill_list
}
all_skills = []
for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills += list(remote_skills.values())
return all_skills
def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.list():
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
path = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, path, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.list()
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill
|
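Note: curate_skills_data above is a two-way reconciliation between a manifest and what is actually on disk. Stripped of the SkillEntry machinery, the same shape looks like this sketch (plain dicts and sets stand in for skill objects):

# Two-way sync sketch: add manifest entries for skills found on disk,
# drop 'installed' entries whose skill folder has disappeared.
def curate(manifest, on_disk, defaults):
    known = {e['name'] for e in manifest}
    for name in sorted(on_disk - known):
        origin = 'default' if name in defaults else 'non-msm'
        manifest.append({'name': name, 'origin': origin,
                         'installation': 'installed'})
    return [e for e in manifest
            if e['name'] in on_disk or e['installation'] != 'installed']

manifest = [{'name': 'gone-skill', 'origin': 'cli', 'installation': 'installed'}]
print(curate(manifest, on_disk={'skill-hello'}, defaults={'skill-hello'}))
# -> [{'name': 'skill-hello', 'origin': 'default', 'installation': 'installed'}]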
MycroftAI/mycroft-skills-manager | msm/mycroft_skills_manager.py | MycroftSkillsManager.sync_skills_data | python | def sync_skills_data(self):
self.skills_data = self.load_skills_data()
if 'upgraded' in self.skills_data:
self.skills_data.pop('upgraded')
else:
self.skills_data_hash = skills_data_hash(self.skills_data) | Update internal skill_data_structure from disk. | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/mycroft_skills_manager.py#L155-L161 | [
"def skills_data_hash(data):\n return hash(json.dumps(data, sort_keys=True))\n",
"def load_skills_data(self) -> dict:\n skills_data = load_skills_data()\n if skills_data.get('version', 0) < CURRENT_SKILLS_DATA_VERSION:\n skills_data = self.__upgrade_skills_data(skills_data)\n else:\n skills_data = self.curate_skills_data(skills_data)\n return skills_data\n"
] | class MycroftSkillsManager(object):
SKILL_GROUPS = {'default', 'mycroft_mark_1', 'picroft', 'kde'}
DEFAULT_SKILLS_DIR = "/opt/mycroft/skills"
def __init__(self, platform='default', skills_dir=None, repo=None,
versioned=True):
self.platform = platform
self.skills_dir = (
expanduser(skills_dir or '') or self.DEFAULT_SKILLS_DIR
)
self.repo = repo or SkillRepo()
self.versioned = versioned
self.lock = MsmProcessLock()
self.skills_data = None
self.saving_handled = False
self.skills_data_hash = ''
with self.lock:
self.sync_skills_data()
def __upgrade_skills_data(self, skills_data):
new = {}
if skills_data.get('version', 0) == 0:
new['blacklist'] = []
new['version'] = 1
new['skills'] = []
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
for skill in local_skills:
if 'origin' in skills_data.get(skill.name, {}):
origin = skills_data[skill.name]['origin']
elif skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
beta = skills_data.get(skill.name, {}).get('beta', False)
entry = build_skill_entry(skill.name, origin, beta)
entry['installed'] = \
skills_data.get(skill.name, {}).get('installed') or 0
if isinstance(entry['installed'], bool):
entry['installed'] = 0
entry['update'] = \
skills_data.get(skill.name, {}).get('updated') or 0
new['skills'].append(entry)
new['upgraded'] = True
return new
def curate_skills_data(self, skills_data):
""" Sync skills_data with actual skills on disk. """
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
local_skill_names = [s.name for s in local_skills]
skills_data_skills = [s['name'] for s in skills_data['skills']]
# Check for skills that aren't in the list
for skill in local_skills:
if skill.name not in skills_data_skills:
if skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
entry = build_skill_entry(skill.name, origin, False)
skills_data['skills'].append(entry)
        # Check for skills in the list that don't exist in the filesystem
remove_list = []
for s in skills_data.get('skills', []):
if (s['name'] not in local_skill_names and
s['installation'] == 'installed'):
remove_list.append(s)
for skill in remove_list:
skills_data['skills'].remove(skill)
return skills_data
def load_skills_data(self) -> dict:
skills_data = load_skills_data()
if skills_data.get('version', 0) < CURRENT_SKILLS_DATA_VERSION:
skills_data = self.__upgrade_skills_data(skills_data)
else:
skills_data = self.curate_skills_data(skills_data)
return skills_data
def write_skills_data(self, data=None):
""" Write skills data hash if it has been modified. """
data = data or self.skills_data
if skills_data_hash(data) != self.skills_data_hash:
write_skills_data(data)
self.skills_data_hash = skills_data_hash(data)
@save_skills_data
def install(self, param, author=None, constraints=None, origin=''):
"""Install by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
entry = build_skill_entry(skill.name, origin, skill.is_beta)
try:
skill.install(constraints)
entry['installed'] = time.time()
entry['installation'] = 'installed'
entry['status'] = 'active'
entry['beta'] = skill.is_beta
except AlreadyInstalled:
entry = None
raise
except MsmException as e:
entry['installation'] = 'failed'
entry['status'] = 'error'
entry['failure_message'] = repr(e)
raise
finally:
# Store the entry in the list
if entry:
self.skills_data['skills'].append(entry)
@save_skills_data
def remove(self, param, author=None):
"""Remove by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill.remove()
skills = [s for s in self.skills_data['skills']
if s['name'] != skill.name]
self.skills_data['skills'] = skills
return
def update_all(self):
local_skills = [skill for skill in self.list() if skill.is_local]
def update_skill(skill):
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
if entry:
entry['updated'] = time.time()
return self.apply(update_skill, local_skills)
@save_skills_data
def update(self, skill=None, author=None):
"""Update all downloaded skills or one specified skill."""
if skill is None:
return self.update_all()
else:
if isinstance(skill, str):
skill = self.find_skill(skill, author)
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
                # On a successful update, refresh the 'updated' timestamp
if entry:
entry['updated'] = time.time()
@save_skills_data
def apply(self, func, skills):
"""Run a function on all skills in parallel"""
def run_item(skill):
try:
func(skill)
return True
except MsmException as e:
LOG.error('Error running {} on {}: {}'.format(
func.__name__, skill.name, repr(e)
))
return False
except:
LOG.exception('Error running {} on {}:'.format(
func.__name__, skill.name
))
with ThreadPool(20) as tp:
return tp.map(run_item, skills)
@save_skills_data
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(install_or_update_skill, self.list_defaults())
def list_all_defaults(self): # type: () -> Dict[str, List[SkillEntry]]
"""Returns {'skill_group': [SkillEntry('name')]}"""
skills = self.list()
name_to_skill = {skill.name: skill for skill in skills}
defaults = {group: [] for group in self.SKILL_GROUPS}
for section_name, skill_names in self.repo.get_default_skill_names():
section_skills = []
for skill_name in skill_names:
if skill_name in name_to_skill:
section_skills.append(name_to_skill[skill_name])
else:
LOG.warning('No such default skill: ' + skill_name)
defaults[section_name] = section_skills
return defaults
def list_defaults(self):
skill_groups = self.list_all_defaults()
if self.platform not in skill_groups:
LOG.error('Unknown platform:' + self.platform)
return skill_groups.get(self.platform,
skill_groups.get('default', []))
def list(self):
"""
Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to
"""
try:
self.repo.update()
except GitException as e:
if not isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
remote_skill_list = (
SkillEntry(
name, SkillEntry.create_path(self.skills_dir, url, name),
url, sha if self.versioned else '', msm=self
)
for name, path, url, sha in self.repo.get_skill_data()
)
remote_skills = {
skill.id: skill for skill in remote_skill_list
}
all_skills = []
for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills += list(remote_skills.values())
return all_skills
def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.list():
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
path = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, path, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.list()
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill
|
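Note: sync_skills_data above leans on a subtlety: when a schema migration just happened, the stored hash is deliberately left stale so that the next write persists the upgraded data. A hedged sketch of that flow, with data_hash mirroring skills_data_hash from the listing:

import json

def data_hash(data):
    # Same idea as skills_data_hash in the listing above.
    return hash(json.dumps(data, sort_keys=True))

def sync(data, saved_hash):
    if 'upgraded' in data:
        data.pop('upgraded')          # keep the stale hash: next write persists the migration
    else:
        saved_hash = data_hash(data)  # in sync with disk: next write becomes a no-op
    return data, saved_hash

data, h = sync({'version': 1, 'skills': [], 'upgraded': True}, saved_hash=0)
print(data_hash(data) != h)           # True: hashes differ, so a write would go through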
MycroftAI/mycroft-skills-manager | msm/mycroft_skills_manager.py | MycroftSkillsManager.write_skills_data | python | def write_skills_data(self, data=None):
data = data or self.skills_data
if skills_data_hash(data) != self.skills_data_hash:
write_skills_data(data)
self.skills_data_hash = skills_data_hash(data) | Write skills data hash if it has been modified. | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/mycroft_skills_manager.py#L163-L168 | [
"def write_skills_data(data: dict):\n skills_data_file = expanduser('~/.mycroft/skills.json')\n with open(skills_data_file, 'w') as f:\n json.dump(data, f, indent=4, separators=(',', ':'))\n",
"def skills_data_hash(data):\n return hash(json.dumps(data, sort_keys=True))\n"
] | class MycroftSkillsManager(object):
SKILL_GROUPS = {'default', 'mycroft_mark_1', 'picroft', 'kde'}
DEFAULT_SKILLS_DIR = "/opt/mycroft/skills"
def __init__(self, platform='default', skills_dir=None, repo=None,
versioned=True):
self.platform = platform
self.skills_dir = (
expanduser(skills_dir or '') or self.DEFAULT_SKILLS_DIR
)
self.repo = repo or SkillRepo()
self.versioned = versioned
self.lock = MsmProcessLock()
self.skills_data = None
self.saving_handled = False
self.skills_data_hash = ''
with self.lock:
self.sync_skills_data()
def __upgrade_skills_data(self, skills_data):
new = {}
if skills_data.get('version', 0) == 0:
new['blacklist'] = []
new['version'] = 1
new['skills'] = []
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
for skill in local_skills:
if 'origin' in skills_data.get(skill.name, {}):
origin = skills_data[skill.name]['origin']
elif skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
beta = skills_data.get(skill.name, {}).get('beta', False)
entry = build_skill_entry(skill.name, origin, beta)
entry['installed'] = \
skills_data.get(skill.name, {}).get('installed') or 0
if isinstance(entry['installed'], bool):
entry['installed'] = 0
entry['update'] = \
skills_data.get(skill.name, {}).get('updated') or 0
new['skills'].append(entry)
new['upgraded'] = True
return new
def curate_skills_data(self, skills_data):
""" Sync skills_data with actual skills on disk. """
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
local_skill_names = [s.name for s in local_skills]
skills_data_skills = [s['name'] for s in skills_data['skills']]
# Check for skills that aren't in the list
for skill in local_skills:
if skill.name not in skills_data_skills:
if skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
entry = build_skill_entry(skill.name, origin, False)
skills_data['skills'].append(entry)
        # Check for skills in the list that don't exist in the filesystem
remove_list = []
for s in skills_data.get('skills', []):
if (s['name'] not in local_skill_names and
s['installation'] == 'installed'):
remove_list.append(s)
for skill in remove_list:
skills_data['skills'].remove(skill)
return skills_data
def load_skills_data(self) -> dict:
skills_data = load_skills_data()
if skills_data.get('version', 0) < CURRENT_SKILLS_DATA_VERSION:
skills_data = self.__upgrade_skills_data(skills_data)
else:
skills_data = self.curate_skills_data(skills_data)
return skills_data
def sync_skills_data(self):
""" Update internal skill_data_structure from disk. """
self.skills_data = self.load_skills_data()
if 'upgraded' in self.skills_data:
self.skills_data.pop('upgraded')
else:
self.skills_data_hash = skills_data_hash(self.skills_data)
@save_skills_data
def install(self, param, author=None, constraints=None, origin=''):
"""Install by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
entry = build_skill_entry(skill.name, origin, skill.is_beta)
try:
skill.install(constraints)
entry['installed'] = time.time()
entry['installation'] = 'installed'
entry['status'] = 'active'
entry['beta'] = skill.is_beta
except AlreadyInstalled:
entry = None
raise
except MsmException as e:
entry['installation'] = 'failed'
entry['status'] = 'error'
entry['failure_message'] = repr(e)
raise
finally:
# Store the entry in the list
if entry:
self.skills_data['skills'].append(entry)
@save_skills_data
def remove(self, param, author=None):
"""Remove by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill.remove()
skills = [s for s in self.skills_data['skills']
if s['name'] != skill.name]
self.skills_data['skills'] = skills
return
def update_all(self):
local_skills = [skill for skill in self.list() if skill.is_local]
def update_skill(skill):
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
if entry:
entry['updated'] = time.time()
return self.apply(update_skill, local_skills)
@save_skills_data
def update(self, skill=None, author=None):
"""Update all downloaded skills or one specified skill."""
if skill is None:
return self.update_all()
else:
if isinstance(skill, str):
skill = self.find_skill(skill, author)
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
                # On a successful update, refresh the 'updated' timestamp
if entry:
entry['updated'] = time.time()
@save_skills_data
def apply(self, func, skills):
"""Run a function on all skills in parallel"""
def run_item(skill):
try:
func(skill)
return True
except MsmException as e:
LOG.error('Error running {} on {}: {}'.format(
func.__name__, skill.name, repr(e)
))
return False
except:
LOG.exception('Error running {} on {}:'.format(
func.__name__, skill.name
))
with ThreadPool(20) as tp:
return tp.map(run_item, skills)
@save_skills_data
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(install_or_update_skill, self.list_defaults())
def list_all_defaults(self): # type: () -> Dict[str, List[SkillEntry]]
"""Returns {'skill_group': [SkillEntry('name')]}"""
skills = self.list()
name_to_skill = {skill.name: skill for skill in skills}
defaults = {group: [] for group in self.SKILL_GROUPS}
for section_name, skill_names in self.repo.get_default_skill_names():
section_skills = []
for skill_name in skill_names:
if skill_name in name_to_skill:
section_skills.append(name_to_skill[skill_name])
else:
LOG.warning('No such default skill: ' + skill_name)
defaults[section_name] = section_skills
return defaults
def list_defaults(self):
skill_groups = self.list_all_defaults()
if self.platform not in skill_groups:
LOG.error('Unknown platform:' + self.platform)
return skill_groups.get(self.platform,
skill_groups.get('default', []))
def list(self):
"""
Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to
"""
try:
self.repo.update()
except GitException as e:
if not isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
remote_skill_list = (
SkillEntry(
name, SkillEntry.create_path(self.skills_dir, url, name),
url, sha if self.versioned else '', msm=self
)
for name, path, url, sha in self.repo.get_skill_data()
)
remote_skills = {
skill.id: skill for skill in remote_skill_list
}
all_skills = []
for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills += list(remote_skills.values())
return all_skills
def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.list():
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
path = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, path, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.list()
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill
|
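Note: write_skills_data above only touches disk when the JSON hash has moved. The same write-if-changed pattern in isolation; the actual file write is elided with a comment:

import json

def data_hash(data):
    return hash(json.dumps(data, sort_keys=True))

class Store:
    # Write-if-changed sketch: skip persistence when nothing moved.
    def __init__(self, data):
        self.data = data
        self.saved_hash = data_hash(data)

    def write(self):
        if data_hash(self.data) == self.saved_hash:
            return False               # unchanged: no disk write
        # ... the real class would persist self.data to disk here ...
        self.saved_hash = data_hash(self.data)
        return True

s = Store({'skills': []})
assert s.write() is False              # nothing has changed yet
s.data['skills'].append({'name': 'x'})
assert s.write() is True               # changed once: exactly one real write
assert s.write() is False              # hash has caught up again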
MycroftAI/mycroft-skills-manager | msm/mycroft_skills_manager.py | MycroftSkillsManager.install | python | def install(self, param, author=None, constraints=None, origin=''):
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
entry = build_skill_entry(skill.name, origin, skill.is_beta)
try:
skill.install(constraints)
entry['installed'] = time.time()
entry['installation'] = 'installed'
entry['status'] = 'active'
entry['beta'] = skill.is_beta
except AlreadyInstalled:
entry = None
raise
except MsmException as e:
entry['installation'] = 'failed'
entry['status'] = 'error'
entry['failure_message'] = repr(e)
raise
finally:
# Store the entry in the list
if entry:
self.skills_data['skills'].append(entry) | Install by url or name | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/mycroft_skills_manager.py#L171-L195 | [
"def build_skill_entry(name, origin, beta) -> dict:\n \"\"\" Create a new skill entry\n\n Arguments:\n name: skill name\n origin: the source of the installation\n beta: Boolean indicating wether the skill is in beta\n Returns:\n populated skills entry\n \"\"\"\n return {\n 'name': name,\n 'origin': origin,\n 'beta': beta,\n 'status': 'active',\n 'installed': 0,\n 'updated': 0,\n 'installation': 'installed'\n }\n",
"def find_skill(self, param, author=None, skills=None):\n # type: (str, str, List[SkillEntry]) -> SkillEntry\n \"\"\"Find skill by name or url\"\"\"\n if param.startswith('https://') or param.startswith('http://'):\n repo_id = SkillEntry.extract_repo_id(param)\n for skill in self.list():\n if skill.id == repo_id:\n return skill\n name = SkillEntry.extract_repo_name(param)\n path = SkillEntry.create_path(self.skills_dir, param)\n return SkillEntry(name, path, param, msm=self)\n else:\n skill_confs = {\n skill: skill.match(param, author)\n for skill in skills or self.list()\n }\n best_skill, score = max(skill_confs.items(), key=lambda x: x[1])\n LOG.info('Best match ({}): {} by {}'.format(\n round(score, 2), best_skill.name, best_skill.author)\n )\n if score < 0.3:\n raise SkillNotFound(param)\n low_bound = (score * 0.7) if score != 1.0 else 1.0\n\n close_skills = [\n skill for skill, conf in skill_confs.items()\n if conf >= low_bound and skill != best_skill\n ]\n if close_skills:\n raise MultipleSkillMatches([best_skill] + close_skills)\n return best_skill\n"
] | class MycroftSkillsManager(object):
SKILL_GROUPS = {'default', 'mycroft_mark_1', 'picroft', 'kde'}
DEFAULT_SKILLS_DIR = "/opt/mycroft/skills"
def __init__(self, platform='default', skills_dir=None, repo=None,
versioned=True):
self.platform = platform
self.skills_dir = (
expanduser(skills_dir or '') or self.DEFAULT_SKILLS_DIR
)
self.repo = repo or SkillRepo()
self.versioned = versioned
self.lock = MsmProcessLock()
self.skills_data = None
self.saving_handled = False
self.skills_data_hash = ''
with self.lock:
self.sync_skills_data()
def __upgrade_skills_data(self, skills_data):
new = {}
if skills_data.get('version', 0) == 0:
new['blacklist'] = []
new['version'] = 1
new['skills'] = []
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
for skill in local_skills:
if 'origin' in skills_data.get(skill.name, {}):
origin = skills_data[skill.name]['origin']
elif skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
beta = skills_data.get(skill.name, {}).get('beta', False)
entry = build_skill_entry(skill.name, origin, beta)
entry['installed'] = \
skills_data.get(skill.name, {}).get('installed') or 0
if isinstance(entry['installed'], bool):
entry['installed'] = 0
entry['update'] = \
skills_data.get(skill.name, {}).get('updated') or 0
new['skills'].append(entry)
new['upgraded'] = True
return new
def curate_skills_data(self, skills_data):
""" Sync skills_data with actual skills on disk. """
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
local_skill_names = [s.name for s in local_skills]
skills_data_skills = [s['name'] for s in skills_data['skills']]
# Check for skills that aren't in the list
for skill in local_skills:
if skill.name not in skills_data_skills:
if skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
entry = build_skill_entry(skill.name, origin, False)
skills_data['skills'].append(entry)
        # Check for skills in the list that don't exist in the filesystem
remove_list = []
for s in skills_data.get('skills', []):
if (s['name'] not in local_skill_names and
s['installation'] == 'installed'):
remove_list.append(s)
for skill in remove_list:
skills_data['skills'].remove(skill)
return skills_data
def load_skills_data(self) -> dict:
skills_data = load_skills_data()
if skills_data.get('version', 0) < CURRENT_SKILLS_DATA_VERSION:
skills_data = self.__upgrade_skills_data(skills_data)
else:
skills_data = self.curate_skills_data(skills_data)
return skills_data
def sync_skills_data(self):
""" Update internal skill_data_structure from disk. """
self.skills_data = self.load_skills_data()
if 'upgraded' in self.skills_data:
self.skills_data.pop('upgraded')
else:
self.skills_data_hash = skills_data_hash(self.skills_data)
def write_skills_data(self, data=None):
""" Write skills data hash if it has been modified. """
data = data or self.skills_data
if skills_data_hash(data) != self.skills_data_hash:
write_skills_data(data)
self.skills_data_hash = skills_data_hash(data)
    @save_skills_data
def remove(self, param, author=None):
"""Remove by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill.remove()
skills = [s for s in self.skills_data['skills']
if s['name'] != skill.name]
self.skills_data['skills'] = skills
return
def update_all(self):
local_skills = [skill for skill in self.list() if skill.is_local]
def update_skill(skill):
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
if entry:
entry['updated'] = time.time()
return self.apply(update_skill, local_skills)
@save_skills_data
def update(self, skill=None, author=None):
"""Update all downloaded skills or one specified skill."""
if skill is None:
return self.update_all()
else:
if isinstance(skill, str):
skill = self.find_skill(skill, author)
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
                # On a successful update, refresh the 'updated' timestamp
if entry:
entry['updated'] = time.time()
@save_skills_data
def apply(self, func, skills):
"""Run a function on all skills in parallel"""
def run_item(skill):
try:
func(skill)
return True
except MsmException as e:
LOG.error('Error running {} on {}: {}'.format(
func.__name__, skill.name, repr(e)
))
return False
except:
LOG.exception('Error running {} on {}:'.format(
func.__name__, skill.name
))
with ThreadPool(20) as tp:
return tp.map(run_item, skills)
@save_skills_data
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(install_or_update_skill, self.list_defaults())
def list_all_defaults(self): # type: () -> Dict[str, List[SkillEntry]]
"""Returns {'skill_group': [SkillEntry('name')]}"""
skills = self.list()
name_to_skill = {skill.name: skill for skill in skills}
defaults = {group: [] for group in self.SKILL_GROUPS}
for section_name, skill_names in self.repo.get_default_skill_names():
section_skills = []
for skill_name in skill_names:
if skill_name in name_to_skill:
section_skills.append(name_to_skill[skill_name])
else:
LOG.warning('No such default skill: ' + skill_name)
defaults[section_name] = section_skills
return defaults
def list_defaults(self):
skill_groups = self.list_all_defaults()
if self.platform not in skill_groups:
LOG.error('Unknown platform:' + self.platform)
return skill_groups.get(self.platform,
skill_groups.get('default', []))
def list(self):
"""
Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to
"""
try:
self.repo.update()
except GitException as e:
if not isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
remote_skill_list = (
SkillEntry(
name, SkillEntry.create_path(self.skills_dir, url, name),
url, sha if self.versioned else '', msm=self
)
for name, path, url, sha in self.repo.get_skill_data()
)
remote_skills = {
skill.id: skill for skill in remote_skill_list
}
all_skills = []
for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills += list(remote_skills.values())
return all_skills
def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.list():
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
path = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, path, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.list()
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill
|
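Note: install above uses try/except/finally so that every attempt leaves a manifest entry, successful or failed. The shape of that bookkeeping, reduced to its essentials; InstallError and do_install are hypothetical stand-ins for MsmException and the real installer:

import time

class InstallError(Exception):
    pass  # stand-in for MsmException

def do_install(name):
    # Hypothetical installer: fails for one specific name.
    if name == 'bad-skill':
        raise InstallError('network down')

def install(name, manifest):
    entry = {'name': name, 'status': 'active', 'installation': 'installed'}
    try:
        do_install(name)
        entry['installed'] = time.time()
    except InstallError as e:
        entry['installation'] = 'failed'
        entry['status'] = 'error'
        entry['failure_message'] = repr(e)
        raise
    finally:
        manifest.append(entry)  # the attempt is recorded either way

manifest = []
install('good-skill', manifest)
try:
    install('bad-skill', manifest)
except InstallError:
    pass
print([(e['name'], e['status']) for e in manifest])
# -> [('good-skill', 'active'), ('bad-skill', 'error')]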
MycroftAI/mycroft-skills-manager | msm/mycroft_skills_manager.py | MycroftSkillsManager.remove | python | def remove(self, param, author=None):
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill.remove()
skills = [s for s in self.skills_data['skills']
if s['name'] != skill.name]
self.skills_data['skills'] = skills
return | Remove by url or name | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/mycroft_skills_manager.py#L198-L208 | [
"def find_skill(self, param, author=None, skills=None):\n # type: (str, str, List[SkillEntry]) -> SkillEntry\n \"\"\"Find skill by name or url\"\"\"\n if param.startswith('https://') or param.startswith('http://'):\n repo_id = SkillEntry.extract_repo_id(param)\n for skill in self.list():\n if skill.id == repo_id:\n return skill\n name = SkillEntry.extract_repo_name(param)\n path = SkillEntry.create_path(self.skills_dir, param)\n return SkillEntry(name, path, param, msm=self)\n else:\n skill_confs = {\n skill: skill.match(param, author)\n for skill in skills or self.list()\n }\n best_skill, score = max(skill_confs.items(), key=lambda x: x[1])\n LOG.info('Best match ({}): {} by {}'.format(\n round(score, 2), best_skill.name, best_skill.author)\n )\n if score < 0.3:\n raise SkillNotFound(param)\n low_bound = (score * 0.7) if score != 1.0 else 1.0\n\n close_skills = [\n skill for skill, conf in skill_confs.items()\n if conf >= low_bound and skill != best_skill\n ]\n if close_skills:\n raise MultipleSkillMatches([best_skill] + close_skills)\n return best_skill\n",
"def remove(self):\n if not self.is_local:\n raise AlreadyRemoved(self.name)\n try:\n rmtree(self.path)\n self.is_local = False\n except OSError as e:\n raise RemoveException(str(e))\n\n LOG.info('Successfully removed ' + self.name)\n"
] | class MycroftSkillsManager(object):
SKILL_GROUPS = {'default', 'mycroft_mark_1', 'picroft', 'kde'}
DEFAULT_SKILLS_DIR = "/opt/mycroft/skills"
def __init__(self, platform='default', skills_dir=None, repo=None,
versioned=True):
self.platform = platform
self.skills_dir = (
expanduser(skills_dir or '') or self.DEFAULT_SKILLS_DIR
)
self.repo = repo or SkillRepo()
self.versioned = versioned
self.lock = MsmProcessLock()
self.skills_data = None
self.saving_handled = False
self.skills_data_hash = ''
with self.lock:
self.sync_skills_data()
def __upgrade_skills_data(self, skills_data):
new = {}
if skills_data.get('version', 0) == 0:
new['blacklist'] = []
new['version'] = 1
new['skills'] = []
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
for skill in local_skills:
if 'origin' in skills_data.get(skill.name, {}):
origin = skills_data[skill.name]['origin']
elif skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
beta = skills_data.get(skill.name, {}).get('beta', False)
entry = build_skill_entry(skill.name, origin, beta)
entry['installed'] = \
skills_data.get(skill.name, {}).get('installed') or 0
if isinstance(entry['installed'], bool):
entry['installed'] = 0
entry['update'] = \
skills_data.get(skill.name, {}).get('updated') or 0
new['skills'].append(entry)
new['upgraded'] = True
return new
def curate_skills_data(self, skills_data):
""" Sync skills_data with actual skills on disk. """
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
local_skill_names = [s.name for s in local_skills]
skills_data_skills = [s['name'] for s in skills_data['skills']]
# Check for skills that aren't in the list
for skill in local_skills:
if skill.name not in skills_data_skills:
if skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
entry = build_skill_entry(skill.name, origin, False)
skills_data['skills'].append(entry)
        # Check for skills in the list that don't exist in the filesystem
remove_list = []
for s in skills_data.get('skills', []):
if (s['name'] not in local_skill_names and
s['installation'] == 'installed'):
remove_list.append(s)
for skill in remove_list:
skills_data['skills'].remove(skill)
return skills_data
def load_skills_data(self) -> dict:
skills_data = load_skills_data()
if skills_data.get('version', 0) < CURRENT_SKILLS_DATA_VERSION:
skills_data = self.__upgrade_skills_data(skills_data)
else:
skills_data = self.curate_skills_data(skills_data)
return skills_data
def sync_skills_data(self):
""" Update internal skill_data_structure from disk. """
self.skills_data = self.load_skills_data()
if 'upgraded' in self.skills_data:
self.skills_data.pop('upgraded')
else:
self.skills_data_hash = skills_data_hash(self.skills_data)
def write_skills_data(self, data=None):
""" Write skills data hash if it has been modified. """
data = data or self.skills_data
if skills_data_hash(data) != self.skills_data_hash:
write_skills_data(data)
self.skills_data_hash = skills_data_hash(data)
@save_skills_data
def install(self, param, author=None, constraints=None, origin=''):
"""Install by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
entry = build_skill_entry(skill.name, origin, skill.is_beta)
try:
skill.install(constraints)
entry['installed'] = time.time()
entry['installation'] = 'installed'
entry['status'] = 'active'
entry['beta'] = skill.is_beta
except AlreadyInstalled:
entry = None
raise
except MsmException as e:
entry['installation'] = 'failed'
entry['status'] = 'error'
entry['failure_message'] = repr(e)
raise
finally:
# Store the entry in the list
if entry:
self.skills_data['skills'].append(entry)
def update_all(self):
local_skills = [skill for skill in self.list() if skill.is_local]
def update_skill(skill):
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
if entry:
entry['updated'] = time.time()
return self.apply(update_skill, local_skills)
@save_skills_data
def update(self, skill=None, author=None):
"""Update all downloaded skills or one specified skill."""
if skill is None:
return self.update_all()
else:
if isinstance(skill, str):
skill = self.find_skill(skill, author)
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
                # On a successful update, record the new 'updated' timestamp
if entry:
entry['updated'] = time.time()
@save_skills_data
def apply(self, func, skills):
"""Run a function on all skills in parallel"""
def run_item(skill):
try:
func(skill)
return True
except MsmException as e:
LOG.error('Error running {} on {}: {}'.format(
func.__name__, skill.name, repr(e)
))
return False
except:
LOG.exception('Error running {} on {}:'.format(
func.__name__, skill.name
))
with ThreadPool(20) as tp:
return tp.map(run_item, skills)
@save_skills_data
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(install_or_update_skill, self.list_defaults())
def list_all_defaults(self): # type: () -> Dict[str, List[SkillEntry]]
"""Returns {'skill_group': [SkillEntry('name')]}"""
skills = self.list()
name_to_skill = {skill.name: skill for skill in skills}
defaults = {group: [] for group in self.SKILL_GROUPS}
for section_name, skill_names in self.repo.get_default_skill_names():
section_skills = []
for skill_name in skill_names:
if skill_name in name_to_skill:
section_skills.append(name_to_skill[skill_name])
else:
LOG.warning('No such default skill: ' + skill_name)
defaults[section_name] = section_skills
return defaults
def list_defaults(self):
skill_groups = self.list_all_defaults()
if self.platform not in skill_groups:
            LOG.error('Unknown platform: ' + self.platform)
return skill_groups.get(self.platform,
skill_groups.get('default', []))
def list(self):
"""
Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to
"""
try:
self.repo.update()
except GitException as e:
if not isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
remote_skill_list = (
SkillEntry(
name, SkillEntry.create_path(self.skills_dir, url, name),
url, sha if self.versioned else '', msm=self
)
for name, path, url, sha in self.repo.get_skill_data()
)
remote_skills = {
skill.id: skill for skill in remote_skill_list
}
all_skills = []
for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills += list(remote_skills.values())
return all_skills
def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.list():
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
path = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, path, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.list()
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill
|
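The row above, with its find_skill and remove helpers, appears to document removing an installed skill through the manager. Below is a minimal usage sketch, not code from the repository; it assumes the msm package exports MycroftSkillsManager and MsmException at the top level (they may instead live in msm.exceptions), and the skill name is purely illustrative.

from msm import MycroftSkillsManager, MsmException

msm = MycroftSkillsManager(platform='picroft')  # skills_dir defaults to /opt/mycroft/skills
try:
    # remove() accepts a skill name, a repository URL, or a SkillEntry;
    # plain names are resolved through find_skill()'s fuzzy matching
    msm.remove('hello-world')
except MsmException as e:
    print('Could not remove skill:', repr(e))
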
MycroftAI/mycroft-skills-manager | msm/mycroft_skills_manager.py | MycroftSkillsManager.update | python | def update(self, skill=None, author=None):
if skill is None:
return self.update_all()
else:
if isinstance(skill, str):
skill = self.find_skill(skill, author)
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
                # On a successful update, record the new 'updated' timestamp
if entry:
entry['updated'] = time.time() | Update all downloaded skills or one specified skill. | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/mycroft_skills_manager.py#L224-L237 | [
"def get_skill_entry(name, skills_data) -> dict:\n \"\"\" Find a skill entry in the skills_data and returns it. \"\"\"\n for e in skills_data.get('skills', []):\n if e.get('name') == name:\n return e\n return {}\n",
"def update_all(self):\n local_skills = [skill for skill in self.list() if skill.is_local]\n\n def update_skill(skill):\n entry = get_skill_entry(skill.name, self.skills_data)\n if entry:\n entry['beta'] = skill.is_beta\n if skill.update():\n if entry:\n entry['updated'] = time.time()\n\n return self.apply(update_skill, local_skills)\n",
"def find_skill(self, param, author=None, skills=None):\n # type: (str, str, List[SkillEntry]) -> SkillEntry\n \"\"\"Find skill by name or url\"\"\"\n if param.startswith('https://') or param.startswith('http://'):\n repo_id = SkillEntry.extract_repo_id(param)\n for skill in self.list():\n if skill.id == repo_id:\n return skill\n name = SkillEntry.extract_repo_name(param)\n path = SkillEntry.create_path(self.skills_dir, param)\n return SkillEntry(name, path, param, msm=self)\n else:\n skill_confs = {\n skill: skill.match(param, author)\n for skill in skills or self.list()\n }\n best_skill, score = max(skill_confs.items(), key=lambda x: x[1])\n LOG.info('Best match ({}): {} by {}'.format(\n round(score, 2), best_skill.name, best_skill.author)\n )\n if score < 0.3:\n raise SkillNotFound(param)\n low_bound = (score * 0.7) if score != 1.0 else 1.0\n\n close_skills = [\n skill for skill, conf in skill_confs.items()\n if conf >= low_bound and skill != best_skill\n ]\n if close_skills:\n raise MultipleSkillMatches([best_skill] + close_skills)\n return best_skill\n"
] | class MycroftSkillsManager(object):
SKILL_GROUPS = {'default', 'mycroft_mark_1', 'picroft', 'kde'}
DEFAULT_SKILLS_DIR = "/opt/mycroft/skills"
def __init__(self, platform='default', skills_dir=None, repo=None,
versioned=True):
self.platform = platform
self.skills_dir = (
expanduser(skills_dir or '') or self.DEFAULT_SKILLS_DIR
)
self.repo = repo or SkillRepo()
self.versioned = versioned
self.lock = MsmProcessLock()
self.skills_data = None
self.saving_handled = False
self.skills_data_hash = ''
with self.lock:
self.sync_skills_data()
def __upgrade_skills_data(self, skills_data):
new = {}
if skills_data.get('version', 0) == 0:
new['blacklist'] = []
new['version'] = 1
new['skills'] = []
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
for skill in local_skills:
if 'origin' in skills_data.get(skill.name, {}):
origin = skills_data[skill.name]['origin']
elif skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
beta = skills_data.get(skill.name, {}).get('beta', False)
entry = build_skill_entry(skill.name, origin, beta)
entry['installed'] = \
skills_data.get(skill.name, {}).get('installed') or 0
if isinstance(entry['installed'], bool):
entry['installed'] = 0
entry['update'] = \
skills_data.get(skill.name, {}).get('updated') or 0
new['skills'].append(entry)
new['upgraded'] = True
return new
def curate_skills_data(self, skills_data):
""" Sync skills_data with actual skills on disk. """
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
local_skill_names = [s.name for s in local_skills]
skills_data_skills = [s['name'] for s in skills_data['skills']]
# Check for skills that aren't in the list
for skill in local_skills:
if skill.name not in skills_data_skills:
if skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
entry = build_skill_entry(skill.name, origin, False)
skills_data['skills'].append(entry)
        # Check for skills in the list that don't exist in the filesystem
remove_list = []
for s in skills_data.get('skills', []):
if (s['name'] not in local_skill_names and
s['installation'] == 'installed'):
remove_list.append(s)
for skill in remove_list:
skills_data['skills'].remove(skill)
return skills_data
def load_skills_data(self) -> dict:
skills_data = load_skills_data()
if skills_data.get('version', 0) < CURRENT_SKILLS_DATA_VERSION:
skills_data = self.__upgrade_skills_data(skills_data)
else:
skills_data = self.curate_skills_data(skills_data)
return skills_data
def sync_skills_data(self):
""" Update internal skill_data_structure from disk. """
self.skills_data = self.load_skills_data()
if 'upgraded' in self.skills_data:
self.skills_data.pop('upgraded')
else:
self.skills_data_hash = skills_data_hash(self.skills_data)
def write_skills_data(self, data=None):
""" Write skills data hash if it has been modified. """
data = data or self.skills_data
if skills_data_hash(data) != self.skills_data_hash:
write_skills_data(data)
self.skills_data_hash = skills_data_hash(data)
@save_skills_data
def install(self, param, author=None, constraints=None, origin=''):
"""Install by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
entry = build_skill_entry(skill.name, origin, skill.is_beta)
try:
skill.install(constraints)
entry['installed'] = time.time()
entry['installation'] = 'installed'
entry['status'] = 'active'
entry['beta'] = skill.is_beta
except AlreadyInstalled:
entry = None
raise
except MsmException as e:
entry['installation'] = 'failed'
entry['status'] = 'error'
entry['failure_message'] = repr(e)
raise
finally:
# Store the entry in the list
if entry:
self.skills_data['skills'].append(entry)
@save_skills_data
def remove(self, param, author=None):
"""Remove by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill.remove()
skills = [s for s in self.skills_data['skills']
if s['name'] != skill.name]
self.skills_data['skills'] = skills
return
def update_all(self):
local_skills = [skill for skill in self.list() if skill.is_local]
def update_skill(skill):
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
if entry:
entry['updated'] = time.time()
return self.apply(update_skill, local_skills)
    @save_skills_data
def apply(self, func, skills):
"""Run a function on all skills in parallel"""
def run_item(skill):
try:
func(skill)
return True
except MsmException as e:
LOG.error('Error running {} on {}: {}'.format(
func.__name__, skill.name, repr(e)
))
return False
except:
LOG.exception('Error running {} on {}:'.format(
func.__name__, skill.name
))
with ThreadPool(20) as tp:
return tp.map(run_item, skills)
@save_skills_data
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(install_or_update_skill, self.list_defaults())
def list_all_defaults(self): # type: () -> Dict[str, List[SkillEntry]]
"""Returns {'skill_group': [SkillEntry('name')]}"""
skills = self.list()
name_to_skill = {skill.name: skill for skill in skills}
defaults = {group: [] for group in self.SKILL_GROUPS}
for section_name, skill_names in self.repo.get_default_skill_names():
section_skills = []
for skill_name in skill_names:
if skill_name in name_to_skill:
section_skills.append(name_to_skill[skill_name])
else:
LOG.warning('No such default skill: ' + skill_name)
defaults[section_name] = section_skills
return defaults
def list_defaults(self):
skill_groups = self.list_all_defaults()
if self.platform not in skill_groups:
            LOG.error('Unknown platform: ' + self.platform)
return skill_groups.get(self.platform,
skill_groups.get('default', []))
def list(self):
"""
Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to
"""
try:
self.repo.update()
except GitException as e:
if not isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
remote_skill_list = (
SkillEntry(
name, SkillEntry.create_path(self.skills_dir, url, name),
url, sha if self.versioned else '', msm=self
)
for name, path, url, sha in self.repo.get_skill_data()
)
remote_skills = {
skill.id: skill for skill in remote_skill_list
}
all_skills = []
for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills += list(remote_skills.values())
return all_skills
def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.list():
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
path = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, path, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.list()
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill
|
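A quick sketch of how update() is typically driven (an illustrative example under the same import assumption as above, not code from the repository): calling it with no arguments delegates to update_all() for every locally installed skill, while passing a string resolves the skill via find_skill() first. The skill name and author below are illustrative only.

from msm import MycroftSkillsManager

msm = MycroftSkillsManager()
msm.update()                                 # update every locally installed skill
msm.update('npr news', author='mycroftai')   # or one skill, by fuzzy name match
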
MycroftAI/mycroft-skills-manager | msm/mycroft_skills_manager.py | MycroftSkillsManager.apply | python | def apply(self, func, skills):
def run_item(skill):
try:
func(skill)
return True
except MsmException as e:
LOG.error('Error running {} on {}: {}'.format(
func.__name__, skill.name, repr(e)
))
return False
except:
LOG.exception('Error running {} on {}:'.format(
func.__name__, skill.name
))
with ThreadPool(20) as tp:
return tp.map(run_item, skills) | Run a function on all skills in parallel | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/mycroft_skills_manager.py#L240-L258 | null | class MycroftSkillsManager(object):
SKILL_GROUPS = {'default', 'mycroft_mark_1', 'picroft', 'kde'}
DEFAULT_SKILLS_DIR = "/opt/mycroft/skills"
def __init__(self, platform='default', skills_dir=None, repo=None,
versioned=True):
self.platform = platform
self.skills_dir = (
expanduser(skills_dir or '') or self.DEFAULT_SKILLS_DIR
)
self.repo = repo or SkillRepo()
self.versioned = versioned
self.lock = MsmProcessLock()
self.skills_data = None
self.saving_handled = False
self.skills_data_hash = ''
with self.lock:
self.sync_skills_data()
def __upgrade_skills_data(self, skills_data):
new = {}
if skills_data.get('version', 0) == 0:
new['blacklist'] = []
new['version'] = 1
new['skills'] = []
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
for skill in local_skills:
if 'origin' in skills_data.get(skill.name, {}):
origin = skills_data[skill.name]['origin']
elif skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
beta = skills_data.get(skill.name, {}).get('beta', False)
entry = build_skill_entry(skill.name, origin, beta)
entry['installed'] = \
skills_data.get(skill.name, {}).get('installed') or 0
if isinstance(entry['installed'], bool):
entry['installed'] = 0
entry['update'] = \
skills_data.get(skill.name, {}).get('updated') or 0
new['skills'].append(entry)
new['upgraded'] = True
return new
def curate_skills_data(self, skills_data):
""" Sync skills_data with actual skills on disk. """
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
local_skill_names = [s.name for s in local_skills]
skills_data_skills = [s['name'] for s in skills_data['skills']]
# Check for skills that aren't in the list
for skill in local_skills:
if skill.name not in skills_data_skills:
if skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
entry = build_skill_entry(skill.name, origin, False)
skills_data['skills'].append(entry)
        # Check for skills in the list that don't exist in the filesystem
remove_list = []
for s in skills_data.get('skills', []):
if (s['name'] not in local_skill_names and
s['installation'] == 'installed'):
remove_list.append(s)
for skill in remove_list:
skills_data['skills'].remove(skill)
return skills_data
def load_skills_data(self) -> dict:
skills_data = load_skills_data()
if skills_data.get('version', 0) < CURRENT_SKILLS_DATA_VERSION:
skills_data = self.__upgrade_skills_data(skills_data)
else:
skills_data = self.curate_skills_data(skills_data)
return skills_data
def sync_skills_data(self):
""" Update internal skill_data_structure from disk. """
self.skills_data = self.load_skills_data()
if 'upgraded' in self.skills_data:
self.skills_data.pop('upgraded')
else:
self.skills_data_hash = skills_data_hash(self.skills_data)
def write_skills_data(self, data=None):
""" Write skills data hash if it has been modified. """
data = data or self.skills_data
if skills_data_hash(data) != self.skills_data_hash:
write_skills_data(data)
self.skills_data_hash = skills_data_hash(data)
@save_skills_data
def install(self, param, author=None, constraints=None, origin=''):
"""Install by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
entry = build_skill_entry(skill.name, origin, skill.is_beta)
try:
skill.install(constraints)
entry['installed'] = time.time()
entry['installation'] = 'installed'
entry['status'] = 'active'
entry['beta'] = skill.is_beta
except AlreadyInstalled:
entry = None
raise
except MsmException as e:
entry['installation'] = 'failed'
entry['status'] = 'error'
entry['failure_message'] = repr(e)
raise
finally:
# Store the entry in the list
if entry:
self.skills_data['skills'].append(entry)
@save_skills_data
def remove(self, param, author=None):
"""Remove by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill.remove()
skills = [s for s in self.skills_data['skills']
if s['name'] != skill.name]
self.skills_data['skills'] = skills
return
def update_all(self):
local_skills = [skill for skill in self.list() if skill.is_local]
def update_skill(skill):
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
if entry:
entry['updated'] = time.time()
return self.apply(update_skill, local_skills)
@save_skills_data
def update(self, skill=None, author=None):
"""Update all downloaded skills or one specified skill."""
if skill is None:
return self.update_all()
else:
if isinstance(skill, str):
skill = self.find_skill(skill, author)
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
                # On a successful update, record the new 'updated' timestamp
if entry:
entry['updated'] = time.time()
    @save_skills_data
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(install_or_update_skill, self.list_defaults())
def list_all_defaults(self): # type: () -> Dict[str, List[SkillEntry]]
"""Returns {'skill_group': [SkillEntry('name')]}"""
skills = self.list()
name_to_skill = {skill.name: skill for skill in skills}
defaults = {group: [] for group in self.SKILL_GROUPS}
for section_name, skill_names in self.repo.get_default_skill_names():
section_skills = []
for skill_name in skill_names:
if skill_name in name_to_skill:
section_skills.append(name_to_skill[skill_name])
else:
LOG.warning('No such default skill: ' + skill_name)
defaults[section_name] = section_skills
return defaults
def list_defaults(self):
skill_groups = self.list_all_defaults()
if self.platform not in skill_groups:
            LOG.error('Unknown platform: ' + self.platform)
return skill_groups.get(self.platform,
skill_groups.get('default', []))
def list(self):
"""
Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to
"""
try:
self.repo.update()
except GitException as e:
if not isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
remote_skill_list = (
SkillEntry(
name, SkillEntry.create_path(self.skills_dir, url, name),
url, sha if self.versioned else '', msm=self
)
for name, path, url, sha in self.repo.get_skill_data()
)
remote_skills = {
skill.id: skill for skill in remote_skill_list
}
all_skills = []
for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills += list(remote_skills.values())
return all_skills
def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.list():
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
path = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, path, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.list()
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill
|
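apply() is a small parallel map over skills: each callable runs in a 20-thread pool, MsmExceptions are logged and counted as failures, and the per-skill results come back as a list of truthy/falsy flags. A hedged sketch, assuming the same top-level msm import:

from msm import MycroftSkillsManager

msm = MycroftSkillsManager()
local_skills = [s for s in msm.list() if s.is_local]

def report(skill):
    # any callable taking a SkillEntry works here
    print(skill.name, '->', skill.path)

results = msm.apply(report, local_skills)  # list of per-skill success flags
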
MycroftAI/mycroft-skills-manager | msm/mycroft_skills_manager.py | MycroftSkillsManager.install_defaults | python | def install_defaults(self):
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(install_or_update_skill, self.list_defaults()) | Installs the default skills, updates all others | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/mycroft_skills_manager.py#L261-L270 | [
"def list_defaults(self):\n skill_groups = self.list_all_defaults()\n\n if self.platform not in skill_groups:\n LOG.error('Unknown platform:' + self.platform)\n return skill_groups.get(self.platform,\n skill_groups.get('default', []))\n"
] | class MycroftSkillsManager(object):
SKILL_GROUPS = {'default', 'mycroft_mark_1', 'picroft', 'kde'}
DEFAULT_SKILLS_DIR = "/opt/mycroft/skills"
def __init__(self, platform='default', skills_dir=None, repo=None,
versioned=True):
self.platform = platform
self.skills_dir = (
expanduser(skills_dir or '') or self.DEFAULT_SKILLS_DIR
)
self.repo = repo or SkillRepo()
self.versioned = versioned
self.lock = MsmProcessLock()
self.skills_data = None
self.saving_handled = False
self.skills_data_hash = ''
with self.lock:
self.sync_skills_data()
def __upgrade_skills_data(self, skills_data):
new = {}
if skills_data.get('version', 0) == 0:
new['blacklist'] = []
new['version'] = 1
new['skills'] = []
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
for skill in local_skills:
if 'origin' in skills_data.get(skill.name, {}):
origin = skills_data[skill.name]['origin']
elif skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
beta = skills_data.get(skill.name, {}).get('beta', False)
entry = build_skill_entry(skill.name, origin, beta)
entry['installed'] = \
skills_data.get(skill.name, {}).get('installed') or 0
if isinstance(entry['installed'], bool):
entry['installed'] = 0
entry['update'] = \
skills_data.get(skill.name, {}).get('updated') or 0
new['skills'].append(entry)
new['upgraded'] = True
return new
def curate_skills_data(self, skills_data):
""" Sync skills_data with actual skills on disk. """
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
local_skill_names = [s.name for s in local_skills]
skills_data_skills = [s['name'] for s in skills_data['skills']]
# Check for skills that aren't in the list
for skill in local_skills:
if skill.name not in skills_data_skills:
if skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
entry = build_skill_entry(skill.name, origin, False)
skills_data['skills'].append(entry)
        # Check for skills in the list that don't exist in the filesystem
remove_list = []
for s in skills_data.get('skills', []):
if (s['name'] not in local_skill_names and
s['installation'] == 'installed'):
remove_list.append(s)
for skill in remove_list:
skills_data['skills'].remove(skill)
return skills_data
def load_skills_data(self) -> dict:
skills_data = load_skills_data()
if skills_data.get('version', 0) < CURRENT_SKILLS_DATA_VERSION:
skills_data = self.__upgrade_skills_data(skills_data)
else:
skills_data = self.curate_skills_data(skills_data)
return skills_data
def sync_skills_data(self):
""" Update internal skill_data_structure from disk. """
self.skills_data = self.load_skills_data()
if 'upgraded' in self.skills_data:
self.skills_data.pop('upgraded')
else:
self.skills_data_hash = skills_data_hash(self.skills_data)
def write_skills_data(self, data=None):
""" Write skills data hash if it has been modified. """
data = data or self.skills_data
if skills_data_hash(data) != self.skills_data_hash:
write_skills_data(data)
self.skills_data_hash = skills_data_hash(data)
@save_skills_data
def install(self, param, author=None, constraints=None, origin=''):
"""Install by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
entry = build_skill_entry(skill.name, origin, skill.is_beta)
try:
skill.install(constraints)
entry['installed'] = time.time()
entry['installation'] = 'installed'
entry['status'] = 'active'
entry['beta'] = skill.is_beta
except AlreadyInstalled:
entry = None
raise
except MsmException as e:
entry['installation'] = 'failed'
entry['status'] = 'error'
entry['failure_message'] = repr(e)
raise
finally:
# Store the entry in the list
if entry:
self.skills_data['skills'].append(entry)
@save_skills_data
def remove(self, param, author=None):
"""Remove by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill.remove()
skills = [s for s in self.skills_data['skills']
if s['name'] != skill.name]
self.skills_data['skills'] = skills
return
def update_all(self):
local_skills = [skill for skill in self.list() if skill.is_local]
def update_skill(skill):
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
if entry:
entry['updated'] = time.time()
return self.apply(update_skill, local_skills)
@save_skills_data
def update(self, skill=None, author=None):
"""Update all downloaded skills or one specified skill."""
if skill is None:
return self.update_all()
else:
if isinstance(skill, str):
skill = self.find_skill(skill, author)
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
                # On a successful update, record the new 'updated' timestamp
if entry:
entry['updated'] = time.time()
@save_skills_data
def apply(self, func, skills):
"""Run a function on all skills in parallel"""
def run_item(skill):
try:
func(skill)
return True
except MsmException as e:
LOG.error('Error running {} on {}: {}'.format(
func.__name__, skill.name, repr(e)
))
return False
except:
LOG.exception('Error running {} on {}:'.format(
func.__name__, skill.name
))
with ThreadPool(20) as tp:
return tp.map(run_item, skills)
def list_all_defaults(self): # type: () -> Dict[str, List[SkillEntry]]
"""Returns {'skill_group': [SkillEntry('name')]}"""
skills = self.list()
name_to_skill = {skill.name: skill for skill in skills}
defaults = {group: [] for group in self.SKILL_GROUPS}
for section_name, skill_names in self.repo.get_default_skill_names():
section_skills = []
for skill_name in skill_names:
if skill_name in name_to_skill:
section_skills.append(name_to_skill[skill_name])
else:
LOG.warning('No such default skill: ' + skill_name)
defaults[section_name] = section_skills
return defaults
def list_defaults(self):
skill_groups = self.list_all_defaults()
if self.platform not in skill_groups:
            LOG.error('Unknown platform: ' + self.platform)
return skill_groups.get(self.platform,
skill_groups.get('default', []))
def list(self):
"""
Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to
"""
try:
self.repo.update()
except GitException as e:
if not isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
remote_skill_list = (
SkillEntry(
name, SkillEntry.create_path(self.skills_dir, url, name),
url, sha if self.versioned else '', msm=self
)
for name, path, url, sha in self.repo.get_skill_data()
)
remote_skills = {
skill.id: skill for skill in remote_skill_list
}
all_skills = []
for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills += list(remote_skills.values())
return all_skills
def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.list():
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
path = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, path, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.list()
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill
|
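install_defaults() combines the two operations above: defaults already on disk are update()d, and missing ones are install()ed with origin='default'. A minimal sketch under the same import assumption; the platform value is one of the SKILL_GROUPS shown in the class:

from msm import MycroftSkillsManager

msm = MycroftSkillsManager(platform='mycroft_mark_1')
msm.install_defaults()  # installs missing defaults, updates the local ones
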
MycroftAI/mycroft-skills-manager | msm/mycroft_skills_manager.py | MycroftSkillsManager.list_all_defaults | python | def list_all_defaults(self): # type: () -> Dict[str, List[SkillEntry]]
skills = self.list()
name_to_skill = {skill.name: skill for skill in skills}
defaults = {group: [] for group in self.SKILL_GROUPS}
for section_name, skill_names in self.repo.get_default_skill_names():
section_skills = []
for skill_name in skill_names:
if skill_name in name_to_skill:
section_skills.append(name_to_skill[skill_name])
else:
LOG.warning('No such default skill: ' + skill_name)
defaults[section_name] = section_skills
return defaults | Returns {'skill_group': [SkillEntry('name')]} | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/mycroft_skills_manager.py#L272-L287 | [
"def list(self):\n \"\"\"\n Load a list of SkillEntry objects from both local and\n remote skills\n\n It is necessary to load both local and remote skills at\n the same time to correctly associate local skills with the name\n in the repo and remote skills with any custom path that they\n have been downloaded to\n \"\"\"\n try:\n self.repo.update()\n except GitException as e:\n if not isdir(self.repo.path):\n raise\n LOG.warning('Failed to update repo: {}'.format(repr(e)))\n remote_skill_list = (\n SkillEntry(\n name, SkillEntry.create_path(self.skills_dir, url, name),\n url, sha if self.versioned else '', msm=self\n )\n for name, path, url, sha in self.repo.get_skill_data()\n )\n remote_skills = {\n skill.id: skill for skill in remote_skill_list\n }\n all_skills = []\n for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):\n skill = SkillEntry.from_folder(dirname(skill_file), msm=self)\n if skill.id in remote_skills:\n skill.attach(remote_skills.pop(skill.id))\n all_skills.append(skill)\n all_skills += list(remote_skills.values())\n return all_skills\n"
] | class MycroftSkillsManager(object):
SKILL_GROUPS = {'default', 'mycroft_mark_1', 'picroft', 'kde'}
DEFAULT_SKILLS_DIR = "/opt/mycroft/skills"
def __init__(self, platform='default', skills_dir=None, repo=None,
versioned=True):
self.platform = platform
self.skills_dir = (
expanduser(skills_dir or '') or self.DEFAULT_SKILLS_DIR
)
self.repo = repo or SkillRepo()
self.versioned = versioned
self.lock = MsmProcessLock()
self.skills_data = None
self.saving_handled = False
self.skills_data_hash = ''
with self.lock:
self.sync_skills_data()
def __upgrade_skills_data(self, skills_data):
new = {}
if skills_data.get('version', 0) == 0:
new['blacklist'] = []
new['version'] = 1
new['skills'] = []
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
for skill in local_skills:
if 'origin' in skills_data.get(skill.name, {}):
origin = skills_data[skill.name]['origin']
elif skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
beta = skills_data.get(skill.name, {}).get('beta', False)
entry = build_skill_entry(skill.name, origin, beta)
entry['installed'] = \
skills_data.get(skill.name, {}).get('installed') or 0
if isinstance(entry['installed'], bool):
entry['installed'] = 0
entry['update'] = \
skills_data.get(skill.name, {}).get('updated') or 0
new['skills'].append(entry)
new['upgraded'] = True
return new
def curate_skills_data(self, skills_data):
""" Sync skills_data with actual skills on disk. """
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
local_skill_names = [s.name for s in local_skills]
skills_data_skills = [s['name'] for s in skills_data['skills']]
# Check for skills that aren't in the list
for skill in local_skills:
if skill.name not in skills_data_skills:
if skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
entry = build_skill_entry(skill.name, origin, False)
skills_data['skills'].append(entry)
        # Check for skills in the list that don't exist in the filesystem
remove_list = []
for s in skills_data.get('skills', []):
if (s['name'] not in local_skill_names and
s['installation'] == 'installed'):
remove_list.append(s)
for skill in remove_list:
skills_data['skills'].remove(skill)
return skills_data
def load_skills_data(self) -> dict:
skills_data = load_skills_data()
if skills_data.get('version', 0) < CURRENT_SKILLS_DATA_VERSION:
skills_data = self.__upgrade_skills_data(skills_data)
else:
skills_data = self.curate_skills_data(skills_data)
return skills_data
def sync_skills_data(self):
""" Update internal skill_data_structure from disk. """
self.skills_data = self.load_skills_data()
if 'upgraded' in self.skills_data:
self.skills_data.pop('upgraded')
else:
self.skills_data_hash = skills_data_hash(self.skills_data)
def write_skills_data(self, data=None):
""" Write skills data hash if it has been modified. """
data = data or self.skills_data
if skills_data_hash(data) != self.skills_data_hash:
write_skills_data(data)
self.skills_data_hash = skills_data_hash(data)
@save_skills_data
def install(self, param, author=None, constraints=None, origin=''):
"""Install by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
entry = build_skill_entry(skill.name, origin, skill.is_beta)
try:
skill.install(constraints)
entry['installed'] = time.time()
entry['installation'] = 'installed'
entry['status'] = 'active'
entry['beta'] = skill.is_beta
except AlreadyInstalled:
entry = None
raise
except MsmException as e:
entry['installation'] = 'failed'
entry['status'] = 'error'
entry['failure_message'] = repr(e)
raise
finally:
# Store the entry in the list
if entry:
self.skills_data['skills'].append(entry)
@save_skills_data
def remove(self, param, author=None):
"""Remove by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill.remove()
skills = [s for s in self.skills_data['skills']
if s['name'] != skill.name]
self.skills_data['skills'] = skills
return
def update_all(self):
local_skills = [skill for skill in self.list() if skill.is_local]
def update_skill(skill):
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
if entry:
entry['updated'] = time.time()
return self.apply(update_skill, local_skills)
@save_skills_data
def update(self, skill=None, author=None):
"""Update all downloaded skills or one specified skill."""
if skill is None:
return self.update_all()
else:
if isinstance(skill, str):
skill = self.find_skill(skill, author)
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
                # On a successful update, record the new 'updated' timestamp
if entry:
entry['updated'] = time.time()
@save_skills_data
def apply(self, func, skills):
"""Run a function on all skills in parallel"""
def run_item(skill):
try:
func(skill)
return True
except MsmException as e:
LOG.error('Error running {} on {}: {}'.format(
func.__name__, skill.name, repr(e)
))
return False
except:
LOG.exception('Error running {} on {}:'.format(
func.__name__, skill.name
))
with ThreadPool(20) as tp:
return tp.map(run_item, skills)
@save_skills_data
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(install_or_update_skill, self.list_defaults())
def list_defaults(self):
skill_groups = self.list_all_defaults()
if self.platform not in skill_groups:
            LOG.error('Unknown platform: ' + self.platform)
return skill_groups.get(self.platform,
skill_groups.get('default', []))
def list(self):
"""
Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to
"""
try:
self.repo.update()
except GitException as e:
if not isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
remote_skill_list = (
SkillEntry(
name, SkillEntry.create_path(self.skills_dir, url, name),
url, sha if self.versioned else '', msm=self
)
for name, path, url, sha in self.repo.get_skill_data()
)
remote_skills = {
skill.id: skill for skill in remote_skill_list
}
all_skills = []
for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills += list(remote_skills.values())
return all_skills
def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.list():
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
path = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, path, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.list()
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill
|
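list_all_defaults() returns one list per skill group, whether or not the group applies to the current platform; list_defaults() then narrows that to the configured platform. A brief illustrative sketch under the same import assumption:

from msm import MycroftSkillsManager

msm = MycroftSkillsManager()
defaults = msm.list_all_defaults()  # {'skill_group': [SkillEntry, ...]}
for group, skills in defaults.items():
    print(group, '->', [skill.name for skill in skills])
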
MycroftAI/mycroft-skills-manager | msm/mycroft_skills_manager.py | MycroftSkillsManager.list | python | def list(self):
try:
self.repo.update()
except GitException as e:
if not isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
remote_skill_list = (
SkillEntry(
name, SkillEntry.create_path(self.skills_dir, url, name),
url, sha if self.versioned else '', msm=self
)
for name, path, url, sha in self.repo.get_skill_data()
)
remote_skills = {
skill.id: skill for skill in remote_skill_list
}
all_skills = []
for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills += list(remote_skills.values())
return all_skills | Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/mycroft_skills_manager.py#L297-L330 | [
"def from_folder(cls, path, msm=None):\n return cls(basename(path), path, cls.find_git_url(path), msm=msm)\n"
] | class MycroftSkillsManager(object):
SKILL_GROUPS = {'default', 'mycroft_mark_1', 'picroft', 'kde'}
DEFAULT_SKILLS_DIR = "/opt/mycroft/skills"
def __init__(self, platform='default', skills_dir=None, repo=None,
versioned=True):
self.platform = platform
self.skills_dir = (
expanduser(skills_dir or '') or self.DEFAULT_SKILLS_DIR
)
self.repo = repo or SkillRepo()
self.versioned = versioned
self.lock = MsmProcessLock()
self.skills_data = None
self.saving_handled = False
self.skills_data_hash = ''
with self.lock:
self.sync_skills_data()
def __upgrade_skills_data(self, skills_data):
new = {}
if skills_data.get('version', 0) == 0:
new['blacklist'] = []
new['version'] = 1
new['skills'] = []
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
for skill in local_skills:
if 'origin' in skills_data.get(skill.name, {}):
origin = skills_data[skill.name]['origin']
elif skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
beta = skills_data.get(skill.name, {}).get('beta', False)
entry = build_skill_entry(skill.name, origin, beta)
entry['installed'] = \
skills_data.get(skill.name, {}).get('installed') or 0
if isinstance(entry['installed'], bool):
entry['installed'] = 0
entry['update'] = \
skills_data.get(skill.name, {}).get('updated') or 0
new['skills'].append(entry)
new['upgraded'] = True
return new
def curate_skills_data(self, skills_data):
""" Sync skills_data with actual skills on disk. """
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
local_skill_names = [s.name for s in local_skills]
skills_data_skills = [s['name'] for s in skills_data['skills']]
# Check for skills that aren't in the list
for skill in local_skills:
if skill.name not in skills_data_skills:
if skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
entry = build_skill_entry(skill.name, origin, False)
skills_data['skills'].append(entry)
        # Check for skills in the list that don't exist in the filesystem
remove_list = []
for s in skills_data.get('skills', []):
if (s['name'] not in local_skill_names and
s['installation'] == 'installed'):
remove_list.append(s)
for skill in remove_list:
skills_data['skills'].remove(skill)
return skills_data
def load_skills_data(self) -> dict:
skills_data = load_skills_data()
if skills_data.get('version', 0) < CURRENT_SKILLS_DATA_VERSION:
skills_data = self.__upgrade_skills_data(skills_data)
else:
skills_data = self.curate_skills_data(skills_data)
return skills_data
def sync_skills_data(self):
""" Update internal skill_data_structure from disk. """
self.skills_data = self.load_skills_data()
if 'upgraded' in self.skills_data:
self.skills_data.pop('upgraded')
else:
self.skills_data_hash = skills_data_hash(self.skills_data)
def write_skills_data(self, data=None):
""" Write skills data hash if it has been modified. """
data = data or self.skills_data
if skills_data_hash(data) != self.skills_data_hash:
write_skills_data(data)
self.skills_data_hash = skills_data_hash(data)
@save_skills_data
def install(self, param, author=None, constraints=None, origin=''):
"""Install by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
entry = build_skill_entry(skill.name, origin, skill.is_beta)
try:
skill.install(constraints)
entry['installed'] = time.time()
entry['installation'] = 'installed'
entry['status'] = 'active'
entry['beta'] = skill.is_beta
except AlreadyInstalled:
entry = None
raise
except MsmException as e:
entry['installation'] = 'failed'
entry['status'] = 'error'
entry['failure_message'] = repr(e)
raise
finally:
# Store the entry in the list
if entry:
self.skills_data['skills'].append(entry)
@save_skills_data
def remove(self, param, author=None):
"""Remove by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill.remove()
skills = [s for s in self.skills_data['skills']
if s['name'] != skill.name]
self.skills_data['skills'] = skills
return
def update_all(self):
local_skills = [skill for skill in self.list() if skill.is_local]
def update_skill(skill):
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
if entry:
entry['updated'] = time.time()
return self.apply(update_skill, local_skills)
@save_skills_data
def update(self, skill=None, author=None):
"""Update all downloaded skills or one specified skill."""
if skill is None:
return self.update_all()
else:
if isinstance(skill, str):
skill = self.find_skill(skill, author)
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
                # On a successful update, record the new 'updated' timestamp
if entry:
entry['updated'] = time.time()
@save_skills_data
def apply(self, func, skills):
"""Run a function on all skills in parallel"""
def run_item(skill):
try:
func(skill)
return True
except MsmException as e:
LOG.error('Error running {} on {}: {}'.format(
func.__name__, skill.name, repr(e)
))
return False
except:
LOG.exception('Error running {} on {}:'.format(
func.__name__, skill.name
))
with ThreadPool(20) as tp:
return tp.map(run_item, skills)
@save_skills_data
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(install_or_update_skill, self.list_defaults())
def list_all_defaults(self): # type: () -> Dict[str, List[SkillEntry]]
"""Returns {'skill_group': [SkillEntry('name')]}"""
skills = self.list()
name_to_skill = {skill.name: skill for skill in skills}
defaults = {group: [] for group in self.SKILL_GROUPS}
for section_name, skill_names in self.repo.get_default_skill_names():
section_skills = []
for skill_name in skill_names:
if skill_name in name_to_skill:
section_skills.append(name_to_skill[skill_name])
else:
LOG.warning('No such default skill: ' + skill_name)
defaults[section_name] = section_skills
return defaults
def list_defaults(self):
skill_groups = self.list_all_defaults()
if self.platform not in skill_groups:
LOG.error('Unknown platform:' + self.platform)
return skill_groups.get(self.platform,
skill_groups.get('default', []))
def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.list():
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
path = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, path, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.list()
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill
|
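list() merges the skills found on disk with the repo's catalogue, so each SkillEntry knows both its local path and its remote URL. A usage sketch (the skills_dir path is illustrative; the default is /opt/mycroft/skills):

from msm import MycroftSkillsManager

msm = MycroftSkillsManager(skills_dir='~/mycroft/skills')
for skill in msm.list():
    marker = '*' if skill.is_local else ' '
    print(marker, skill.name, 'by', skill.author)
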
MycroftAI/mycroft-skills-manager | msm/mycroft_skills_manager.py | MycroftSkillsManager.find_skill | python | def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.list():
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
path = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, path, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.list()
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill | Find skill by name or url | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/mycroft_skills_manager.py#L332-L362 | [
"def list(self):\n \"\"\"\n Load a list of SkillEntry objects from both local and\n remote skills\n\n It is necessary to load both local and remote skills at\n the same time to correctly associate local skills with the name\n in the repo and remote skills with any custom path that they\n have been downloaded to\n \"\"\"\n try:\n self.repo.update()\n except GitException as e:\n if not isdir(self.repo.path):\n raise\n LOG.warning('Failed to update repo: {}'.format(repr(e)))\n remote_skill_list = (\n SkillEntry(\n name, SkillEntry.create_path(self.skills_dir, url, name),\n url, sha if self.versioned else '', msm=self\n )\n for name, path, url, sha in self.repo.get_skill_data()\n )\n remote_skills = {\n skill.id: skill for skill in remote_skill_list\n }\n all_skills = []\n for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):\n skill = SkillEntry.from_folder(dirname(skill_file), msm=self)\n if skill.id in remote_skills:\n skill.attach(remote_skills.pop(skill.id))\n all_skills.append(skill)\n all_skills += list(remote_skills.values())\n return all_skills\n",
"def create_path(cls, folder, url, name=''):\n return join(folder, '{}.{}'.format(\n name or cls.extract_repo_name(url), cls.extract_author(url)\n ).lower())\n",
"def extract_repo_name(url):\n s = url.rstrip('/').split(\"/\")[-1]\n a, b, c = s.rpartition('.git')\n if not c:\n return a\n return s\n",
"def extract_repo_id(cls, url):\n return '{}:{}'.format(cls.extract_author(url).lower(),\n cls.extract_repo_name(url)).lower()\n"
] | class MycroftSkillsManager(object):
SKILL_GROUPS = {'default', 'mycroft_mark_1', 'picroft', 'kde'}
DEFAULT_SKILLS_DIR = "/opt/mycroft/skills"
def __init__(self, platform='default', skills_dir=None, repo=None,
versioned=True):
self.platform = platform
self.skills_dir = (
expanduser(skills_dir or '') or self.DEFAULT_SKILLS_DIR
)
self.repo = repo or SkillRepo()
self.versioned = versioned
self.lock = MsmProcessLock()
self.skills_data = None
self.saving_handled = False
self.skills_data_hash = ''
with self.lock:
self.sync_skills_data()
def __upgrade_skills_data(self, skills_data):
new = {}
if skills_data.get('version', 0) == 0:
new['blacklist'] = []
new['version'] = 1
new['skills'] = []
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
for skill in local_skills:
if 'origin' in skills_data.get(skill.name, {}):
origin = skills_data[skill.name]['origin']
elif skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
beta = skills_data.get(skill.name, {}).get('beta', False)
entry = build_skill_entry(skill.name, origin, beta)
entry['installed'] = \
skills_data.get(skill.name, {}).get('installed') or 0
if isinstance(entry['installed'], bool):
entry['installed'] = 0
entry['update'] = \
skills_data.get(skill.name, {}).get('updated') or 0
new['skills'].append(entry)
new['upgraded'] = True
return new
def curate_skills_data(self, skills_data):
""" Sync skills_data with actual skills on disk. """
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
local_skill_names = [s.name for s in local_skills]
skills_data_skills = [s['name'] for s in skills_data['skills']]
# Check for skills that aren't in the list
for skill in local_skills:
if skill.name not in skills_data_skills:
if skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
entry = build_skill_entry(skill.name, origin, False)
skills_data['skills'].append(entry)
        # Check for skills in the list that don't exist in the filesystem
remove_list = []
for s in skills_data.get('skills', []):
if (s['name'] not in local_skill_names and
s['installation'] == 'installed'):
remove_list.append(s)
for skill in remove_list:
skills_data['skills'].remove(skill)
return skills_data
def load_skills_data(self) -> dict:
skills_data = load_skills_data()
if skills_data.get('version', 0) < CURRENT_SKILLS_DATA_VERSION:
skills_data = self.__upgrade_skills_data(skills_data)
else:
skills_data = self.curate_skills_data(skills_data)
return skills_data
def sync_skills_data(self):
""" Update internal skill_data_structure from disk. """
self.skills_data = self.load_skills_data()
if 'upgraded' in self.skills_data:
self.skills_data.pop('upgraded')
else:
self.skills_data_hash = skills_data_hash(self.skills_data)
def write_skills_data(self, data=None):
""" Write skills data hash if it has been modified. """
data = data or self.skills_data
if skills_data_hash(data) != self.skills_data_hash:
write_skills_data(data)
self.skills_data_hash = skills_data_hash(data)
@save_skills_data
def install(self, param, author=None, constraints=None, origin=''):
"""Install by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
entry = build_skill_entry(skill.name, origin, skill.is_beta)
try:
skill.install(constraints)
entry['installed'] = time.time()
entry['installation'] = 'installed'
entry['status'] = 'active'
entry['beta'] = skill.is_beta
except AlreadyInstalled:
entry = None
raise
except MsmException as e:
entry['installation'] = 'failed'
entry['status'] = 'error'
entry['failure_message'] = repr(e)
raise
finally:
# Store the entry in the list
if entry:
self.skills_data['skills'].append(entry)
@save_skills_data
def remove(self, param, author=None):
"""Remove by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill.remove()
skills = [s for s in self.skills_data['skills']
if s['name'] != skill.name]
self.skills_data['skills'] = skills
return
def update_all(self):
local_skills = [skill for skill in self.list() if skill.is_local]
def update_skill(skill):
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
if entry:
entry['updated'] = time.time()
return self.apply(update_skill, local_skills)
@save_skills_data
def update(self, skill=None, author=None):
"""Update all downloaded skills or one specified skill."""
if skill is None:
return self.update_all()
else:
if isinstance(skill, str):
skill = self.find_skill(skill, author)
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
# On a successful update, refresh the 'updated' timestamp
if entry:
entry['updated'] = time.time()
@save_skills_data
def apply(self, func, skills):
"""Run a function on all skills in parallel"""
def run_item(skill):
try:
func(skill)
return True
except MsmException as e:
LOG.error('Error running {} on {}: {}'.format(
func.__name__, skill.name, repr(e)
))
return False
except:
LOG.exception('Error running {} on {}:'.format(
func.__name__, skill.name
))
with ThreadPool(20) as tp:
return tp.map(run_item, skills)
@save_skills_data
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(install_or_update_skill, self.list_defaults())
def list_all_defaults(self): # type: () -> Dict[str, List[SkillEntry]]
"""Returns {'skill_group': [SkillEntry('name')]}"""
skills = self.list()
name_to_skill = {skill.name: skill for skill in skills}
defaults = {group: [] for group in self.SKILL_GROUPS}
for section_name, skill_names in self.repo.get_default_skill_names():
section_skills = []
for skill_name in skill_names:
if skill_name in name_to_skill:
section_skills.append(name_to_skill[skill_name])
else:
LOG.warning('No such default skill: ' + skill_name)
defaults[section_name] = section_skills
return defaults
def list_defaults(self):
skill_groups = self.list_all_defaults()
if self.platform not in skill_groups:
LOG.error('Unknown platform: ' + self.platform)
return skill_groups.get(self.platform,
skill_groups.get('default', []))
def list(self):
"""
Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to
"""
try:
self.repo.update()
except GitException as e:
if not isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
remote_skill_list = (
SkillEntry(
name, SkillEntry.create_path(self.skills_dir, url, name),
url, sha if self.versioned else '', msm=self
)
for name, path, url, sha in self.repo.get_skill_data()
)
remote_skills = {
skill.id: skill for skill in remote_skill_list
}
all_skills = []
for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills += list(remote_skills.values())
return all_skills
|
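Editor's note: a minimal usage sketch for the list() API documented in the record above. The import path and constructor defaults are assumptions based on the code shown here; running it requires the msm package plus git/network access for the skills repo.

# Hedged usage sketch for MycroftSkillsManager.list(); assumes the msm
# package exports MycroftSkillsManager and a skills_dir is available.
from msm import MycroftSkillsManager

manager = MycroftSkillsManager(platform='default')
for skill in manager.list():
    # SkillEntry exposes name, url and is_local (see the scope above)
    kind = 'local' if skill.is_local else 'remote'
    print('{:30} {:6} {}'.format(skill.name, kind, skill.url))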
MycroftAI/mycroft-skills-manager | msm/skills_data.py | load_skills_data | python | def load_skills_data() -> dict:
skills_data_file = expanduser('~/.mycroft/skills.json')
if isfile(skills_data_file):
try:
with open(skills_data_file) as f:
return json.load(f)
except json.JSONDecodeError:
return {}
else:
return {} | Contains info on how skills should be updated | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/skills_data.py#L9-L19 | null | """
Functions related to manipulating the skills_data.json
"""
import json
from os.path import expanduser, isfile
def write_skills_data(data: dict):
skills_data_file = expanduser('~/.mycroft/skills.json')
with open(skills_data_file, 'w') as f:
json.dump(data, f, indent=4, separators=(',', ':'))
def get_skill_entry(name, skills_data) -> dict:
""" Find a skill entry in the skills_data and returns it. """
for e in skills_data.get('skills', []):
if e.get('name') == name:
return e
return {}
def build_skill_entry(name, origin, beta) -> dict:
""" Create a new skill entry
Arguments:
name: skill name
origin: the source of the installation
beta: Boolean indicating whether the skill is in beta
Returns:
populated skills entry
"""
return {
'name': name,
'origin': origin,
'beta': beta,
'status': 'active',
'installed': 0,
'updated': 0,
'installation': 'installed'
}
def skills_data_hash(data):
return hash(json.dumps(data, sort_keys=True))
|
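Editor's note: a short sketch of the skills.json round trip built from the helpers in this record; everything used below is defined in msm/skills_data.py as shown above.

# Sketch of the skills.json round trip; the on-disk path
# (~/.mycroft/skills.json) is the one used by these helpers.
from msm.skills_data import load_skills_data, write_skills_data, skills_data_hash

data = load_skills_data()            # {} if the file is missing or not valid JSON
before = skills_data_hash(data)
data.setdefault('blacklist', [])     # hypothetical edit for illustration
if skills_data_hash(data) != before:
    write_skills_data(data)          # dumps with indent=4, as shown above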
MycroftAI/mycroft-skills-manager | msm/skills_data.py | get_skill_entry | python | def get_skill_entry(name, skills_data) -> dict:
for e in skills_data.get('skills', []):
if e.get('name') == name:
return e
return {} | Find a skill entry in the skills_data and return it.
Functions related to manipulating the skills_data.json
"""
import json
from os.path import expanduser, isfile
def load_skills_data() -> dict:
"""Contains info on how skills should be updated"""
skills_data_file = expanduser('~/.mycroft/skills.json')
if isfile(skills_data_file):
try:
with open(skills_data_file) as f:
return json.load(f)
except json.JSONDecodeError:
return {}
else:
return {}
def write_skills_data(data: dict):
skills_data_file = expanduser('~/.mycroft/skills.json')
with open(skills_data_file, 'w') as f:
json.dump(data, f, indent=4, separators=(',', ':'))
def build_skill_entry(name, origin, beta) -> dict:
""" Create a new skill entry
Arguments:
name: skill name
origin: the source of the installation
beta: Boolean indicating whether the skill is in beta
Returns:
populated skills entry
"""
return {
'name': name,
'origin': origin,
'beta': beta,
'status': 'active',
'installed': 0,
'updated': 0,
'installation': 'installed'
}
def skills_data_hash(data):
return hash(json.dumps(data, sort_keys=True))
|
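Editor's note: a sketch pairing get_skill_entry() with build_skill_entry() from this module; the skill name 'skill-example' is a hypothetical placeholder.

# Look up an entry and create it when absent, mirroring how
# MycroftSkillsManager curates its skills_data structure.
from msm.skills_data import (load_skills_data, write_skills_data,
                             get_skill_entry, build_skill_entry)

skills_data = load_skills_data()
entry = get_skill_entry('skill-example', skills_data)   # {} when absent
if not entry:
    entry = build_skill_entry('skill-example', origin='cli', beta=False)
    skills_data.setdefault('skills', []).append(entry)
    write_skills_data(skills_data)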
MycroftAI/mycroft-skills-manager | msm/skill_entry.py | _backup_previous_version | python | def _backup_previous_version(func: Callable = None):
@wraps(func)
def wrapper(self, *args, **kwargs):
self.old_path = None
if self.is_local:
self.old_path = join(gettempdir(), self.name)
if exists(self.old_path):
rmtree(self.old_path)
shutil.copytree(self.path, self.old_path)
try:
func(self, *args, **kwargs)
# Modified skill or GitError should not restore working copy
except (SkillModified, GitError, GitException):
raise
except Exception:
LOG.info('Problem performing action. Restoring skill to '
'previous state...')
if exists(self.path):
rmtree(self.path)
if self.old_path and exists(self.old_path):
shutil.copytree(self.old_path, self.path)
self.is_local = exists(self.path)
raise
return wrapper | Private decorator to back up previous skill folder | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/skill_entry.py#L69-L96 | null | # Copyright (c) 2018 Mycroft AI, Inc.
#
# This file is part of Mycroft Skills Manager
# (see https://github.com/MatthewScholefield/mycroft-light).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import logging
import os
import shutil
import subprocess
import yaml
from contextlib import contextmanager
from difflib import SequenceMatcher
from functools import wraps
from git import Repo, GitError
from git.exc import GitCommandError
from lazy import lazy
from os.path import exists, join, basename, dirname, isfile
from shutil import rmtree, move
from subprocess import PIPE, Popen
from tempfile import mktemp, gettempdir
from threading import Lock
from typing import Callable
from pako import PakoManager
from msm import SkillRequirementsException, git_to_msm_exceptions
from msm.exceptions import PipRequirementsException, \
SystemRequirementsException, AlreadyInstalled, SkillModified, \
AlreadyRemoved, RemoveException, CloneException, NotInstalled, GitException
from msm.util import Git
LOG = logging.getLogger(__name__)
# Branches which can be switched from when updating
# TODO Make this configurable
SWITCHABLE_BRANCHES = ['master']
# default constraints to use if no are given
DEFAULT_CONSTRAINTS = '/etc/mycroft/constraints.txt'
@contextmanager
def work_dir(directory):
old_dir = os.getcwd()
os.chdir(directory)
try:
yield
finally:
os.chdir(old_dir)
class SkillEntry(object):
pip_lock = Lock()
manifest_yml_format = {
'dependencies': {
'system': {},
'exes': [],
'skill': [],
'python': []
}
}
def __init__(self, name, path, url='', sha='', msm=None):
url = url.rstrip('/')
url = url[:-len('.git')] if url.endswith('.git') else url
self.name = name
self.path = path
self.url = url
self.sha = sha
self.msm = msm
if msm:
u = url.lower()
self.meta_info = msm.repo.skills_meta_info.get(u, {})
else:
self.meta_info = {}
self.author = self.extract_author(url) if url else ''
self.id = self.extract_repo_id(url) if url else name
self.is_local = exists(path)
self.old_path = None # Path of previous version while upgrading
@property
def is_beta(self):
return not self.sha or self.sha == 'HEAD'
def __str__(self):
return self.name
def attach(self, remote_entry):
"""Attach a remote entry to a local entry"""
self.name = remote_entry.name
self.sha = remote_entry.sha
self.url = remote_entry.url
self.author = remote_entry.author
return self
@classmethod
def from_folder(cls, path, msm=None):
return cls(basename(path), path, cls.find_git_url(path), msm=msm)
@classmethod
def create_path(cls, folder, url, name=''):
return join(folder, '{}.{}'.format(
name or cls.extract_repo_name(url), cls.extract_author(url)
).lower())
@staticmethod
def extract_repo_name(url):
s = url.rstrip('/').split("/")[-1]
a, b, c = s.rpartition('.git')
if not c:
return a
return s
@staticmethod
def extract_author(url):
return url.rstrip('/').split("/")[-2].split(':')[-1]
@classmethod
def extract_repo_id(cls, url):
return '{}:{}'.format(cls.extract_author(url).lower(),
cls.extract_repo_name(url)).lower()
@staticmethod
def _tokenize(x):
return x.replace('-', ' ').split()
@staticmethod
def _extract_tokens(s, tokens):
s = s.lower().replace('-', ' ')
extracted = []
for token in tokens:
extracted += [token] * s.count(token)
s = s.replace(token, '')
s = ' '.join(i for i in s.split(' ') if i)
tokens = [i for i in s.split(' ') if i]
return s, tokens, extracted
@classmethod
def _compare(cls, a, b):
return SequenceMatcher(a=a, b=b).ratio()
def match(self, query, author=None):
search, search_tokens, search_common = self._extract_tokens(
query, ['skill', 'fallback', 'mycroft']
)
name, name_tokens, name_common = self._extract_tokens(
self.name, ['skill', 'fallback', 'mycroft']
)
weights = [
(9, self._compare(name, search)),
(9, self._compare(name.split(' '), search_tokens)),
(2, self._compare(name_common, search_common)),
]
if author:
author_weight = self._compare(self.author, author)
weights.append((5, author_weight))
author_weight = author_weight
else:
author_weight = 1.0
return author_weight * (
sum(weight * val for weight, val in weights) /
sum(weight for weight, val in weights)
)
def run_pip(self, constraints=None):
if not self.dependent_python_packages:
return
# Use constraints to limit the installed versions
if constraints and not exists(constraints):
LOG.error('Couldn\'t find the constraints file')
return False
elif exists(DEFAULT_CONSTRAINTS):
constraints = DEFAULT_CONSTRAINTS
LOG.info('Installing requirements.txt for ' + self.name)
can_pip = os.access(dirname(sys.executable), os.W_OK | os.X_OK)
pip_args = [sys.executable, '-m', 'pip', 'install']
pip_args += self.dependent_python_packages
if constraints:
pip_args += ['-c', constraints]
if not can_pip:
pip_args = ['sudo', '-n'] + pip_args
with self.pip_lock:
proc = Popen(pip_args, stdout=PIPE, stderr=PIPE)
pip_code = proc.wait()
if pip_code != 0:
stderr = proc.stderr.read().decode()
if pip_code == 1 and 'sudo:' in stderr and pip_args[0] == 'sudo':
raise PipRequirementsException(
2, '', 'Permission denied while installing pip '
'dependencies. Please run in virtualenv or use sudo'
)
raise PipRequirementsException(
pip_code, proc.stdout.read().decode(), stderr
)
return True
def install_system_deps(self):
self.run_requirements_sh()
system_packages = {
exe: (packages or '').split()
for exe, packages in self.dependent_system_packages.items()
}
LOG.info('Installing system requirements...')
all_deps = system_packages.pop('all', [])
try:
manager = PakoManager()
success = manager.install(all_deps, overrides=system_packages)
except RuntimeError as e:
LOG.warning('Failed to launch package manager: {}'.format(e))
success = False
missing_exes = [
exe for exe in self.dependencies.get('exes') or []
if not shutil.which(exe)
]
if missing_exes:
if not success:
LOG.warning('Failed to install dependencies.')
if all_deps:
LOG.warning('Please install manually: {}'.format(
' '.join(all_deps)
))
raise SkillRequirementsException('Could not find exes: {}'.format(
', '.join(missing_exes)
))
return success
def run_requirements_sh(self):
setup_script = join(self.path, "requirements.sh")
if not exists(setup_script):
return False
with work_dir(self.path):
rc = subprocess.call(["bash", setup_script])
if rc != 0:
LOG.error("Requirements.sh failed with error code: " + str(rc))
raise SystemRequirementsException(rc)
LOG.info("Successfully ran requirements.sh for " + self.name)
return True
def run_skill_requirements(self):
if not self.msm:
raise ValueError('Pass msm to SkillEntry to install skill deps')
try:
for skill_dep in self.dependent_skills:
LOG.info("Installing skill dependency: {}".format(skill_dep))
try:
self.msm.install(skill_dep)
except AlreadyInstalled:
pass
except Exception as e:
raise SkillRequirementsException(e)
def verify_info(self, info, fmt):
if not info:
return
if not isinstance(info, type(fmt)):
LOG.warning('Invalid value type manifest.yml for {}: {}'.format(
self.name, type(info)
))
return
if not isinstance(info, dict) or not fmt:
return
for key in info:
if key not in fmt:
LOG.warning('Unknown key in manifest.yml for {}: {}'.format(
self.name, key
))
continue
self.verify_info(info[key], fmt[key])
@lazy
def skill_info(self):
yml_path = join(self.path, 'manifest.yml')
if exists(yml_path):
LOG.info('Reading from manifest.yml')
with open(yml_path) as f:
info = yaml.load(f)
self.verify_info(info, self.manifest_yml_format)
return info or {}
return {}
@lazy
def dependencies(self):
return self.skill_info.get('dependencies') or {}
@lazy
def dependent_skills(self):
skills = set()
reqs = join(self.path, "skill_requirements.txt")
if exists(reqs):
with open(reqs, "r") as f:
for i in f.readlines():
skill = i.strip()
if skill:
skills.add(skill)
for i in self.dependencies.get('skill') or []:
skills.add(i)
return list(skills)
@lazy
def dependent_python_packages(self):
reqs = join(self.path, "requirements.txt")
req_lines = []
if exists(reqs):
with open(reqs, "r") as f:
req_lines += f.readlines()
req_lines += self.dependencies.get('python') or []
# Strip comments
req_lines = [l.split('#')[0].strip() for l in req_lines]
return [i for i in req_lines if i] # Strip empty lines
@lazy
def dependent_system_packages(self):
return self.dependencies.get('system') or {}
def remove(self):
if not self.is_local:
raise AlreadyRemoved(self.name)
try:
rmtree(self.path)
self.is_local = False
except OSError as e:
raise RemoveException(str(e))
LOG.info('Successfully removed ' + self.name)
@_backup_previous_version
def install(self, constraints=None):
if self.is_local:
raise AlreadyInstalled(self.name)
LOG.info("Downloading skill: " + self.url)
try:
tmp_location = mktemp()
Repo.clone_from(self.url, tmp_location)
self.is_local = True
Git(tmp_location).reset(self.sha or 'HEAD', hard=True)
except GitCommandError as e:
raise CloneException(e.stderr)
if isfile(join(tmp_location, '__init__.py')):
move(join(tmp_location, '__init__.py'),
join(tmp_location, '__init__'))
try:
move(tmp_location, self.path)
if self.msm:
self.run_skill_requirements()
self.install_system_deps()
self.run_pip(constraints)
finally:
if isfile(join(self.path, '__init__')):
move(join(self.path, '__init__'),
join(self.path, '__init__.py'))
LOG.info('Successfully installed ' + self.name)
def update_deps(self, constraints=None):
if self.msm:
self.run_skill_requirements()
self.install_system_deps()
self.run_pip(constraints)
def _find_sha_branch(self):
git = Git(self.path)
sha_branches = git.branch(
contains=self.sha, all=True
).split('\n')
sha_branch = [b for b in sha_branches if ' -> ' not in b][0]
sha_branch = sha_branch.strip('* \n').replace('remotes/', '')
for remote in git.remote().split('\n'):
sha_branch = sha_branch.replace(remote + '/', '')
return sha_branch
@_backup_previous_version
def update(self):
if not self.is_local:
raise NotInstalled('{} is not installed'.format(self.name))
git = Git(self.path)
with git_to_msm_exceptions():
sha_before = git.rev_parse('HEAD')
modified_files = git.status(porcelain=True, untracked='no')
if modified_files != '':
raise SkillModified('Uncommitted changes:\n' + modified_files)
git.fetch()
current_branch = git.rev_parse('--abbrev-ref', 'HEAD').strip()
if self.sha and current_branch in SWITCHABLE_BRANCHES:
# Check out correct branch
git.checkout(self._find_sha_branch())
git.merge(self.sha or 'origin/HEAD', ff_only=True)
sha_after = git.rev_parse('HEAD')
if sha_before != sha_after:
self.update_deps()
LOG.info('Updated ' + self.name)
# Trigger reload by modifying the timestamp
os.utime(join(self.path, '__init__.py'))
return True
else:
LOG.info('Nothing new for ' + self.name)
return False
@staticmethod
def find_git_url(path):
"""Get the git url from a folder"""
try:
return Git(path).config('remote.origin.url')
except GitError:
return ''
def __repr__(self):
return '<SkillEntry {}>'.format(' '.join(
'{}={}'.format(attr, self.__dict__[attr])
for attr in ['name', 'author', 'is_local']
))
|
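Editor's note: a hedged sketch of the rollback behaviour that @_backup_previous_version gives SkillEntry.install()/update(). The msm.exceptions import of MsmException is an assumption inferred from the surrounding code, not shown in this record.

# On most failures the skill folder is restored from a temp-dir snapshot;
# SkillModified/GitError deliberately skip the restore (see decorator above).
from msm import MycroftSkillsManager
from msm.exceptions import MsmException  # assumed import path

manager = MycroftSkillsManager()
skill = next(s for s in manager.list() if s.is_local)  # any installed skill
try:
    skill.update()          # wrapped by the decorator shown above
except MsmException:
    # skill.path was rolled back to the pre-update snapshot here
    pass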
MycroftAI/mycroft-skills-manager | msm/skill_entry.py | SkillEntry.attach | python | def attach(self, remote_entry):
self.name = remote_entry.name
self.sha = remote_entry.sha
self.url = remote_entry.url
self.author = remote_entry.author
return self | Attach a remote entry to a local entry | train | https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/skill_entry.py#L136-L142 | null | class SkillEntry(object):
pip_lock = Lock()
manifest_yml_format = {
'dependencies': {
'system': {},
'exes': [],
'skill': [],
'python': []
}
}
def __init__(self, name, path, url='', sha='', msm=None):
url = url.rstrip('/')
url = url[:-len('.git')] if url.endswith('.git') else url
self.name = name
self.path = path
self.url = url
self.sha = sha
self.msm = msm
if msm:
u = url.lower()
self.meta_info = msm.repo.skills_meta_info.get(u, {})
else:
self.meta_info = {}
self.author = self.extract_author(url) if url else ''
self.id = self.extract_repo_id(url) if url else name
self.is_local = exists(path)
self.old_path = None # Path of previous version while upgrading
@property
def is_beta(self):
return not self.sha or self.sha == 'HEAD'
def __str__(self):
return self.name
@classmethod
def from_folder(cls, path, msm=None):
return cls(basename(path), path, cls.find_git_url(path), msm=msm)
@classmethod
def create_path(cls, folder, url, name=''):
return join(folder, '{}.{}'.format(
name or cls.extract_repo_name(url), cls.extract_author(url)
).lower())
@staticmethod
def extract_repo_name(url):
s = url.rstrip('/').split("/")[-1]
a, b, c = s.rpartition('.git')
if not c:
return a
return s
@staticmethod
def extract_author(url):
return url.rstrip('/').split("/")[-2].split(':')[-1]
@classmethod
def extract_repo_id(cls, url):
return '{}:{}'.format(cls.extract_author(url).lower(),
cls.extract_repo_name(url)).lower()
@staticmethod
def _tokenize(x):
return x.replace('-', ' ').split()
@staticmethod
def _extract_tokens(s, tokens):
s = s.lower().replace('-', ' ')
extracted = []
for token in tokens:
extracted += [token] * s.count(token)
s = s.replace(token, '')
s = ' '.join(i for i in s.split(' ') if i)
tokens = [i for i in s.split(' ') if i]
return s, tokens, extracted
@classmethod
def _compare(cls, a, b):
return SequenceMatcher(a=a, b=b).ratio()
def match(self, query, author=None):
search, search_tokens, search_common = self._extract_tokens(
query, ['skill', 'fallback', 'mycroft']
)
name, name_tokens, name_common = self._extract_tokens(
self.name, ['skill', 'fallback', 'mycroft']
)
weights = [
(9, self._compare(name, search)),
(9, self._compare(name.split(' '), search_tokens)),
(2, self._compare(name_common, search_common)),
]
if author:
author_weight = self._compare(self.author, author)
weights.append((5, author_weight))
author_weight = author_weight
else:
author_weight = 1.0
return author_weight * (
sum(weight * val for weight, val in weights) /
sum(weight for weight, val in weights)
)
def run_pip(self, constraints=None):
if not self.dependent_python_packages:
return
# Use constraints to limit the installed versions
if constraints and not exists(constraints):
LOG.error('Couldn\'t find the constraints file')
return False
elif exists(DEFAULT_CONSTRAINTS):
constraints = DEFAULT_CONSTRAINTS
LOG.info('Installing requirements.txt for ' + self.name)
can_pip = os.access(dirname(sys.executable), os.W_OK | os.X_OK)
pip_args = [sys.executable, '-m', 'pip', 'install']
pip_args += self.dependent_python_packages
if constraints:
pip_args += ['-c', constraints]
if not can_pip:
pip_args = ['sudo', '-n'] + pip_args
with self.pip_lock:
proc = Popen(pip_args, stdout=PIPE, stderr=PIPE)
pip_code = proc.wait()
if pip_code != 0:
stderr = proc.stderr.read().decode()
if pip_code == 1 and 'sudo:' in stderr and pip_args[0] == 'sudo':
raise PipRequirementsException(
2, '', 'Permission denied while installing pip '
'dependencies. Please run in virtualenv or use sudo'
)
raise PipRequirementsException(
pip_code, proc.stdout.read().decode(), stderr
)
return True
def install_system_deps(self):
self.run_requirements_sh()
system_packages = {
exe: (packages or '').split()
for exe, packages in self.dependent_system_packages.items()
}
LOG.info('Installing system requirements...')
all_deps = system_packages.pop('all', [])
try:
manager = PakoManager()
success = manager.install(all_deps, overrides=system_packages)
except RuntimeError as e:
LOG.warning('Failed to launch package manager: {}'.format(e))
success = False
missing_exes = [
exe for exe in self.dependencies.get('exes') or []
if not shutil.which(exe)
]
if missing_exes:
if not success:
LOG.warning('Failed to install dependencies.')
if all_deps:
LOG.warning('Please install manually: {}'.format(
' '.join(all_deps)
))
raise SkillRequirementsException('Could not find exes: {}'.format(
', '.join(missing_exes)
))
return success
def run_requirements_sh(self):
setup_script = join(self.path, "requirements.sh")
if not exists(setup_script):
return False
with work_dir(self.path):
rc = subprocess.call(["bash", setup_script])
if rc != 0:
LOG.error("Requirements.sh failed with error code: " + str(rc))
raise SystemRequirementsException(rc)
LOG.info("Successfully ran requirements.sh for " + self.name)
return True
def run_skill_requirements(self):
if not self.msm:
raise ValueError('Pass msm to SkillEntry to install skill deps')
try:
for skill_dep in self.dependent_skills:
LOG.info("Installing skill dependency: {}".format(skill_dep))
try:
self.msm.install(skill_dep)
except AlreadyInstalled:
pass
except Exception as e:
raise SkillRequirementsException(e)
def verify_info(self, info, fmt):
if not info:
return
if not isinstance(info, type(fmt)):
LOG.warning('Invalid value type manifest.yml for {}: {}'.format(
self.name, type(info)
))
return
if not isinstance(info, dict) or not fmt:
return
for key in info:
if key not in fmt:
LOG.warning('Unknown key in manifest.yml for {}: {}'.format(
self.name, key
))
continue
self.verify_info(info[key], fmt[key])
@lazy
def skill_info(self):
yml_path = join(self.path, 'manifest.yml')
if exists(yml_path):
LOG.info('Reading from manifest.yml')
with open(yml_path) as f:
info = yaml.load(f)
self.verify_info(info, self.manifest_yml_format)
return info or {}
return {}
@lazy
def dependencies(self):
return self.skill_info.get('dependencies') or {}
@lazy
def dependent_skills(self):
skills = set()
reqs = join(self.path, "skill_requirements.txt")
if exists(reqs):
with open(reqs, "r") as f:
for i in f.readlines():
skill = i.strip()
if skill:
skills.add(skill)
for i in self.dependencies.get('skill') or []:
skills.add(i)
return list(skills)
@lazy
def dependent_python_packages(self):
reqs = join(self.path, "requirements.txt")
req_lines = []
if exists(reqs):
with open(reqs, "r") as f:
req_lines += f.readlines()
req_lines += self.dependencies.get('python') or []
# Strip comments
req_lines = [l.split('#')[0].strip() for l in req_lines]
return [i for i in req_lines if i] # Strip empty lines
@lazy
def dependent_system_packages(self):
return self.dependencies.get('system') or {}
def remove(self):
if not self.is_local:
raise AlreadyRemoved(self.name)
try:
rmtree(self.path)
self.is_local = False
except OSError as e:
raise RemoveException(str(e))
LOG.info('Successfully removed ' + self.name)
@_backup_previous_version
def install(self, constraints=None):
if self.is_local:
raise AlreadyInstalled(self.name)
LOG.info("Downloading skill: " + self.url)
try:
tmp_location = mktemp()
Repo.clone_from(self.url, tmp_location)
self.is_local = True
Git(tmp_location).reset(self.sha or 'HEAD', hard=True)
except GitCommandError as e:
raise CloneException(e.stderr)
if isfile(join(tmp_location, '__init__.py')):
move(join(tmp_location, '__init__.py'),
join(tmp_location, '__init__'))
try:
move(tmp_location, self.path)
if self.msm:
self.run_skill_requirements()
self.install_system_deps()
self.run_pip(constraints)
finally:
if isfile(join(self.path, '__init__')):
move(join(self.path, '__init__'),
join(self.path, '__init__.py'))
LOG.info('Successfully installed ' + self.name)
def update_deps(self, constraints=None):
if self.msm:
self.run_skill_requirements()
self.install_system_deps()
self.run_pip(constraints)
def _find_sha_branch(self):
git = Git(self.path)
sha_branches = git.branch(
contains=self.sha, all=True
).split('\n')
sha_branch = [b for b in sha_branches if ' -> ' not in b][0]
sha_branch = sha_branch.strip('* \n').replace('remotes/', '')
for remote in git.remote().split('\n'):
sha_branch = sha_branch.replace(remote + '/', '')
return sha_branch
@_backup_previous_version
def update(self):
if not self.is_local:
raise NotInstalled('{} is not installed'.format(self.name))
git = Git(self.path)
with git_to_msm_exceptions():
sha_before = git.rev_parse('HEAD')
modified_files = git.status(porcelain=True, untracked='no')
if modified_files != '':
raise SkillModified('Uncommitted changes:\n' + modified_files)
git.fetch()
current_branch = git.rev_parse('--abbrev-ref', 'HEAD').strip()
if self.sha and current_branch in SWITCHABLE_BRANCHES:
# Check out correct branch
git.checkout(self._find_sha_branch())
git.merge(self.sha or 'origin/HEAD', ff_only=True)
sha_after = git.rev_parse('HEAD')
if sha_before != sha_after:
self.update_deps()
LOG.info('Updated ' + self.name)
# Trigger reload by modifying the timestamp
os.utime(join(self.path, '__init__.py'))
return True
else:
LOG.info('Nothing new for ' + self.name)
return False
@staticmethod
def find_git_url(path):
"""Get the git url from a folder"""
try:
return Git(path).config('remote.origin.url')
except GitError:
return ''
def __repr__(self):
return '<SkillEntry {}>'.format(' '.join(
'{}={}'.format(attr, self.__dict__[attr])
for attr in ['name', 'author', 'is_local']
))
|
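Editor's note: a sketch of SkillEntry.attach(), where a locally discovered entry adopts the canonical name/url/sha of its remote counterpart. The folder path and URL are hypothetical placeholders.

# Pair a local folder entry with the matching remote repo entry.
from msm.skill_entry import SkillEntry

local = SkillEntry.from_folder('/opt/mycroft/skills/example-skill.author')
remote = SkillEntry('example-skill', local.path,
                    url='https://github.com/author/example-skill', sha='HEAD')
local.attach(remote)   # copies name, sha, url and author onto the local entry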
Hundemeier/sacn | sacn/sender.py | sACNsender.activate_output | python | def activate_output(self, universe: int) -> None:
check_universe(universe)
# check, if the universe already exists in the list:
if universe in self._outputs:
return
# add new sending:
new_output = Output(DataPacket(cid=self.__CID, sourceName=self.source_name, universe=universe))
self._outputs[universe] = new_output | Activates a universe that then starts sending every second.
See http://tsp.esta.org/tsp/documents/docs/E1-31-2016.pdf for more information
:param universe: the universe to activate | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/sender.py#L56-L68 | [
"def check_universe(universe: int):\n if universe not in range(1, 64000):\n raise TypeError(f'Universe must be between [1-63999]! Universe was {universe}')\n"
] | class sACNsender:
def __init__(self, bind_address: str = "0.0.0.0", bind_port: int = DEFAULT_PORT,
source_name: str = "default source name", cid: tuple = (),
fps: int = 30, universeDiscovery: bool = True):
"""
Creates a sender object. A sender is used to manage multiple sACN universes and handles their sending.
DMX data is sent out every second when no data changes. Some changes may not be sent out, because the fps
setting defines how often packets are sent out to prevent network overuse. So if you change the DMX values too
often within a second, not all of them may be sent. Vary the fps parameter to your needs (Default=30).
Note that a bind address is needed on Windows for sending out multicast packets.
:param bind_address: the IP-Address to bind to.
For multicast on a Windows machine this must be set to a proper value otherwise omit.
:param bind_port: optionally bind to a specific port. Default=5568. It is not recommended to change the port.
Change the port number if you have trouble with another program or the sACNreceiver blocking the port
:param source_name: the source name used in the sACN packets.
:param cid: the cid. If not given, a random CID will be generated.
:param fps: the frames per second. See above explanation. Has to be >0
"""
self.source_name: str = source_name
if len(cid) != 16:
cid = tuple(int(random.random() * 255) for _ in range(0, 16))
self.__CID: tuple = cid
self._outputs: Dict[int, Output] = {}
self._fps = fps
self.bindAddress = bind_address
self.bind_port = bind_port
self._output_thread: OutputThread = None
self._universeDiscovery: bool = universeDiscovery
@property
def universeDiscovery(self) -> bool:
return self._universeDiscovery
@universeDiscovery.setter
def universeDiscovery(self, universeDiscovery: bool) -> None:
self._universeDiscovery = universeDiscovery
try: # try to set the value for the output thread
self._output_thread.universeDiscovery = universeDiscovery
except:
pass
def deactivate_output(self, universe: int) -> None:
"""
Deactivates an existing sending. All data from the existing sending output will be lost.
(TTL, Multicast, DMX data, ..)
:param universe: the universe to deactivate. If the universe was not activated before, no error is raised
"""
check_universe(universe)
try: # try to send out three messages with stream_termination bit set to 1
self._outputs[universe]._packet.option_StreamTerminated = True
for i in range(0, 3):
self._output_thread.send_out(self._outputs[universe])
except:
pass
try:
del self._outputs[universe]
except:
pass
def get_active_outputs(self) -> tuple:
"""
Returns a tuple with all active outputs. Useful when iterating over all sender indexes.
:return: a tuple of ints (every int is an activated universe; may be unsorted)
"""
return tuple(self._outputs.keys())
def move_universe(self, universe_from: int, universe_to: int) -> None:
"""
Moves a sending from one universe to another. All settings are preserved and only the universe changes
:param universe_from: the universe that should be moved
:param universe_to: the target universe. An existing universe will be overwritten
"""
check_universe(universe_from)
check_universe(universe_to)
# store the sending object and change the universe in the packet of the sending
tmp_output = self._outputs[universe_from]
tmp_output._packet.universe = universe_to
# deactivate sending
self.deactivate_output(universe_from)
# activate new sending with the new universe
self._outputs[universe_to] = tmp_output
def __getitem__(self, item: int) -> Output:
try:
return self._outputs[item]
except:
return None
def start(self, bind_address=None, bind_port: int = None, fps: int = None) -> None:
"""
Starts or restarts a new Thread with the parameters given in the constructor or
the parameters given in this function.
The parameters in this function do not override the class specific values!
:param bind_address: the IP-Address to bind to
:param bind_port: the port to bind to
:param fps: the fps to use. Note: this is not held precisely; use it for load balancing in the network
"""
if bind_address is None:
bind_address = self.bindAddress
if fps is None:
fps = self._fps
if bind_port is None:
bind_port = self.bind_port
self.stop()
self._output_thread = OutputThread(cid=self.__CID, source_name=self.source_name,
outputs=self._outputs, bind_address=bind_address,
bind_port=bind_port, fps=fps, universe_discovery=self._universeDiscovery)
self._output_thread.start()
def stop(self) -> None:
"""
Tries to stop a currently running sender. A running Thread will be stopped and should terminate.
"""
try:
self._output_thread.enabled_flag = False
except:
pass
def __del__(self):
# stop a potential running thread
self.stop()
|
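Editor's note: a minimal sender sketch for activate_output(). Assigning DMX values via sender[universe].dmx_data follows the sacn package convention and is an assumption here, since the Output internals are not part of this record.

# Send on universe 1; sACNsender and the methods below are documented above.
import sacn

sender = sacn.sACNsender()       # on Windows, pass a bind_address for multicast
sender.start()                   # spawn the output thread
sender.activate_output(1)        # universe 1 now sends at least once per second
sender[1].dmx_data = (255, 0, 0) + (0,) * 509   # 512 channels (assumed property)
sender.stop()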
Hundemeier/sacn | sacn/sender.py | sACNsender.deactivate_output | python | def deactivate_output(self, universe: int) -> None:
check_universe(universe)
try: # try to send out three messages with stream_termination bit set to 1
self._outputs[universe]._packet.option_StreamTerminated = True
for i in range(0, 3):
self._output_thread.send_out(self._outputs[universe])
except:
pass
try:
del self._outputs[universe]
except:
pass | Deactivates an existing sending. All data from the existing sending output will be lost.
(TTL, Multicast, DMX data, ..)
:param universe: the universe to deactivate. If the universe was not activated before, no error is raised | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/sender.py#L70-L86 | [
"def check_universe(universe: int):\n if universe not in range(1, 64000):\n raise TypeError(f'Universe must be between [1-63999]! Universe was {universe}')\n"
] | class sACNsender:
def __init__(self, bind_address: str = "0.0.0.0", bind_port: int = DEFAULT_PORT,
source_name: str = "default source name", cid: tuple = (),
fps: int = 30, universeDiscovery: bool = True):
"""
Creates a sender object. A sender is used to manage multiple sACN universes and handles their sending.
DMX data is sent out every second when no data changes. Some changes may not be sent out, because the fps
setting defines how often packets are sent out to prevent network overuse. So if you change the DMX values too
often within a second, not all of them may be sent. Vary the fps parameter to your needs (Default=30).
Note that a bind address is needed on Windows for sending out multicast packets.
:param bind_address: the IP-Address to bind to.
For multicast on a Windows machine this must be set to a proper value otherwise omit.
:param bind_port: optionally bind to a specific port. Default=5568. It is not recommended to change the port.
Change the port number if you have trouble with another program or the sACNreceiver blocking the port
:param source_name: the source name used in the sACN packets.
:param cid: the cid. If not given, a random CID will be generated.
:param fps: the frames per second. See above explanation. Has to be >0
"""
self.source_name: str = source_name
if len(cid) != 16:
cid = tuple(int(random.random() * 255) for _ in range(0, 16))
self.__CID: tuple = cid
self._outputs: Dict[int, Output] = {}
self._fps = fps
self.bindAddress = bind_address
self.bind_port = bind_port
self._output_thread: OutputThread = None
self._universeDiscovery: bool = universeDiscovery
@property
def universeDiscovery(self) -> bool:
return self._universeDiscovery
@universeDiscovery.setter
def universeDiscovery(self, universeDiscovery: bool) -> None:
self._universeDiscovery = universeDiscovery
try: # try to set the value for the output thread
self._output_thread.universeDiscovery = universeDiscovery
except:
pass
def activate_output(self, universe: int) -> None:
"""
Activates a universe that then starts sending every second.
See http://tsp.esta.org/tsp/documents/docs/E1-31-2016.pdf for more information
:param universe: the universe to activate
"""
check_universe(universe)
# check, if the universe already exists in the list:
if universe in self._outputs:
return
# add new sending:
new_output = Output(DataPacket(cid=self.__CID, sourceName=self.source_name, universe=universe))
self._outputs[universe] = new_output
def get_active_outputs(self) -> tuple:
"""
Returns a tuple with all active outputs. Useful when iterating over all sender indexes.
:return: a tuple of ints (every int is an activated universe; may be unsorted)
"""
return tuple(self._outputs.keys())
def move_universe(self, universe_from: int, universe_to: int) -> None:
"""
Moves a sending from one universe to another. All settings are preserved and only the universe changes
:param universe_from: the universe that should be moved
:param universe_to: the target universe. An existing universe will be overwritten
"""
check_universe(universe_from)
check_universe(universe_to)
# store the sending object and change the universe in the packet of the sending
tmp_output = self._outputs[universe_from]
tmp_output._packet.universe = universe_to
# deactivate sending
self.deactivate_output(universe_from)
# activate new sending with the new universe
self._outputs[universe_to] = tmp_output
def __getitem__(self, item: int) -> Output:
try:
return self._outputs[item]
except:
return None
def start(self, bind_address=None, bind_port: int = None, fps: int = None) -> None:
"""
Starts or restarts a new Thread with the parameters given in the constructor or
the parameters given in this function.
The parameters in this function do not override the class specific values!
:param bind_address: the IP-Address to bind to
:param bind_port: the port to bind to
:param fps: the fps to use. Note: this is not held precisely; use it for load balancing in the network
"""
if bind_address is None:
bind_address = self.bindAddress
if fps is None:
fps = self._fps
if bind_port is None:
bind_port = self.bind_port
self.stop()
self._output_thread = OutputThread(cid=self.__CID, source_name=self.source_name,
outputs=self._outputs, bind_address=bind_address,
bind_port=bind_port, fps=fps, universe_discovery=self._universeDiscovery)
self._output_thread.start()
def stop(self) -> None:
"""
Tries to stop a currently running sender. A running Thread will be stopped and should terminate.
"""
try:
self._output_thread.enabled_flag = False
except:
pass
def __del__(self):
# stop a potential running thread
self.stop()
|
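Editor's note: a sketch of deactivate_output(): three stream-terminated packets go out and every per-universe setting is dropped. The multicast attribute on the Output object is assumed from the sacn package, not this record.

# Demonstrate that per-universe settings die with the output.
import sacn

sender = sacn.sACNsender(bind_address='192.168.1.10')  # hypothetical interface
sender.start()
sender.activate_output(2)
sender[2].multicast = True       # per-universe setting (assumed attribute)
sender.deactivate_output(2)      # setting is lost together with the output
sender.stop()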
Hundemeier/sacn | sacn/sender.py | sACNsender.move_universe | python | def move_universe(self, universe_from: int, universe_to: int) -> None:
check_universe(universe_from)
check_universe(universe_to)
# store the sending object and change the universe in the packet of the sending
tmp_output = self._outputs[universe_from]
tmp_output._packet.universe = universe_to
# deactivate sending
self.deactivate_output(universe_from)
# activate new sending with the new universe
self._outputs[universe_to] = tmp_output | Moves a sending from one universe to another. All settings are preserved and only the universe changes
:param universe_from: the universe that should be moved
:param universe_to: the target universe. An existing universe will be overwritten | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/sender.py#L95-L109 | [
"def check_universe(universe: int):\n if universe not in range(1, 64000):\n raise TypeError(f'Universe must be between [1-63999]! Universe was {universe}')\n",
"def deactivate_output(self, universe: int) -> None:\n \"\"\"\n Deactivates an existing sending. Every data from the existing sending output will be lost.\n (TTL, Multicast, DMX data, ..)\n :param universe: the universe to deactivate. If the universe was not activated before, no error is raised\n \"\"\"\n check_universe(universe)\n try: # try to send out three messages with stream_termination bit set to 1\n self._outputs[universe]._packet.option_StreamTerminated = True\n for i in range(0, 3):\n self._output_thread.send_out(self._outputs[universe])\n except:\n pass\n try:\n del self._outputs[universe]\n except:\n pass\n"
] | class sACNsender:
def __init__(self, bind_address: str = "0.0.0.0", bind_port: int = DEFAULT_PORT,
source_name: str = "default source name", cid: tuple = (),
fps: int = 30, universeDiscovery: bool = True):
"""
Creates a sender object. A sender is used to manage multiple sACN universes and handles their sending.
DMX data is sent out every second when no data changes. Some changes may not be sent out, because the fps
setting defines how often packets are sent out to prevent network overuse. So if you change the DMX values too
often within a second, not all of them may be sent. Vary the fps parameter to your needs (Default=30).
Note that a bind address is needed on Windows for sending out multicast packets.
:param bind_address: the IP-Address to bind to.
For multicast on a Windows machine this must be set to a proper value otherwise omit.
:param bind_port: optionally bind to a specific port. Default=5568. It is not recommended to change the port.
Change the port number if you have trouble with another program or the sACNreceiver blocking the port
:param source_name: the source name used in the sACN packets.
:param cid: the cid. If not given, a random CID will be generated.
:param fps: the frames per second. See above explanation. Has to be >0
"""
self.source_name: str = source_name
if len(cid) != 16:
cid = tuple(int(random.random() * 255) for _ in range(0, 16))
self.__CID: tuple = cid
self._outputs: Dict[int, Output] = {}
self._fps = fps
self.bindAddress = bind_address
self.bind_port = bind_port
self._output_thread: OutputThread = None
self._universeDiscovery: bool = universeDiscovery
@property
def universeDiscovery(self) -> bool:
return self._universeDiscovery
@universeDiscovery.setter
def universeDiscovery(self, universeDiscovery: bool) -> None:
self._universeDiscovery = universeDiscovery
try: # try to set the value for the output thread
self._output_thread.universeDiscovery = universeDiscovery
except:
pass
def activate_output(self, universe: int) -> None:
"""
Activates a universe that then starts sending every second.
See http://tsp.esta.org/tsp/documents/docs/E1-31-2016.pdf for more information
:param universe: the universe to activate
"""
check_universe(universe)
# check, if the universe already exists in the list:
if universe in self._outputs:
return
# add new sending:
new_output = Output(DataPacket(cid=self.__CID, sourceName=self.source_name, universe=universe))
self._outputs[universe] = new_output
def deactivate_output(self, universe: int) -> None:
"""
Deactivates an existing sending. All data from the existing sending output will be lost.
(TTL, Multicast, DMX data, ..)
:param universe: the universe to deactivate. If the universe was not activated before, no error is raised
"""
check_universe(universe)
try: # try to send out three messages with stream_termination bit set to 1
self._outputs[universe]._packet.option_StreamTerminated = True
for i in range(0, 3):
self._output_thread.send_out(self._outputs[universe])
except:
pass
try:
del self._outputs[universe]
except:
pass
def get_active_outputs(self) -> tuple:
"""
Returns a tuple with all active outputs. Useful when iterating over all sender indexes.
:return: a tuple of ints (every int is an activated universe; may be unsorted)
"""
return tuple(self._outputs.keys())
def __getitem__(self, item: int) -> Output:
try:
return self._outputs[item]
except:
return None
def start(self, bind_address=None, bind_port: int = None, fps: int = None) -> None:
"""
Starts or restarts a new Thread with the parameters given in the constructor or
the parameters given in this function.
The parameters in this function do not override the class specific values!
:param bind_address: the IP-Address to bind to
:param bind_port: the port to bind to
:param fps: the fps to use. Note: this is not held precisely; use it for load balancing in the network
"""
if bind_address is None:
bind_address = self.bindAddress
if fps is None:
fps = self._fps
if bind_port is None:
bind_port = self.bind_port
self.stop()
self._output_thread = OutputThread(cid=self.__CID, source_name=self.source_name,
outputs=self._outputs, bind_address=bind_address,
bind_port=bind_port, fps=fps, universe_discovery=self._universeDiscovery)
self._output_thread.start()
def stop(self) -> None:
"""
Tries to stop a currently running sender. A running Thread will be stopped and should terminate.
"""
try:
self._output_thread.enabled_flag = False
except:
pass
def __del__(self):
# stop a potential running thread
self.stop()
|
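Editor's note: a sketch of move_universe() from the record above; the Output object survives the move, only the universe number inside its packet changes.

# Move an active output from universe 1 to universe 5.
import sacn

sender = sacn.sACNsender()
sender.start()
sender.activate_output(1)
sender.move_universe(1, 5)            # an existing universe 5 would be overwritten
print(sender.get_active_outputs())    # -> (5,)
sender.stop()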
Hundemeier/sacn | sacn/sender.py | sACNsender.start | python | def start(self, bind_address=None, bind_port: int = None, fps: int = None) -> None:
if bind_address is None:
bind_address = self.bindAddress
if fps is None:
fps = self._fps
if bind_port is None:
bind_port = self.bind_port
self.stop()
self._output_thread = OutputThread(cid=self.__CID, source_name=self.source_name,
outputs=self._outputs, bind_address=bind_address,
bind_port=bind_port, fps=fps, universe_discovery=self._universeDiscovery)
self._output_thread.start() | Starts or restarts a new Thread with the parameters given in the constructor or
the parameters given in this function.
The parameters in this function do not override the class specific values!
:param bind_address: the IP-Address to bind to
:param bind_port: the port to bind to
:param fps: the fps to use. Note: this is not held precisely; use it for load balancing in the network
"def stop(self) -> None:\n \"\"\"\n Tries to stop a current running sender. A running Thread will be stopped and should terminate.\n \"\"\"\n try:\n self._output_thread.enabled_flag = False\n except:\n pass\n"
] | class sACNsender:
def __init__(self, bind_address: str = "0.0.0.0", bind_port: int = DEFAULT_PORT,
source_name: str = "default source name", cid: tuple = (),
fps: int = 30, universeDiscovery: bool = True):
"""
Creates a sender object. A sender is used to manage multiple sACN universes and handles their sending.
DMX data is sent out every second when no data changes. Some changes may not be sent out, because the fps
setting defines how often packets are sent out to prevent network overuse. So if you change the DMX values too
often within a second, not all of them may be sent. Vary the fps parameter to your needs (Default=30).
Note that a bind address is needed on Windows for sending out multicast packets.
:param bind_address: the IP-Address to bind to.
For multicast on a Windows machine this must be set to a proper value otherwise omit.
:param bind_port: optionally bind to a specific port. Default=5568. It is not recommended to change the port.
Change the port number if you have trouble with another program or the sACNreceiver blocking the port
:param source_name: the source name used in the sACN packets.
:param cid: the cid. If not given, a random CID will be generated.
:param fps: the frames per second. See above explanation. Has to be >0
"""
self.source_name: str = source_name
if len(cid) != 16:
cid = tuple(int(random.random() * 255) for _ in range(0, 16))
self.__CID: tuple = cid
self._outputs: Dict[int, Output] = {}
self._fps = fps
self.bindAddress = bind_address
self.bind_port = bind_port
self._output_thread: OutputThread = None
self._universeDiscovery: bool = universeDiscovery
@property
def universeDiscovery(self) -> bool:
return self._universeDiscovery
@universeDiscovery.setter
def universeDiscovery(self, universeDiscovery: bool) -> None:
self._universeDiscovery = universeDiscovery
try: # try to set the value for the output thread
self._output_thread.universeDiscovery = universeDiscovery
except:
pass
def activate_output(self, universe: int) -> None:
"""
Activates a universe that then starts sending every second.
See http://tsp.esta.org/tsp/documents/docs/E1-31-2016.pdf for more information
:param universe: the universe to activate
"""
check_universe(universe)
# check, if the universe already exists in the list:
if universe in self._outputs:
return
# add new sending:
new_output = Output(DataPacket(cid=self.__CID, sourceName=self.source_name, universe=universe))
self._outputs[universe] = new_output
def deactivate_output(self, universe: int) -> None:
"""
Deactivates an existing sending. All data from the existing sending output will be lost.
(TTL, Multicast, DMX data, ..)
:param universe: the universe to deactivate. If the universe was not activated before, no error is raised
"""
check_universe(universe)
try: # try to send out three messages with stream_termination bit set to 1
self._outputs[universe]._packet.option_StreamTerminated = True
for i in range(0, 3):
self._output_thread.send_out(self._outputs[universe])
except:
pass
try:
del self._outputs[universe]
except:
pass
def get_active_outputs(self) -> tuple:
"""
Returns a tuple with all active outputs. Useful when iterating over all sender indexes.
:return: a tuple of ints (every int is an activated universe; may be unsorted)
"""
return tuple(self._outputs.keys())
def move_universe(self, universe_from: int, universe_to: int) -> None:
"""
Moves a sending from one universe to another. All settings are preserved and only the universe changes
:param universe_from: the universe that should be moved
:param universe_to: the target universe. An existing universe will be overwritten
"""
check_universe(universe_from)
check_universe(universe_to)
# store the sending object and change the universe in the packet of the sending
tmp_output = self._outputs[universe_from]
tmp_output._packet.universe = universe_to
# deactivate sending
self.deactivate_output(universe_from)
# activate new sending with the new universe
self._outputs[universe_to] = tmp_output
def __getitem__(self, item: int) -> Output:
try:
return self._outputs[item]
except:
return None
def stop(self) -> None:
"""
Tries to stop a currently running sender. A running Thread will be stopped and should terminate.
"""
try:
self._output_thread.enabled_flag = False
except:
pass
def __del__(self):
# stop a potential running thread
self.stop()
|
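Editor's note: a sketch of the start()/stop() semantics documented above: start() first stops any running thread, so calling it again restarts with the new per-call parameters without touching the values stored on the sender itself.

# Restart the output thread with different per-call parameters.
import sacn

sender = sacn.sACNsender(fps=30)
sender.start()            # output thread at 30 fps
sender.start(fps=10)      # restarts the thread at 10 fps; the stored fps stays 30
sender.stop()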
Hundemeier/sacn | sacn/receiver.py | sACNreceiver.listen_on | python | def listen_on(self, trigger: str, **kwargs) -> callable:
def decorator(f):
self.register_listener(trigger, f, **kwargs)
return f
return decorator | This is a simple decorator for registering a callback for an event. You can also use 'register_listener'.
A list with all possible options is available via LISTEN_ON_OPTIONS.
:param trigger: Currently supported options: 'universe availability change', 'universe' | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/receiver.py#L36-L45 | null | class sACNreceiver:
def __init__(self, bind_address: str = '0.0.0.0', bind_port: int = 5568):
"""
Make a receiver for sACN data. Do not forget to start and add callbacks for receiving messages!
:param bind_address: if you are on a Windows system and want to use multicast provide a valid interface
IP-Address! Otherwise omit.
:param bind_port: Default: 5568. It is not recommended to change this value!
Only use when you know what you are doing!
"""
# If you bind to a specific interface on the Mac, no multicast data will arrive.
# If you try to bind to all interfaces on Windows, no multicast data will arrive.
self._bindAddress = bind_address
self._thread = None
self._callbacks = {'availability': [],
'universe': []} # init with empty list, because otherwise an error gets thrown
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
try:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except: # Not all systems support multiple sockets on the same port and interface
pass
self.sock.bind((bind_address, bind_port))
def register_listener(self, trigger: str, func: callable, **kwargs) -> None:
"""
Register a listener for the given trigger. Raises a TypeError when the trigger is not a valid one.
To get a list with all valid triggers, use LISTEN_ON_OPTIONS.
:param trigger: the trigger on which the given callback should be used.
Currently supported: 'universe availability change', 'universe'
:param func: the callback. The parameters depend on the trigger. See README for more information
"""
if trigger in LISTEN_ON_OPTIONS:
if trigger == LISTEN_ON_OPTIONS[1]: # if the trigger is universe, use the universe from args as key
try:
self._callbacks[kwargs[LISTEN_ON_OPTIONS[1]]].append(func)
except:
self._callbacks[kwargs[LISTEN_ON_OPTIONS[1]]] = [func]
try:
self._callbacks[trigger].append(func)
except:
self._callbacks[trigger] = [func]
else:
raise TypeError(f'The given trigger "{trigger}" is not a valid one!')
def join_multicast(self, universe: int) -> None:
"""
Joins the multicast address that is used for the given universe. Note: If you are on Windows you must have given
a bind IP-Address for this feature to function properly. On the other hand you are not allowed to set a bind
address if you are on any other OS.
:param universe: the universe to join the multicast group.
The network hardware has to support the multicast feature!
"""
self.sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(calculate_multicast_addr(universe)) +
socket.inet_aton(self._bindAddress))
def leave_multicast(self, universe: int) -> None:
"""
Try to leave the multicast group with the specified universe. This does not throw any exception if the group
could not be left.
:param universe: the universe to leave the multicast group.
The network hardware has to support the multicast feature!
"""
try:
self.sock.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP,
socket.inet_aton(calculate_multicast_addr(universe)) +
socket.inet_aton(self._bindAddress))
except: # try to leave the multicast group for the universe
pass
def start(self) -> None:
"""
Starts a new thread that handles the input. If a thread is already running, the thread will be restarted.
"""
self.stop() # stop an existing thread
self._thread = receiverThread(socket=self.sock, callbacks=self._callbacks)
self._thread.start()
def stop(self) -> None:
"""
Stops a running thread. If no thread was started nothing happens.
"""
try:
self._thread.enabled_flag = False
except: # try to stop the thread
pass
def get_possible_universes(self) -> Tuple[int]:
"""
Get all universes that are possible because a data packet was received. Timed-out data is removed from the list,
so the list may change over time, depending on sources that are shutting down their streams.
:return: a tuple with all universes that were received so far and haven't timed out
"""
return tuple(self._thread.lastDataTimestamps.keys())
def __del__(self):
# stop a potential running thread
self.stop()
|
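Taken together, the methods above cover the receiver's whole lifecycle: construct, register callbacks, optionally join a multicast group, start the thread, and stop it again. A minimal usage sketch, assuming the package is importable as sacn and a source is transmitting universe 1 on the local network:

import time
import sacn

receiver = sacn.sACNreceiver()  # binds to '0.0.0.0':5568 by default
receiver.start()  # spawns the receiver thread

@receiver.listen_on('universe', universe=1)  # fires for every changed DMX frame on universe 1
def on_frame(packet):
    print(packet.dmxData[:8])  # first eight channel values

receiver.join_multicast(1)  # only needed for multicast sources
time.sleep(10)
receiver.leave_multicast(1)
receiver.stop()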
Hundemeier/sacn | sacn/receiver.py | sACNreceiver.register_listener | python | def register_listener(self, trigger: str, func: callable, **kwargs) -> None:
if trigger in LISTEN_ON_OPTIONS:
if trigger == LISTEN_ON_OPTIONS[1]: # if the trigger is universe, use the universe from args as key
try:
self._callbacks[kwargs[LISTEN_ON_OPTIONS[1]]].append(func)
except:
self._callbacks[kwargs[LISTEN_ON_OPTIONS[1]]] = [func]
try:
self._callbacks[trigger].append(func)
except:
self._callbacks[trigger] = [func]
else:
raise TypeError(f'The given trigger "{trigger}" is not a valid one!') | Register a listener for the given trigger. Raises a TypeError when the trigger is not a valid one.
To get a list with all valid triggers, use LISTEN_ON_OPTIONS.
:param trigger: the trigger on which the given callback should be used.
Currently supported: 'universe availability change', 'universe'
:param func: the callback. The parameters depend on the trigger. See README for more information | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/receiver.py#L47-L66 | null | class sACNreceiver:
def __init__(self, bind_address: str = '0.0.0.0', bind_port: int = 5568):
"""
Make a receiver for sACN data. Do not forget to start and add callbacks for receiving messages!
:param bind_address: if you are on a Windows system and want to use multicast provide a valid interface
IP-Address! Otherwise omit.
:param bind_port: Default: 5568. It is not recommended to change this value!
Only change it when you know what you are doing!
"""
# If you bind to a specific interface on the Mac, no multicast data will arrive.
# If you try to bind to all interfaces on Windows, no multicast data will arrive.
self._bindAddress = bind_address
self._thread = None
self._callbacks = {'availability': [],
'universe': []} # init with empty list, because otherwise an error gets thrown
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
try:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except: # Not all systems support multiple sockets on the same port and interface
pass
self.sock.bind((bind_address, bind_port))
def listen_on(self, trigger: str, **kwargs) -> callable:
"""
This is a simple decorator for registering a callback for an event. You can also use 'register_listener'.
A list with all possible options is available via LISTEN_ON_OPTIONS.
:param trigger: Currently supported options: 'universe availability change', 'universe'
"""
def decorator(f):
self.register_listener(trigger, f, **kwargs)
return f
return decorator
def join_multicast(self, universe: int) -> None:
"""
Joins the multicast address that is used for the given universe. Note: If you are on Windows you must have given
a bind IP-Address for this feature to function properly. On the other hand you are not allowed to set a bind
address if you are on any other OS.
:param universe: the universe to join the multicast group.
The network hardware has to support the multicast feature!
"""
self.sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(calculate_multicast_addr(universe)) +
socket.inet_aton(self._bindAddress))
def leave_multicast(self, universe: int) -> None:
"""
Try to leave the multicast group with the specified universe. This does not throw any exception if the group
could not be left.
:param universe: the universe to leave the multicast group.
The network hardware has to support the multicast feature!
"""
try:
self.sock.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP,
socket.inet_aton(calculate_multicast_addr(universe)) +
socket.inet_aton(self._bindAddress))
except: # try to leave the multicast group for the universe
pass
def start(self) -> None:
"""
Starts a new thread that handles the input. If a thread is already running, the thread will be restarted.
"""
self.stop() # stop an existing thread
self._thread = receiverThread(socket=self.sock, callbacks=self._callbacks)
self._thread.start()
def stop(self) -> None:
"""
Stops a running thread. If no thread was started nothing happens.
"""
try:
self._thread.enabled_flag = False
except: # try to stop the thread
pass
def get_possible_universes(self) -> Tuple[int]:
"""
Get all universes for which data packets have been received. Timed-out data is removed from the list,
so the list may change over time as sources shut down their streams.
:return: a tuple with all universes that have been received so far and have not timed out
"""
return tuple(self._thread.lastDataTimestamps.keys())
def __del__(self):
# stop a potential running thread
self.stop()
|
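register_listener is the non-decorator form of listen_on. A sketch assuming an existing receiver instance, and assuming LISTEN_ON_OPTIONS contains 'availability' and 'universe', as the callback dict in __init__ suggests:

def on_availability(universe, changed):
    # changed should be 'available' or 'timeout', per the receiver thread's callbacks
    print(universe, changed)

def on_frame(packet):
    print(packet.dmxData)

receiver.register_listener('availability', on_availability)
receiver.register_listener('universe', on_frame, universe=1)  # the universe kwarg becomes the dict key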
Hundemeier/sacn | sacn/receiver.py | sACNreceiver.join_multicast | python | def join_multicast(self, universe: int) -> None:
self.sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(calculate_multicast_addr(universe)) +
socket.inet_aton(self._bindAddress)) | Joins the multicast address that is used for the given universe. Note: If you are on Windows you must have given
a bind IP-Address for this feature to function properly. On the other hand you are not allowed to set a bind
address if you are on any other OS.
:param universe: the universe to join the multicast group.
The network hardware has to support the multicast feature! | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/receiver.py#L68-L78 | [
"def calculate_multicast_addr(universe: int) -> str:\n hi_byte = universe >> 8 # a little bit shifting here\n lo_byte = universe & 0xFF # a little bit mask there\n return f\"239.255.{hi_byte}.{lo_byte}\"\n"
] | class sACNreceiver:
def __init__(self, bind_address: str = '0.0.0.0', bind_port: int = 5568):
"""
Make a receiver for sACN data. Do not forget to start and add callbacks for receiving messages!
:param bind_address: if you are on a Windows system and want to use multicast provide a valid interface
IP-Address! Otherwise omit.
:param bind_port: Default: 5568. It is not recommended to change this value!
Only change it when you know what you are doing!
"""
# If you bind to a specific interface on the Mac, no multicast data will arrive.
# If you try to bind to all interfaces on Windows, no multicast data will arrive.
self._bindAddress = bind_address
self._thread = None
self._callbacks = {'availability': [],
'universe': []} # init with empty list, because otherwise an error gets thrown
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
try:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except: # Not all systems support multiple sockets on the same port and interface
pass
self.sock.bind((bind_address, bind_port))
def listen_on(self, trigger: str, **kwargs) -> callable:
"""
This is a simple decorator for registering a callback for an event. You can also use 'register_listener'.
A list with all possible options is available via LISTEN_ON_OPTIONS.
:param trigger: Currently supported options: 'universe availability change', 'universe'
"""
def decorator(f):
self.register_listener(trigger, f, **kwargs)
return f
return decorator
def register_listener(self, trigger: str, func: callable, **kwargs) -> None:
"""
Register a listener for the given trigger. Raises a TypeError when the trigger is not a valid one.
To get a list with all valid triggers, use LISTEN_ON_OPTIONS.
:param trigger: the trigger on which the given callback should be used.
Currently supported: 'universe availability change', 'universe'
:param func: the callback. The parameters depend on the trigger. See README for more information
"""
if trigger in LISTEN_ON_OPTIONS:
if trigger == LISTEN_ON_OPTIONS[1]: # if the trigger is universe, use the universe from args as key
try:
self._callbacks[kwargs[LISTEN_ON_OPTIONS[1]]].append(func)
except:
self._callbacks[kwargs[LISTEN_ON_OPTIONS[1]]] = [func]
try:
self._callbacks[trigger].append(func)
except:
self._callbacks[trigger] = [func]
else:
raise TypeError(f'The given trigger "{trigger}" is not a valid one!')
def leave_multicast(self, universe: int) -> None:
"""
Try to leave the multicast group with the specified universe. This does not throw any exception if the group
could not be left.
:param universe: the universe to leave the multicast group.
The network hardware has to support the multicast feature!
"""
try:
self.sock.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP,
socket.inet_aton(calculate_multicast_addr(universe)) +
socket.inet_aton(self._bindAddress))
except: # try to leave the multicast group for the universe
pass
def start(self) -> None:
"""
Starts a new thread that handles the input. If a thread is already running, the thread will be restarted.
"""
self.stop() # stop an existing thread
self._thread = receiverThread(socket=self.sock, callbacks=self._callbacks)
self._thread.start()
def stop(self) -> None:
"""
Stops a running thread. If no thread was started nothing happens.
"""
try:
self._thread.enabled_flag = False
except: # try to stop the thread
pass
def get_possible_universes(self) -> Tuple[int]:
"""
Get all universes for which data packets have been received. Timed-out data is removed from the list,
so the list may change over time as sources shut down their streams.
:return: a tuple with all universes that have been received so far and have not timed out
"""
return tuple(self._thread.lastDataTimestamps.keys())
def __del__(self):
# stop a potential running thread
self.stop()
|
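The called helper above shows the sACN multicast scheme: universe N maps to the IPv4 group 239.255.<high byte>.<low byte>. The mapping from the called_functions entry, checked in isolation:

def calculate_multicast_addr(universe: int) -> str:
    hi_byte = universe >> 8  # a little bit shifting here
    lo_byte = universe & 0xFF  # a little bit mask there
    return f"239.255.{hi_byte}.{lo_byte}"

assert calculate_multicast_addr(1) == '239.255.0.1'
assert calculate_multicast_addr(256) == '239.255.1.0'
assert calculate_multicast_addr(63999) == '239.255.249.255'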
Hundemeier/sacn | sacn/receiver.py | sACNreceiver.leave_multicast | python | def leave_multicast(self, universe: int) -> None:
try:
self.sock.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP,
socket.inet_aton(calculate_multicast_addr(universe)) +
socket.inet_aton(self._bindAddress))
except: # try to leave the multicast group for the universe
pass | Try to leave the multicast group with the specified universe. This does not throw any exception if the group
could not be left.
:param universe: the universe to leave the multicast group.
The network hardware has to support the multicast feature! | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/receiver.py#L80-L92 | [
"def calculate_multicast_addr(universe: int) -> str:\n hi_byte = universe >> 8 # a little bit shifting here\n lo_byte = universe & 0xFF # a little bit mask there\n return f\"239.255.{hi_byte}.{lo_byte}\"\n"
] | class sACNreceiver:
def __init__(self, bind_address: str = '0.0.0.0', bind_port: int = 5568):
"""
Make a receiver for sACN data. Do not forget to start and add callbacks for receiving messages!
:param bind_address: if you are on a Windows system and want to use multicast provide a valid interface
IP-Address! Otherwise omit.
:param bind_port: Default: 5568. It is not recommended to change this value!
Only change it when you know what you are doing!
"""
# If you bind to a specific interface on the Mac, no multicast data will arrive.
# If you try to bind to all interfaces on Windows, no multicast data will arrive.
self._bindAddress = bind_address
self._thread = None
self._callbacks = {'availability': [],
'universe': []} # init with empty list, because otherwise an error gets thrown
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
try:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except: # Not all systems support multiple sockets on the same port and interface
pass
self.sock.bind((bind_address, bind_port))
def listen_on(self, trigger: str, **kwargs) -> callable:
"""
This is a simple decorator for registering a callback for an event. You can also use 'register_listener'.
A list with all possible options is available via LISTEN_ON_OPTIONS.
:param trigger: Currently supported options: 'universe availability change', 'universe'
"""
def decorator(f):
self.register_listener(trigger, f, **kwargs)
return f
return decorator
def register_listener(self, trigger: str, func: callable, **kwargs) -> None:
"""
Register a listener for the given trigger. Raises a TypeError when the trigger is not a valid one.
To get a list with all valid triggers, use LISTEN_ON_OPTIONS.
:param trigger: the trigger on which the given callback should be used.
Currently supported: 'universe availability change', 'universe'
:param func: the callback. The parameters depend on the trigger. See README for more information
"""
if trigger in LISTEN_ON_OPTIONS:
if trigger == LISTEN_ON_OPTIONS[1]: # if the trigger is universe, use the universe from args as key
try:
self._callbacks[kwargs[LISTEN_ON_OPTIONS[1]]].append(func)
except:
self._callbacks[kwargs[LISTEN_ON_OPTIONS[1]]] = [func]
try:
self._callbacks[trigger].append(func)
except:
self._callbacks[trigger] = [func]
else:
raise TypeError(f'The given trigger "{trigger}" is not a valid one!')
def join_multicast(self, universe: int) -> None:
"""
Joins the multicast address that is used for the given universe. Note: If you are on Windows you must have given
a bind IP-Address for this feature to function properly. On the other hand you are not allowed to set a bind
address if you are on any other OS.
:param universe: the universe to join the multicast group.
The network hardware has to support the multicast feature!
"""
self.sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(calculate_multicast_addr(universe)) +
socket.inet_aton(self._bindAddress))
def start(self) -> None:
"""
Starts a new thread that handles the input. If a thread is already running, the thread will be restarted.
"""
self.stop() # stop an existing thread
self._thread = receiverThread(socket=self.sock, callbacks=self._callbacks)
self._thread.start()
def stop(self) -> None:
"""
Stops a running thread. If no thread was started nothing happens.
"""
try:
self._thread.enabled_flag = False
except: # try to stop the thread
pass
def get_possible_universes(self) -> Tuple[int]:
"""
Get all universes for which data packets have been received. Timed-out data is removed from the list,
so the list may change over time as sources shut down their streams.
:return: a tuple with all universes that have been received so far and have not timed out
"""
return tuple(self._thread.lastDataTimestamps.keys())
def __del__(self):
# stop a potential running thread
self.stop()
|
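Both join_multicast and leave_multicast hand setsockopt the same 8-byte ip_mreq structure: the 4-byte multicast group address followed by the 4-byte interface address. A sketch of those bytes for universe 1 and the default bind address:

import socket

group = socket.inet_aton('239.255.0.1')  # multicast group for universe 1
iface = socket.inet_aton('0.0.0.0')      # the receiver's _bindAddress
mreq = group + iface                     # passed to IP_ADD_MEMBERSHIP / IP_DROP_MEMBERSHIP
assert len(mreq) == 8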
Hundemeier/sacn | sacn/receiver.py | sACNreceiver.start | python | def start(self) -> None:
self.stop() # stop an existing thread
self._thread = receiverThread(socket=self.sock, callbacks=self._callbacks)
self._thread.start() | Starts a new thread that handles the input. If a thread is already running, the thread will be restarted. | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/receiver.py#L94-L100 | [
"def stop(self) -> None:\n \"\"\"\n Stops a running thread. If no thread was started nothing happens.\n \"\"\"\n try:\n self._thread.enabled_flag = False\n except: # try to stop the thread\n pass\n"
] | class sACNreceiver:
def __init__(self, bind_address: str = '0.0.0.0', bind_port: int = 5568):
"""
Make a receiver for sACN data. Do not forget to start and add callbacks for receiving messages!
:param bind_address: if you are on a Windows system and want to use multicast provide a valid interface
IP-Address! Otherwise omit.
:param bind_port: Default: 5568. It is not recommended to change this value!
Only change it when you know what you are doing!
"""
# If you bind to a specific interface on the Mac, no multicast data will arrive.
# If you try to bind to all interfaces on Windows, no multicast data will arrive.
self._bindAddress = bind_address
self._thread = None
self._callbacks = {'availability': [],
'universe': []} # init with empty list, because otherwise an error gets thrown
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
try:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except: # Not all systems support multiple sockets on the same port and interface
pass
self.sock.bind((bind_address, bind_port))
def listen_on(self, trigger: str, **kwargs) -> callable:
"""
This is a simple decorator for registering a callback for an event. You can also use 'register_listener'.
A list with all possible options is available via LISTEN_ON_OPTIONS.
:param trigger: Currently supported options: 'universe availability change', 'universe'
"""
def decorator(f):
self.register_listener(trigger, f, **kwargs)
return f
return decorator
def register_listener(self, trigger: str, func: callable, **kwargs) -> None:
"""
Register a listener for the given trigger. Raises a TypeError when the trigger is not a valid one.
To get a list with all valid triggers, use LISTEN_ON_OPTIONS.
:param trigger: the trigger on which the given callback should be used.
Currently supported: 'universe availability change', 'universe'
:param func: the callback. The parameters depend on the trigger. See README for more information
"""
if trigger in LISTEN_ON_OPTIONS:
if trigger == LISTEN_ON_OPTIONS[1]: # if the trigger is universe, use the universe from args as key
try:
self._callbacks[kwargs[LISTEN_ON_OPTIONS[1]]].append(func)
except:
self._callbacks[kwargs[LISTEN_ON_OPTIONS[1]]] = [func]
try:
self._callbacks[trigger].append(func)
except:
self._callbacks[trigger] = [func]
else:
raise TypeError(f'The given trigger "{trigger}" is not a valid one!')
def join_multicast(self, universe: int) -> None:
"""
Joins the multicast address that is used for the given universe. Note: If you are on Windows you must have given
a bind IP-Address for this feature to function properly. On the other hand you are not allowed to set a bind
address if you are on any other OS.
:param universe: the universe to join the multicast group.
The network hardware has to support the multicast feature!
"""
self.sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(calculate_multicast_addr(universe)) +
socket.inet_aton(self._bindAddress))
def leave_multicast(self, universe: int) -> None:
"""
Try to leave the multicast group with the specified universe. This does not throw any exception if the group
could not be left.
:param universe: the universe to leave the multicast group.
The network hardware has to support the multicast feature!
"""
try:
self.sock.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP,
socket.inet_aton(calculate_multicast_addr(universe)) +
socket.inet_aton(self._bindAddress))
except: # try to leave the multicast group for the universe
pass
def stop(self) -> None:
"""
Stops a running thread. If no thread was started nothing happens.
"""
try:
self._thread.enabled_flag = False
except: # try to stop the thread
pass
def get_possible_universes(self) -> Tuple[int]:
"""
Get all universes for which data packets have been received. Timed-out data is removed from the list,
so the list may change over time as sources shut down their streams.
:return: a tuple with all universes that have been received so far and have not timed out
"""
return tuple(self._thread.lastDataTimestamps.keys())
def __del__(self):
# stop a potential running thread
self.stop()
|
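start() always calls stop() first, so a second call simply replaces the thread; stop() in turn only flips enabled_flag and lets the loop exit on its next pass. A minimal standalone thread using the same cooperative-shutdown pattern:

import threading
import time

class Worker(threading.Thread):
    def __init__(self):
        self.enabled_flag = True  # checked once per loop pass, as in receiverThread
        super().__init__(name='worker')

    def run(self):
        while self.enabled_flag:
            time.sleep(0.1)  # stand-in for the 100 ms socket timeout

worker = Worker()
worker.start()
worker.enabled_flag = False  # this is all stop() does
worker.join()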
Hundemeier/sacn | sacn/messages/data_packet.py | DataPacket.dmxData | python | def dmxData(self, data: tuple):
newData = [0]*512
for i in range(0, min(len(data), 512)):
newData[i] = data[i]
self._dmxData = tuple(newData)
# in theory this class supports dynamic length, so the next line is correcting the length
self.length = 126 + len(self._dmxData) | For legacy devices and to prevent errors, the length of the DMX data is normalized to 512 | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/messages/data_packet.py#L69-L78 | null | class DataPacket(RootLayer):
def __init__(self, cid: tuple, sourceName: str, universe: int, dmxData: tuple = (), priority: int = 100,
sequence: int = 0, streamTerminated: bool = False, previewData: bool = False):
self._vector1 = VECTOR_E131_DATA_PACKET
self._vector2 = VECTOR_DMP_SET_PROPERTY
self.sourceName: str = sourceName
self.priority = priority
self._syncAddr = (0, 0) # currently not supported
self.universe = universe
self.option_StreamTerminated: bool = streamTerminated
self.option_PreviewData: bool = previewData
self.sequence = sequence
self.dmxData = dmxData
super().__init__(126 + len(dmxData), cid, VECTOR_ROOT_E131_DATA)
def __str__(self):
return f'sACN DataPacket: Universe: {self.universe}, Priority: {self.priority}, Sequence: {self.sequence} ' \
f'CID: {self._cid}'
@property
def priority(self) -> int:
return self._priority
@priority.setter
def priority(self, priority: int):
if priority not in range(0, 201):
raise TypeError(f'priority must be in range [0-200]! value was {priority}')
self._priority = priority
@property
def universe(self) -> int:
return self._universe
@universe.setter
def universe(self, universe: int):
if universe not in range(1, 64000):
raise TypeError(f'universe must be [1-63999]! value was {universe}')
self._universe = universe
@property
def sequence(self) -> int:
return self._sequence
@sequence.setter
def sequence(self, sequence: int):
if sequence not in range(0, 256):
raise TypeError(f'Sequence is a byte! values: [0-255]! value was {sequence}')
self._sequence = sequence
def sequence_increase(self):
self._sequence += 1
if self._sequence > 0xFF:
self._sequence = 0
@property
def dmxData(self) -> tuple:
return self._dmxData
@dmxData.setter
def getBytes(self) -> tuple:
rtrnList = super().getBytes()
# Flags and Length Framing Layer:-------
rtrnList.extend(make_flagsandlength(self.length - 38))
# Vector Framing Layer:-----------------
rtrnList.extend(self._vector1)
# sourceName:---------------------------
# make a 64 byte long sourceName
tmpSourceName = [0] * 64
for i in range(0, min(len(tmpSourceName), len(self.sourceName))):
tmpSourceName[i] = ord(self.sourceName[i])
rtrnList.extend(tmpSourceName)
# priority------------------------------
rtrnList.append(self._priority)
# syncAddress---------------------------
rtrnList.extend(self._syncAddr)
# sequence------------------------------
rtrnList.append(self._sequence)
# Options Flags:------------------------
tmpOptionsFlags = 0
# stream terminated:
tmpOptionsFlags += int(self.option_StreamTerminated) << 6
# preview data:
tmpOptionsFlags += int(self.option_PreviewData) << 7
rtrnList.append(tmpOptionsFlags)
# universe:-----------------------------
rtrnList.extend(int_to_bytes(self._universe))
# DMP Layer:---------------------------------------------------
# Flags and Length DMP Layer:-----------
rtrnList.extend(make_flagsandlength(self.length - 115))
# Vector DMP Layer:---------------------
rtrnList.append(self._vector2)
# Some static values (Address & Data Type, First Property addr, ...)
rtrnList.extend([0xa1, 0x00, 0x00, 0x00, 0x01])
# Length of the data:-------------------
lengthDmxData = len(self._dmxData)+1
rtrnList.extend(int_to_bytes(lengthDmxData))
# DMX data:-----------------------------
rtrnList.append(0x00) # DMX Start Code
rtrnList.extend(self._dmxData)
return tuple(rtrnList)
@staticmethod
def make_data_packet(raw_data) -> 'DataPacket':
"""
Converts raw byte data to a sACN DataPacket. Note that the raw bytes have to come from a 2016 sACN Message.
This does not support Sync Addresses, Force_Sync option and DMX Start code!
:param raw_data: raw bytes as tuple or list
:return: a DataPacket with the properties set like the raw bytes
"""
# Check if the length is sufficient
if len(raw_data) < 126:
raise TypeError('The length of the provided data is not long enough! Min length is 126!')
# Check if the three Vectors are correct
if tuple(raw_data[18:22]) != tuple(VECTOR_ROOT_E131_DATA) or \
tuple(raw_data[40:44]) != tuple(VECTOR_E131_DATA_PACKET) or \
raw_data[117] != VECTOR_DMP_SET_PROPERTY: # REMEMBER: when slicing: [inclusive:exclusive]
raise TypeError('Some of the vectors in the given raw data are not compatible to the E131 Standard!')
tmpPacket = DataPacket(cid=raw_data[22:38], sourceName=str(raw_data[44:108]),
universe=(0x100 * raw_data[113]) + raw_data[114]) # high byte first: universe = 256 * high_byte + low_byte
tmpPacket.priority = raw_data[108]
# SyncAddress in the future?!
tmpPacket.sequence = raw_data[111]
tmpPacket.option_PreviewData = bool(raw_data[112] & 0b10000000) # use the 7th bit as preview_data
tmpPacket.option_StreamTerminated = bool(raw_data[112] & 0b01000000)
tmpPacket.dmxData = raw_data[126:638]
return tmpPacket
def calculate_multicast_addr(self) -> str:
return calculate_multicast_addr(self.universe)
|
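Because the setter pads or truncates to exactly 512 slots, the stored tuple always has a fixed size. A quick illustration, assuming a DataPacket built with a 16-byte CID:

packet = DataPacket(cid=tuple(range(16)), sourceName='demo', universe=1)
packet.dmxData = (255, 128, 0)
assert len(packet.dmxData) == 512           # padded with zeros
assert packet.dmxData[:3] == (255, 128, 0)
assert packet.length == 126 + 512           # 638, the normalized packet length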
Hundemeier/sacn | sacn/messages/data_packet.py | DataPacket.make_data_packet | python | def make_data_packet(raw_data) -> 'DataPacket':
# Check if the length is sufficient
if len(raw_data) < 126:
raise TypeError('The length of the provided data is not long enough! Min length is 126!')
# Check if the three Vectors are correct
if tuple(raw_data[18:22]) != tuple(VECTOR_ROOT_E131_DATA) or \
tuple(raw_data[40:44]) != tuple(VECTOR_E131_DATA_PACKET) or \
raw_data[117] != VECTOR_DMP_SET_PROPERTY: # REMEMBER: when slicing: [inclusive:exclusive]
raise TypeError('Some of the vectors in the given raw data are not compatible to the E131 Standard!')
tmpPacket = DataPacket(cid=raw_data[22:38], sourceName=str(raw_data[44:108]),
universe=(0x100 * raw_data[113]) + raw_data[114]) # high byte first: universe = 256 * high_byte + low_byte
tmpPacket.priority = raw_data[108]
# SyncAddress in the future?!
tmpPacket.sequence = raw_data[111]
tmpPacket.option_PreviewData = bool(raw_data[112] & 0b10000000) # use the 7th bit as preview_data
tmpPacket.option_StreamTerminated = bool(raw_data[112] & 0b01000000)
tmpPacket.dmxData = raw_data[126:638]
return tmpPacket | Converts raw byte data to a sACN DataPacket. Note that the raw bytes have to come from a 2016 sACN Message.
This does not support Sync Addresses, Force_Sync option and DMX Start code!
:param raw_data: raw bytes as tuple or list
:return: a DataPacket with the properties set like the raw bytes | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/messages/data_packet.py#L124-L148 | null | class DataPacket(RootLayer):
def __init__(self, cid: tuple, sourceName: str, universe: int, dmxData: tuple = (), priority: int = 100,
sequence: int = 0, streamTerminated: bool = False, previewData: bool = False):
self._vector1 = VECTOR_E131_DATA_PACKET
self._vector2 = VECTOR_DMP_SET_PROPERTY
self.sourceName: str = sourceName
self.priority = priority
self._syncAddr = (0, 0) # currently not supported
self.universe = universe
self.option_StreamTerminated: bool = streamTerminated
self.option_PreviewData: bool = previewData
self.sequence = sequence
self.dmxData = dmxData
super().__init__(126 + len(dmxData), cid, VECTOR_ROOT_E131_DATA)
def __str__(self):
return f'sACN DataPacket: Universe: {self.universe}, Priority: {self.priority}, Sequence: {self.sequence} ' \
f'CID: {self._cid}'
@property
def priority(self) -> int:
return self._priority
@priority.setter
def priority(self, priority: int):
if priority not in range(0, 201):
raise TypeError(f'priority must be in range [0-200]! value was {priority}')
self._priority = priority
@property
def universe(self) -> int:
return self._universe
@universe.setter
def universe(self, universe: int):
if universe not in range(1, 64000):
raise TypeError(f'universe must be [1-63999]! value was {universe}')
self._universe = universe
@property
def sequence(self) -> int:
return self._sequence
@sequence.setter
def sequence(self, sequence: int):
if sequence not in range(0, 256):
raise TypeError(f'Sequence is a byte! values: [0-255]! value was {sequence}')
self._sequence = sequence
def sequence_increase(self):
self._sequence += 1
if self._sequence > 0xFF:
self._sequence = 0
@property
def dmxData(self) -> tuple:
return self._dmxData
@dmxData.setter
def dmxData(self, data: tuple):
"""
For legacy devices and to prevent errors, the length of the DMX data is normalized to 512
"""
newData = [0]*512
for i in range(0, min(len(data), 512)):
newData[i] = data[i]
self._dmxData = tuple(newData)
# in theory this class supports dynamic length, so the next line is correcting the length
self.length = 126 + len(self._dmxData)
def getBytes(self) -> tuple:
rtrnList = super().getBytes()
# Flags and Length Framing Layer:-------
rtrnList.extend(make_flagsandlength(self.length - 38))
# Vector Framing Layer:-----------------
rtrnList.extend(self._vector1)
# sourceName:---------------------------
# make a 64 byte long sourceName
tmpSourceName = [0] * 64
for i in range(0, min(len(tmpSourceName), len(self.sourceName))):
tmpSourceName[i] = ord(self.sourceName[i])
rtrnList.extend(tmpSourceName)
# priority------------------------------
rtrnList.append(self._priority)
# syncAddress---------------------------
rtrnList.extend(self._syncAddr)
# sequence------------------------------
rtrnList.append(self._sequence)
# Options Flags:------------------------
tmpOptionsFlags = 0
# stream terminated:
tmpOptionsFlags += int(self.option_StreamTerminated) << 6
# preview data:
tmpOptionsFlags += int(self.option_PreviewData) << 7
rtrnList.append(tmpOptionsFlags)
# universe:-----------------------------
rtrnList.extend(int_to_bytes(self._universe))
# DMP Layer:---------------------------------------------------
# Flags and Length DMP Layer:-----------
rtrnList.extend(make_flagsandlength(self.length - 115))
# Vector DMP Layer:---------------------
rtrnList.append(self._vector2)
# Some static values (Address & Data Type, First Property addr, ...)
rtrnList.extend([0xa1, 0x00, 0x00, 0x00, 0x01])
# Length of the data:-------------------
lengthDmxData = len(self._dmxData)+1
rtrnList.extend(int_to_bytes(lengthDmxData))
# DMX data:-----------------------------
rtrnList.append(0x00) # DMX Start Code
rtrnList.extend(self._dmxData)
return tuple(rtrnList)
@staticmethod
def calculate_multicast_addr(self) -> str:
return calculate_multicast_addr(self.universe)
|
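getBytes() and make_data_packet() are near-inverses (the sync address and DMX start code are not round-tripped), which makes a round trip a cheap sanity check; a sketch assuming DataPacket is importable:

original = DataPacket(cid=tuple(range(16)), sourceName='demo', universe=5, dmxData=(1, 2, 3))
raw = original.getBytes()  # 638 values for the normalized 512-channel payload
parsed = DataPacket.make_data_packet(raw)
assert parsed.universe == 5
assert parsed.priority == 100  # the default priority survives the round trip
assert parsed.dmxData[:3] == (1, 2, 3)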
Hundemeier/sacn | sacn/messages/root_layer.py | RootLayer.getBytes | python | def getBytes(self) -> list:
'''Returns the Root layer as list with bytes'''
tmpList = []
tmpList.extend(_FIRST_INDEX)
# first append the high byte from the Flags and Length
# high 4 bit: 0x7 then the bits 8-11(indexes) from _length
length = self.length - 16
tmpList.append((0x7 << 4) + (length >> 8))
# Then append the lower 8 bits from _length
tmpList.append(length & 0xFF)
tmpList.extend(self._vector)
tmpList.extend(self._cid)
return tmpList | Returns the Root layer as list with bytes | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/messages/root_layer.py#L33-L46 | null | class RootLayer:
def __init__(self, length: int, cid: tuple, vector: tuple):
self.length = length
if(len(vector) != 4):
raise ValueError('the length of the vector is not 4!')
self._vector = vector
if(len(cid) != 16):
raise ValueError('the length of the CID is not 16!')
self._cid = cid
@property
def length(self) -> int:
return self._length
@length.setter
def length(self, value: int):
self._length = value & 0xFFF # only use the least 12-Bit
|
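The two bytes appended after _FIRST_INDEX form the E1.31 'flags and length' field: the constant flag nibble 0x7 in the top four bits plus a 12-bit PDU length. The packing from above, checked in isolation for a full data packet:

length = 638 - 16  # root-layer PDU length for a 512-channel data packet
hi = (0x7 << 4) + (length >> 8)
lo = length & 0xFF
assert (hi, lo) == (0x72, 0x6E)  # flags 0x7, length 0x26E == 622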
Hundemeier/sacn | sacn/receiving/receiver_thread.py | receiverThread.is_legal_sequence | python | def is_legal_sequence(self, packet: DataPacket) -> bool:
# if the sequence of the packet is smaller than the last received sequence, return false
# therefore calculate the difference between the two values:
try: # try, because self.lastSequence might not have been initialized yet
diff = packet.sequence - self.lastSequence[packet.universe]
# if diff is between ]-20,0], return False for a bad packet sequence
if 0 >= diff > -20:
return False
except:
pass
# if the sequence is good, return True and refresh the list with the new value
self.lastSequence[packet.universe] = packet.sequence
return True | Check if the Sequence number of the DataPacket is legal.
For more information see page 17 of http://tsp.esta.org/tsp/documents/docs/E1-31-2016.pdf.
:param packet: the packet to check
:return: true if the sequence is legal. False if the sequence number is bad | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/receiving/receiver_thread.py#L109-L127 | null | class receiverThread(threading.Thread):
def __init__(self, socket, callbacks: Dict[any, list]):
"""
This is a private class and should not be used elsewhere. It handles the while loop running in the thread.
:param socket: the socket to listen on. It will not be initialized here; only the socket.recv
and socket.settimeout functions are used
:param callbacks: the list with all callbacks
"""
self.enabled_flag = True
self.socket = socket
self.callbacks: dict = callbacks
# previousData for storing the last data that was sent in a universe to check if the data has changed
self.previousData: dict = {}
# priorities are stored here. This is for checking if the incoming data has the best priority.
# universes are the keys and
# the value is a tuple with the last priority and the time when this priority recently was received
self.priorities: Dict[int, tuple] = {}
# store the last timestamp when something on a universe arrived for checking for timeouts
self.lastDataTimestamps: dict = {}
# store the last sequence number of a universe here:
self.lastSequence: dict = {}
super().__init__(name='sACN input/receiver thread')
def run(self):
logging.info('Started new sACN receiver thread')
self.socket.settimeout(0.1) # timeout as 100ms
self.enabled_flag = True
while self.enabled_flag:
# before receiving: check for timeouts
self.check_for_timeouts()
# receive the data
try:
raw_data, ip_sender = list(self.socket.recvfrom(1144)) # 1144 because the longest possible packet
# in the sACN standard is the universe discovery packet with a max length of 1144
except socket.timeout:
continue # if a timeout happens just go through while from the beginning
try:
tmp_packet = DataPacket.make_data_packet(raw_data)
except: # try to make a DataPacket. If it fails just go over it
continue
logging.debug(f'Received sACN packet:\n{tmp_packet}')
self.check_for_stream_terminated_and_refresh_timestamp(tmp_packet)
self.refresh_priorities(tmp_packet)
if not self.is_legal_priority(tmp_packet):
continue
if not self.is_legal_sequence(tmp_packet): # check for bad sequence number
continue
self.fire_callbacks_universe(tmp_packet)
logging.info('Stopped sACN receiver thread')
def check_for_timeouts(self) -> None:
# check all DataTimestamps for timeouts
for key, value in list(self.lastDataTimestamps.items()):
# this is converted to list, because the length of the dict changes
if check_timeout(value):
self.fire_timeout_callback_and_delete(key)
def check_for_stream_terminated_and_refresh_timestamp(self, packet: DataPacket) -> None:
# refresh the last timestamp on a universe, but check if it's the last message of a stream
# (the stream is terminated by the Stream termination bit)
if packet.option_StreamTerminated:
self.fire_timeout_callback_and_delete(packet.universe)
else:
# check if we add or refresh the data in lastDataTimestamps
if packet.universe not in self.lastDataTimestamps.keys():
for callback in self.callbacks[LISTEN_ON_OPTIONS[0]]:
try: # fire callbacks if this is the first received packet for this universe
callback(universe=packet.universe, changed='available')
except:
pass
self.lastDataTimestamps[packet.universe] = current_time_millis()
def fire_timeout_callback_and_delete(self, universe: int):
for callback in self.callbacks[LISTEN_ON_OPTIONS[0]]:
try:
callback(universe=universe, changed='timeout')
except:
pass
# delete the timestamp so that the callback is not fired multiple times
del self.lastDataTimestamps[universe]
# delete sequence entries so that no packet out of order problems occur
try:
del self.lastSequence[universe]
except Exception:
pass # sometimes an error occurs here; TODO: check why an error can occur here
def refresh_priorities(self, packet: DataPacket) -> None:
# check the priority and refresh the priorities dict
# check if the stored priority has timed out and make the current packet's priority the new one
if packet.universe not in self.priorities.keys() or \
self.priorities[packet.universe] is None or \
check_timeout(self.priorities[packet.universe][1]) or \
self.priorities[packet.universe][0] <= packet.priority: # if the received priority is higher
# than or equal to the stored one, then make it the new one
self.priorities[packet.universe] = (packet.priority, current_time_millis())
def is_legal_priority(self, packet: DataPacket):
"""
Check if the given packet has high enough priority for the stored values for the packet's universe.
:param packet: the packet to check
:return: returns True if the priority is good. Otherwise False
"""
# check if the packet's priority is high enough to get processed
if packet.universe not in self.callbacks.keys() or \
packet.priority < self.priorities[packet.universe][0]:
return False # return if the universe is not interesting
else:
return True
def fire_callbacks_universe(self, packet: DataPacket) -> None:
# call the listeners for the universe, but first check if the data has changed
# check if there are listeners for the universe before proceeding
if packet.universe not in self.previousData.keys() or \
self.previousData[packet.universe] is None or \
self.previousData[packet.universe] != packet.dmxData:
logging.debug('')
# set previous data and inherit callbacks
self.previousData[packet.universe] = packet.dmxData
for callback in self.callbacks[packet.universe]:
callback(packet)
|
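The ]-20, 0] window means a packet is rejected only when its sequence number equals the last one or lags it by fewer than 20; larger backward jumps count as a source restart or wrap-around. The rule restated in isolation:

def sequence_ok(new: int, last: int) -> bool:
    diff = new - last
    return not (0 >= diff > -20)

assert sequence_ok(11, 10)       # normal increment
assert not sequence_ok(10, 10)   # duplicate frame
assert not sequence_ok(5, 10)    # slightly out of order, dropped
assert sequence_ok(3, 250)       # diff is -247, accepted as a wrap-around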
Hundemeier/sacn | sacn/receiving/receiver_thread.py | receiverThread.is_legal_priority | python | def is_legal_priority(self, packet: DataPacket):
# check if the packet's priority is high enough to get processed
if packet.universe not in self.callbacks.keys() or \
packet.priority < self.priorities[packet.universe][0]:
return False # return if the universe is not interesting
else:
return True | Check if the given packet has high enough priority for the stored values for the packet's universe.
:param packet: the packet to check
:return: returns True if the priority is good. Otherwise False | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/receiving/receiver_thread.py#L129-L140 | null | class receiverThread(threading.Thread):
def __init__(self, socket, callbacks: Dict[any, list]):
"""
This is a private class and should not be used elsewhere. It handles the while loop running in the thread.
:param socket: the socket to listen on. It will not be initialized here; only the socket.recv
and socket.settimeout functions are used
:param callbacks: the list with all callbacks
"""
self.enabled_flag = True
self.socket = socket
self.callbacks: dict = callbacks
# previousData for storing the last data that was sent in a universe to check if the data has changed
self.previousData: dict = {}
# priorities are stored here. This is for checking if the incoming data has the best priority.
# universes are the keys and
# the value is a tuple with the last priority and the time when this priority recently was received
self.priorities: Dict[int, tuple] = {}
# store the last timestamp when something on a universe arrived for checking for timeouts
self.lastDataTimestamps: dict = {}
# store the last sequence number of a universe here:
self.lastSequence: dict = {}
super().__init__(name='sACN input/receiver thread')
def run(self):
logging.info('Started new sACN receiver thread')
self.socket.settimeout(0.1) # timeout as 100ms
self.enabled_flag = True
while self.enabled_flag:
# before receiving: check for timeouts
self.check_for_timeouts()
# receive the data
try:
raw_data, ip_sender = list(self.socket.recvfrom(1144)) # 1144 because the longest possible packet
# in the sACN standard is the universe discovery packet with a max length of 1144
except socket.timeout:
continue # if a timeout happens just go through while from the beginning
try:
tmp_packet = DataPacket.make_data_packet(raw_data)
except: # try to make a DataPacket. If it fails just go over it
continue
logging.debug(f'Received sACN packet:\n{tmp_packet}')
self.check_for_stream_terminated_and_refresh_timestamp(tmp_packet)
self.refresh_priorities(tmp_packet)
if not self.is_legal_priority(tmp_packet):
continue
if not self.is_legal_sequence(tmp_packet): # check for bad sequence number
continue
self.fire_callbacks_universe(tmp_packet)
logging.info('Stopped sACN receiver thread')
def check_for_timeouts(self) -> None:
# check all DataTimestamps for timeouts
for key, value in list(self.lastDataTimestamps.items()):
# this is converted to list, because the length of the dict changes
if check_timeout(value):
self.fire_timeout_callback_and_delete(key)
def check_for_stream_terminated_and_refresh_timestamp(self, packet: DataPacket) -> None:
# refresh the last timestamp on a universe, but check if it's the last message of a stream
# (the stream is terminated by the Stream termination bit)
if packet.option_StreamTerminated:
self.fire_timeout_callback_and_delete(packet.universe)
else:
# check if we add or refresh the data in lastDataTimestamps
if packet.universe not in self.lastDataTimestamps.keys():
for callback in self.callbacks[LISTEN_ON_OPTIONS[0]]:
try: # fire callbacks if this is the first received packet for this universe
callback(universe=packet.universe, changed='available')
except:
pass
self.lastDataTimestamps[packet.universe] = current_time_millis()
def fire_timeout_callback_and_delete(self, universe: int):
for callback in self.callbacks[LISTEN_ON_OPTIONS[0]]:
try:
callback(universe=universe, changed='timeout')
except:
pass
# delete the timestamp so that the callback is not fired multiple times
del self.lastDataTimestamps[universe]
# delete sequence entries so that no packet out of order problems occur
try:
del self.lastSequence[universe]
except Exception:
pass # sometimes an error occurs here; TODO: check why an error can occur here
def refresh_priorities(self, packet: DataPacket) -> None:
# check the priority and refresh the priorities dict
# check if the stored priority has timed out and make the current packet's priority the new one
if packet.universe not in self.priorities.keys() or \
self.priorities[packet.universe] is None or \
check_timeout(self.priorities[packet.universe][1]) or \
self.priorities[packet.universe][0] <= packet.priority: # if the received priority is higher
# than or equal to the stored one, then make it the new one
self.priorities[packet.universe] = (packet.priority, current_time_millis())
def is_legal_sequence(self, packet: DataPacket) -> bool:
"""
Check if the Sequence number of the DataPacket is legal.
For more information see page 17 of http://tsp.esta.org/tsp/documents/docs/E1-31-2016.pdf.
:param packet: the packet to check
:return: true if the sequence is legal. False if the sequence number is bad
"""
# if the sequence of the packet is smaller than the last received sequence, return false
# therefore calculate the difference between the two values:
try: # try, because self.lastSequence might not have been initialized yet
diff = packet.sequence - self.lastSequence[packet.universe]
# if diff is between ]-20,0], return False for a bad packet sequence
if 0 >= diff > -20:
return False
except:
pass
# if the sequence is good, return True and refresh the list with the new value
self.lastSequence[packet.universe] = packet.sequence
return True
def fire_callbacks_universe(self, packet: DataPacket) -> None:
# call the listeners for the universe, but first check if the data has changed
# check if there are listeners for the universe before proceeding
if packet.universe not in self.previousData.keys() or \
self.previousData[packet.universe] is None or \
self.previousData[packet.universe] != packet.dmxData:
logging.debug('')
# set previous data and inherit callbacks
self.previousData[packet.universe] = packet.dmxData
for callback in self.callbacks[packet.universe]:
callback(packet)
|
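Combined with refresh_priorities, this filter keeps only the highest currently active priority (0-200, default 100) per universe. The condition restated by itself, assuming a stored priority of 150 for universe 1 and one registered listener:

priorities = {1: (150, 0)}  # universe -> (priority, timestamp), as in self.priorities
callbacks = {1: []}         # a listener exists for universe 1

def legal(universe: int, priority: int) -> bool:
    return universe in callbacks and priority >= priorities[universe][0]

assert legal(1, 200)
assert not legal(1, 100)  # lower-priority source is ignored
assert not legal(2, 200)  # no listener registered for universe 2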
Hundemeier/sacn | sacn/messages/universe_discovery.py | convert_raw_data_to_universes | python | def convert_raw_data_to_universes(raw_data) -> tuple:
if len(raw_data)%2 != 0:
raise TypeError('The length of the given data is not a multiple of 2!')
rtrnList = []
for i in range(0, len(raw_data), 2):
rtrnList.append(two_bytes_to_int(raw_data[i], raw_data[i+1]))
return tuple(rtrnList) | Converts the raw data to a readable universes tuple. The raw_data is scanned from index 0 and has to contain
16-bit numbers with the high byte first. The data is converted from the start to the end!
:param raw_data: the raw data to convert
:return: tuple full with 16-bit numbers | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/messages/universe_discovery.py#L131-L143 | [
"def two_bytes_to_int(hi_byte: int, low_byte: int) -> int:\n \"\"\"\n Converts two bytes to a normal integer value.\n :param hi_byte: the high byte\n :param low_byte: the low byte\n :return: converted integer that has a value between [0-65535]\n \"\"\"\n return ((hi_byte & 0xFF)*256) + (low_byte & 0xFF)\n"
] | """
This class represents a universe discovery packet of the E1.31 Standard.
"""
from typing import List
from sacn.messages.root_layer import RootLayer, VECTOR_ROOT_E131_EXTENDED, \
VECTOR_E131_EXTENDED_DISCOVERY, VECTOR_UNIVERSE_DISCOVERY_UNIVERSE_LIST,\
make_flagsandlength, int_to_bytes
class UniverseDiscoveryPacket(RootLayer):
def __init__(self, cid: tuple, sourceName: str, universes: tuple, page: int = 0, lastPage: int = 0):
self.sourceName: str = sourceName
self._page: int = page
self._lastPage: int = lastPage
self._universes: list = universes
super().__init__((len(universes)*2)+120, cid, VECTOR_ROOT_E131_EXTENDED)
@property
def page(self) -> int:
return self._page
@page.setter
def page(self, page: int):
if page not in range(0, 256):
raise TypeError(f'Page is a byte! values: [0-255]! value was {page}')
self._page = page
@property
def lastPage(self) -> int:
return self._page
@lastPage.setter
def lastPage(self, lastPage: int):
if lastPage not in range(0, 256):
raise TypeError(f'Page is a byte! values: [0-255]! value was {lastPage}')
self._page = lastPage
@property
def universes(self) -> tuple:
return tuple(self._universes)
@universes.setter
def universes(self, universes: tuple):
if len(universes) > 512:
raise TypeError(f'Universes is a tuple with a max length of 512! The data in the tuple has to be int! '
f'Length was {len(universes)}')
self._universes = sorted(universes)
self.length = 121+(len(universes)*2) # generate new length value for the packet
def getBytes(self) -> tuple:
rtrnList = super().getBytes()
# Flags and Length Framing Layer:--------------------
rtrnList.extend(make_flagsandlength(self.length - 38))
# Vector Framing Layer:------------------------------
rtrnList.extend(VECTOR_E131_EXTENDED_DISCOVERY)
# source Name Framing Layer:-------------------------
# make a 64 byte long sourceName
tmpSourceName = [0] * 64
for i in range(0, min(len(tmpSourceName), len(self.sourceName))):
tmpSourceName[i] = ord(self.sourceName[i])
rtrnList.extend(tmpSourceName)
# reserved fields:-----------------------------------
rtrnList.extend([0]*4)
# Universe Discovery Layer:-------------------------------------
# Flags and Length:----------------------------------
rtrnList.extend(make_flagsandlength(self.length - 112))
# Vector UDL:----------------------------------------
rtrnList.extend(VECTOR_UNIVERSE_DISCOVERY_UNIVERSE_LIST)
# page:----------------------------------------------
rtrnList.append(self._page & 0xFF)
# last page:-----------------------------------------
rtrnList.append(self._lastPage & 0xFF)
# universes:-----------------------------------------
for universe in self._universes: # universe is a 16-bit number!
rtrnList.extend(int_to_bytes(universe))
return tuple(rtrnList)
@staticmethod
def make_universe_discovery_packet(raw_data) -> 'UniverseDiscoveryPacket':
# Check if the length is sufficient
if len(raw_data) < 120:
raise TypeError('The length of the provided data is not long enough! Min length is 120!')
# Check if the three Vectors are correct
# REMEMBER: when slicing: [inclusive:exclusive]
if tuple(raw_data[18:22]) != tuple(VECTOR_ROOT_E131_EXTENDED) or \
tuple(raw_data[40:44]) != tuple(VECTOR_E131_EXTENDED_DISCOVERY) or \
tuple(raw_data[114:118]) != tuple(VECTOR_UNIVERSE_DISCOVERY_UNIVERSE_LIST):
raise TypeError('Some of the vectors in the given raw data are not compatible to the E131 Standard!')
# tricky part: convert plain bytes to a useful list of 16-bit values for further use
# Problem: the given raw_data can be longer than the dynamic length of the list of universes
# first: extract the length from the Universe Discovery Layer (UDL)
length = (two_bytes_to_int(raw_data[112], raw_data[113]) & 0xFFF) - 8
# remember: UDL has 8 bytes plus the universes
# remember: Flags and length includes a 12-bit length field
universes = convert_raw_data_to_universes(raw_data[120:120+length])
tmp_packet = UniverseDiscoveryPacket(cid=raw_data[22:38], sourceName=str(raw_data[44:108]), universes=universes)
tmp_packet._page = raw_data[118]
tmp_packet._lastPage = raw_data[119]
return tmp_packet
@staticmethod
def make_multiple_uni_disc_packets(cid: tuple, sourceName: str, universes: list) -> List['UniverseDiscoveryPacket']:
"""
Creates a list with universe discovery packets based on the given data. It automatically creates enough packets
for the given universes list.
:param cid: the cid to use in all packets
:param sourceName: the source name to use in all packets
:param universes: the universes. Can be longer than 512, but has to be shorter than 256*512.
The values in the list should be [1-63999]
:return: a list full of universe discovery packets
"""
tmpList = []
if len(universes)%512 != 0:
num_of_packets = int(len(universes)/512)+1
else: # just get how long the list has to be. Just read and think about the if statement.
# Should be self-explaining
num_of_packets = int(len(universes)/512)
universes.sort() # E1.31 requires that the universes sent out are sorted
for i in range(0, num_of_packets):
if i == num_of_packets-1:
tmpUniverses = universes[i * 512:len(universes)]
# if we are here, then the for is in the last loop
else:
tmpUniverses = universes[i * 512:(i+1) * 512]
# create new UniverseDiscoveryPacket and append it to the list. Page and lastPage are getting special values
tmpList.append(UniverseDiscoveryPacket(cid=cid, sourceName=sourceName, universes=tmpUniverses,
page=i, lastPage=num_of_packets-1))
return tmpList
def two_bytes_to_int(hi_byte: int, low_byte: int) -> int:
"""
Converts two bytes to a normal integer value.
:param hi_byte: the high byte
:param low_byte: the low byte
:return: converted integer that has a value between [0-65535]
"""
return ((hi_byte & 0xFF)*256) + (low_byte & 0xFF)
|
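Since two_bytes_to_int pairs bytes high-first, feeding the raw tail of a discovery packet into the converter yields the universe list directly. A check with hand-built byte pairs:

raw = (0, 1, 0, 2, 1, 0)  # universes 1, 2 and 256 as high/low byte pairs
assert two_bytes_to_int(1, 0) == 256
assert convert_raw_data_to_universes(raw) == (1, 2, 256)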
Hundemeier/sacn | sacn/messages/universe_discovery.py | UniverseDiscoveryPacket.make_multiple_uni_disc_packets | python | def make_multiple_uni_disc_packets(cid: tuple, sourceName: str, universes: list) -> List['UniverseDiscoveryPacket']:
tmpList = []
if len(universes)%512 != 0:
num_of_packets = int(len(universes)/512)+1
else: # just get how long the list has to be. Just read and think about the if statement.
# Should be self-explaining
num_of_packets = int(len(universes)/512)
universes.sort() # E1.31 requires that the universes sent out are sorted
for i in range(0, num_of_packets):
if i == num_of_packets-1:
tmpUniverses = universes[i * 512:len(universes)]
# if we are here, then the for is in the last loop
else:
tmpUniverses = universes[i * 512:(i+1) * 512]
# create new UniverseDiscoveryPacket and append it to the list. Page and lastPage are getting special values
tmpList.append(UniverseDiscoveryPacket(cid=cid, sourceName=sourceName, universes=tmpUniverses,
page=i, lastPage=num_of_packets-1))
return tmpList | Creates a list with universe discovery packets based on the given data. It automatically creates enough packets
for the given universes list.
:param cid: the cid to use in all packets
:param sourceName: the source name to use in all packets
:param universes: the universes. Can be longer than 512, but has to be shorter than 256*512.
The values in the list should be [1-63999]
:return: a list full of universe discovery packets | train | https://github.com/Hundemeier/sacn/blob/f08bf3d7554a1ed2870f23a9e0e7b89a4a509231/sacn/messages/universe_discovery.py#L102-L128 | null | class UniverseDiscoveryPacket(RootLayer):
def __init__(self, cid: tuple, sourceName: str, universes: tuple, page: int = 0, lastPage: int = 0):
self.sourceName: str = sourceName
self._page: int = page
self._lastPage: int = lastPage
self._universes: list = universes
super().__init__((len(universes)*2)+120, cid, VECTOR_ROOT_E131_EXTENDED)
@property
def page(self) -> int:
return self._page
@page.setter
def page(self, page: int):
if page not in range(0, 256):
raise TypeError(f'Page is a byte! values: [0-255]! value was {page}')
self._page = page
@property
def lastPage(self) -> int:
return self._page
@lastPage.setter
def lastPage(self, lastPage: int):
if lastPage not in range(0, 256):
raise TypeError(f'Page is a byte! values: [0-255]! value was {lastPage}')
self._page = lastPage
@property
def universes(self) -> tuple:
return tuple(self._universes)
@universes.setter
def universes(self, universes: tuple):
if len(universes) > 512:
raise TypeError(f'Universes is a tuple with a max length of 512! The data in the tuple has to be int! '
f'Length was {len(universes)}')
self._universes = sorted(universes)
self.length = 121+(len(universes)*2) # generate new length value for the packet
def getBytes(self) -> tuple:
rtrnList = super().getBytes()
# Flags and Length Framing Layer:--------------------
rtrnList.extend(make_flagsandlength(self.length - 38))
# Vector Framing Layer:------------------------------
rtrnList.extend(VECTOR_E131_EXTENDED_DISCOVERY)
# source Name Framing Layer:-------------------------
# make a 64 byte long sourceName
tmpSourceName = [0] * 64
for i in range(0, min(len(tmpSourceName), len(self.sourceName))):
tmpSourceName[i] = ord(self.sourceName[i])
rtrnList.extend(tmpSourceName)
# reserved fields:-----------------------------------
rtrnList.extend([0]*4)
# Universe Discovery Layer:-------------------------------------
# Flags and Length:----------------------------------
rtrnList.extend(make_flagsandlength(self.length - 112))
# Vector UDL:----------------------------------------
rtrnList.extend(VECTOR_UNIVERSE_DISCOVERY_UNIVERSE_LIST)
# page:----------------------------------------------
rtrnList.append(self._page & 0xFF)
# last page:-----------------------------------------
rtrnList.append(self._lastPage & 0xFF)
# universes:-----------------------------------------
for universe in self._universes: # universe is a 16-bit number!
rtrnList.extend(int_to_bytes(universe))
return tuple(rtrnList)
@staticmethod
def make_universe_discovery_packet(raw_data) -> 'UniverseDiscoveryPacket':
# Check if the length is sufficient
if len(raw_data) < 120:
raise TypeError('The length of the provided data is not long enough! Min length is 120!')
# Check if the three Vectors are correct
# REMEMBER: when slicing: [inclusive:exclusive]
if tuple(raw_data[18:22]) != tuple(VECTOR_ROOT_E131_EXTENDED) or \
tuple(raw_data[40:44]) != tuple(VECTOR_E131_EXTENDED_DISCOVERY) or \
tuple(raw_data[114:118]) != tuple(VECTOR_UNIVERSE_DISCOVERY_UNIVERSE_LIST):
raise TypeError('Some of the vectors in the given raw data are not compatible to the E131 Standard!')
# tricky part: convert plain bytes to a useful list of 16-bit values for further use
# Problem: the given raw_data can be longer than the dynamic length of the list of universes
# first: extract the length from the Universe Discovery Layer (UDL)
length = (two_bytes_to_int(raw_data[112], raw_data[113]) & 0xFFF) - 8
# remember: UDL has 8 bytes plus the universes
# remember: Flags and length includes a 12-bit length field
universes = convert_raw_data_to_universes(raw_data[120:120+length])
tmp_packet = UniverseDiscoveryPacket(cid=raw_data[22:38], sourceName=str(raw_data[44:108]), universes=universes)
tmp_packet._page = raw_data[118]
tmp_packet._lastPage = raw_data[119]
return tmp_packet
@staticmethod
|
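The chunking above yields one packet per 512 universes, with page and lastPage letting a receiver reassemble the full list. A sketch assuming 600 active universes and a 16-byte CID:

universes = list(range(1, 601))  # 600 universes -> 2 packets (512 + 88)
packets = UniverseDiscoveryPacket.make_multiple_uni_disc_packets(
    cid=tuple(range(16)), sourceName='demo', universes=universes)
assert len(packets) == 2
assert packets[0].page == 0 and packets[1].page == 1
assert len(packets[1].universes) == 88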