repo_name | path | copies | size | content | license
---|---|---|---|---|---|
vismartltd/edx-platform | common/test/acceptance/tests/test_cohorted_courseware.py | 123 | 9754 | """
End-to-end test for cohorted courseware. This uses both Studio and LMS.
"""
import json
from nose.plugins.attrib import attr
from studio.base_studio_test import ContainerBase
from ..pages.studio.settings_group_configurations import GroupConfigurationsPage
from ..pages.studio.auto_auth import AutoAuthPage as StudioAutoAuthPage
from ..fixtures.course import XBlockFixtureDesc
from ..fixtures import LMS_BASE_URL
from ..pages.studio.component_editor import ComponentVisibilityEditorView
from ..pages.lms.instructor_dashboard import InstructorDashboardPage
from ..pages.lms.courseware import CoursewarePage
from ..pages.lms.auto_auth import AutoAuthPage as LmsAutoAuthPage
from ..tests.lms.test_lms_user_preview import verify_expected_problem_visibility
from bok_choy.promise import EmptyPromise
@attr('shard_5')
class EndToEndCohortedCoursewareTest(ContainerBase):
def setUp(self, is_staff=True):
super(EndToEndCohortedCoursewareTest, self).setUp(is_staff=is_staff)
self.staff_user = self.user
self.content_group_a = "Content Group A"
self.content_group_b = "Content Group B"
# Create a student who will be in "Cohort A"
self.cohort_a_student_username = "cohort_a_student"
self.cohort_a_student_email = "[email protected]"
StudioAutoAuthPage(
self.browser, username=self.cohort_a_student_username, email=self.cohort_a_student_email, no_login=True
).visit()
# Create a student who will be in "Cohort B"
self.cohort_b_student_username = "cohort_b_student"
self.cohort_b_student_email = "[email protected]"
StudioAutoAuthPage(
self.browser, username=self.cohort_b_student_username, email=self.cohort_b_student_email, no_login=True
).visit()
# Create a student who will end up in the default cohort group
self.cohort_default_student_username = "cohort_default_student"
self.cohort_default_student_email = "[email protected]"
StudioAutoAuthPage(
self.browser, username=self.cohort_default_student_username,
email=self.cohort_default_student_email, no_login=True
).visit()
# Start logged in as the staff user.
StudioAutoAuthPage(
self.browser, username=self.staff_user["username"], email=self.staff_user["email"]
).visit()
def populate_course_fixture(self, course_fixture):
"""
Populate the children of the test course fixture.
"""
self.group_a_problem = 'GROUP A CONTENT'
self.group_b_problem = 'GROUP B CONTENT'
self.group_a_and_b_problem = 'GROUP A AND B CONTENT'
self.visible_to_all_problem = 'VISIBLE TO ALL CONTENT'
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('problem', self.group_a_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.group_b_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.group_a_and_b_problem, data='<problem></problem>'),
XBlockFixtureDesc('problem', self.visible_to_all_problem, data='<problem></problem>')
)
)
)
)
def enable_cohorting(self, course_fixture):
"""
Enables cohorting for the current course.
"""
url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + '/cohorts/settings' # pylint: disable=protected-access
data = json.dumps({'is_cohorted': True})
response = course_fixture.session.patch(url, data=data, headers=course_fixture.headers)
self.assertTrue(response.ok, "Failed to enable cohorts")
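# Hedged sketch (illustrative, not part of this test suite): the same cohort
# toggle expressed against the LMS REST endpoint with the `requests` library.
# The auth headers are an assumption; the URL shape and payload mirror
# enable_cohorting() above.
#
# import json
# import requests
#
# def enable_cohorts(base_url, course_key, headers):
#     url = '%s/courses/%s/cohorts/settings' % (base_url, course_key)
#     resp = requests.patch(url, data=json.dumps({'is_cohorted': True}),
#                           headers=headers)
#     resp.raise_for_status()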
def create_content_groups(self):
"""
Creates two content groups in Studio Group Configurations Settings.
"""
group_configurations_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
group_configurations_page.visit()
group_configurations_page.create_first_content_group()
config = group_configurations_page.content_groups[0]
config.name = self.content_group_a
config.save()
group_configurations_page.add_content_group()
config = group_configurations_page.content_groups[1]
config.name = self.content_group_b
config.save()
def link_problems_to_content_groups_and_publish(self):
"""
Updates 3 of the 4 existing problems to limit their visibility by content group.
Publishes the modified units.
"""
container_page = self.go_to_unit_page()
def set_visibility(problem_index, content_group, second_content_group=None):
problem = container_page.xblocks[problem_index]
problem.edit_visibility()
if second_content_group:
ComponentVisibilityEditorView(self.browser, problem.locator).select_option(
second_content_group, save=False
)
ComponentVisibilityEditorView(self.browser, problem.locator).select_option(content_group)
set_visibility(1, self.content_group_a)
set_visibility(2, self.content_group_b)
set_visibility(3, self.content_group_a, self.content_group_b)
container_page.publish_action.click()
def create_cohorts_and_assign_students(self):
"""
Adds 2 manual cohorts, linked to content groups, to the course.
Each cohort is assigned one student.
"""
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
cohort_management_page = instructor_dashboard_page.select_cohort_management()
def add_cohort_with_student(cohort_name, content_group, student):
cohort_management_page.add_cohort(cohort_name, content_group=content_group)
# After adding the cohort, it should automatically be selected
EmptyPromise(
lambda: cohort_name == cohort_management_page.get_selected_cohort(), "Waiting for new cohort"
).fulfill()
cohort_management_page.add_students_to_selected_cohort([student])
add_cohort_with_student("Cohort A", self.content_group_a, self.cohort_a_student_username)
add_cohort_with_student("Cohort B", self.content_group_b, self.cohort_b_student_username)
def view_cohorted_content_as_different_users(self):
"""
View content as staff, student in Cohort A, student in Cohort B, and student in Default Cohort.
"""
courseware_page = CoursewarePage(self.browser, self.course_id)
def login_and_verify_visible_problems(username, email, expected_problems):
LmsAutoAuthPage(
self.browser, username=username, email=email, course_id=self.course_id
).visit()
courseware_page.visit()
verify_expected_problem_visibility(self, courseware_page, expected_problems)
login_and_verify_visible_problems(
self.staff_user["username"], self.staff_user["email"],
[self.group_a_problem, self.group_b_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
)
login_and_verify_visible_problems(
self.cohort_a_student_username, self.cohort_a_student_email,
[self.group_a_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
)
login_and_verify_visible_problems(
self.cohort_b_student_username, self.cohort_b_student_email,
[self.group_b_problem, self.group_a_and_b_problem, self.visible_to_all_problem]
)
login_and_verify_visible_problems(
self.cohort_default_student_username, self.cohort_default_student_email,
[self.visible_to_all_problem]
)
def test_cohorted_courseware(self):
"""
Scenario: Can create content that is only visible to students in particular cohorts
Given that I have a course with 4 problems, 1 staff member, and 3 students
When I enable cohorts in the course
And I create two content groups, Content Group A, and Content Group B, in the course
And I link one problem to Content Group A
And I link one problem to Content Group B
And I link one problem to both Content Group A and Content Group B
And one problem remains unlinked to any Content Group
And I create two manual cohorts, Cohort A and Cohort B,
linked to Content Group A and Content Group B, respectively
And I assign one student to each manual cohort
And one student remains in the default cohort
Then the staff member can see all 4 problems
And the student in Cohort A can see all the problems except the one linked to Content Group B
And the student in Cohort B can see all the problems except the one linked to Content Group A
And the student in the default cohort can only see the problem that is unlinked to any Content Group
"""
self.enable_cohorting(self.course_fixture)
self.create_content_groups()
self.link_problems_to_content_groups_and_publish()
self.create_cohorts_and_assign_students()
self.view_cohorted_content_as_different_users()
| agpl-3.0 |
commtrack/temp-aquatest | apps/buildmanager/api/resources.py | 1 | 5466 | import os
import bz2
import sys
import logging
import traceback
import cStringIO
import tempfile
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest
from django.core.servers.basehttp import FileWrapper
from django.core import serializers
from buildmanager.models import Project, ProjectBuild
from transformers.zip import TarCompressor, build_tarfile
from hq.models import Domain
from django_rest_interface import util
def get_builds(request):
"""Takes a POST containing a tar of all MD5's
and returns a tar of all missing submissions
Heh, this is explicitly against good REST methodology
We leave this inside the django-rest 'Resource' so we can
use their authentication tools
"""
try:
return _get_builds(request)
except Exception, e:
type, value, tb = sys.exc_info()
logging.error( "EXCEPTION raised: %s" % (str(e)) )
logging.error( "TRACEBACK:\n%s" % ('\n'.join(traceback.format_tb(tb))) )
raise
def get_builds_for_domain(request, domain_id):
"""Takes a POST containing a tar of all MD5's
and returns a tar of all missing submissions
Heh, this is explicitly against good REST methodology
We leave this inside the django-rest 'Resource' so we can
use their authentication tools
"""
try:
return _get_builds(request, domain_id)
except Exception, e:
type, value, tb = sys.exc_info()
logging.error( "EXCEPTION raised: %s" % (str(e)) )
logging.error( "TRACEBACK:\n%s" % ('\n'.join(traceback.format_tb(tb))) )
return HttpResponseBadRequest( "Exception raised %s." % e )
def _get_builds(request, domain_id=None):
projects = Project.objects.all()
if domain_id:
# filter on domain, if it's set
try:
domain = Domain.objects.get(id=domain_id)
except Domain.DoesNotExist:
logging.error("Domain with id %s could not found." % domain_id)
return HttpResponseBadRequest("Domain with id %s could not found." % domain_id)
projects = projects.filter(domain=domain)
if 'export_path' not in settings.RAPIDSMS_APPS['buildmanager']:
logging.error("Please set 'export_path' in your hq buildmanager settings.")
return HttpResponseBadRequest("Please set 'export_path' in your hq buildmanager settings.")
export_dir = settings.RAPIDSMS_APPS['buildmanager']['export_path']
# For now this is RESTful, and de-facto returns all projects and builds.
# At some point we may require this to take in a list of guids or
# checksums much like the receiver does.
if projects.count() == 0:
logging.info("No projects could be found.")
return HttpResponse("No projects could be found.")
builds = ProjectBuild.objects.filter(project__in=projects)
if builds.count() == 0:
logging.info("No builds could be found.")
return HttpResponse("No builds could be found.")
compressor = TarCompressor()
export_path = os.path.join( export_dir, "commcarehq-builds.tar")
compressor.open(name=export_path)
# add the root project summaries to the compressor
_add_to_compressor(compressor, _get_project_summary(projects), "projects.json")
tars = []
for build in builds:
try:
summary_tar = _get_build_summary(build)
tars.append(summary_tar)
compressor.add_file(summary_tar.name)
except Exception, e:
logging.error("Unable to export build: %s. Error is %s." % (build, e))
raise
compressor.close()
fin = open(export_path, 'rb')
wrapper = FileWrapper(fin)
# Build the streaming response first, then set headers on it; setting
# headers on a throwaway HttpResponse would silently discard them.
response = HttpResponse(wrapper, content_type='application/tar')
response['Content-Length'] = os.path.getsize(export_path)
response['Content-Disposition'] = 'attachment; filename=commcarehq-builds.tar'
return response
def _get_project_summary(projects):
"""Returns a single json string with the summary of the projects"""
return serializers.serialize('json', projects)
def _get_build_summary(build):
"""Package a build's metadata with its jad and jar and return it
as a tarball"""
# TemporaryFile() has no usable .name on POSIX; NamedTemporaryFile gives a
# real path, and gettempdir() initializes tempfile.tempdir if needed.
temp_tar_path = tempfile.NamedTemporaryFile(delete=False).name
temp_json_path = os.path.join(tempfile.gettempdir(), "build%s.json" % build.id)
json_file = open(temp_json_path, "wb")
json_file.write(serializers.serialize('json', [build]))
json_file.close()
tarball = build_tarfile([json_file.name, build.jar_file, build.jad_file], temp_tar_path)
tarball.close()
return tarball
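# Hedged stdlib sketch (illustrative, not part of this module): the same
# "metadata plus artifacts into one tarball" packaging as above, using only
# the standard tarfile module. build_tarfile's exact signature lives in
# transformers.zip; this is an assumed equivalent.
#
# import tarfile
#
# def package_build(tar_path, member_paths):
#     with tarfile.open(tar_path, 'w') as tar:
#         for path in member_paths:
#             tar.add(path, arcname=os.path.basename(path))
#     return tar_path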
def _get_build_filename(build):
"""A unique but semi-readable filename to reference the build"""
return "%s-%s-%s.build" % (build.project.domain.name, build.project.name, build.id)
def _add_to_compressor(compressor, data, filename):
"""Add some data to the (assumed to be open) tar archive"""
compressor.add_stream(cStringIO.StringIO( data ), len(data), name=filename)
def _add_stream_to_compressor(compressor, data, length, filename):
"""Add some data to the (assumed to be open) tar archive"""
compressor.add_stream(data, length, name=filename)
| bsd-3-clause |
FCP-INDI/nipype | nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py | 12 | 1224 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..preprocess import ApplyDeformations
def test_ApplyDeformations_inputs():
input_map = dict(deformation_field=dict(field='comp{1}.def',
mandatory=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_files=dict(field='fnames',
mandatory=True,
),
interp=dict(field='interp',
),
matlab_cmd=dict(),
mfile=dict(usedefault=True,
),
paths=dict(),
reference_volume=dict(field='comp{2}.id.space',
mandatory=True,
),
use_mcr=dict(),
use_v8struct=dict(min_ver='8',
usedefault=True,
),
)
inputs = ApplyDeformations.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ApplyDeformations_outputs():
output_map = dict(out_files=dict(),
)
outputs = ApplyDeformations.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
kopach/git-cola | cola/widgets/text.py | 2 | 28098 | """Text widgets"""
from __future__ import division, absolute_import, unicode_literals
import math
from qtpy import QtCore
from qtpy import QtGui
from qtpy import QtWidgets
from qtpy.QtCore import Qt
from qtpy.QtCore import Signal
from ..qtutils import get
from .. import hotkeys
from .. import qtutils
from ..i18n import N_
from ..models import prefs
from . import defs
def get_stripped(widget):
return widget.get().strip()
class LineEdit(QtWidgets.QLineEdit):
cursor_changed = Signal(int, int)
def __init__(self, parent=None, row=1, get_value=None, clear_button=False):
QtWidgets.QLineEdit.__init__(self, parent)
self._row = row
if get_value is None:
get_value = get_stripped
self._get_value = get_value
self.cursor_position = LineEditCursorPosition(self, row)
if clear_button and hasattr(self, 'setClearButtonEnabled'):
self.setClearButtonEnabled(True)
def get(self):
"""Return the raw unicode value from Qt"""
return self.text()
def value(self):
"""Return the processed value, e.g. stripped"""
return self._get_value(self)
def set_value(self, value, block=False):
if block:
blocksig = self.blockSignals(True)
pos = self.cursorPosition()
self.setText(value)
self.setCursorPosition(pos)
if block:
self.blockSignals(blocksig)
class LineEditCursorPosition(object):
"""Translate cursorPositionChanged(int,int) into cursorPosition(int,int)
"""
def __init__(self, widget, row):
self._widget = widget
self._row = row
# Translate cursorPositionChanged into cursor_changed(int, int)
widget.cursorPositionChanged.connect(lambda old, new: self.emit())
def emit(self):
widget = self._widget
row = self._row
col = widget.cursorPosition()
widget.cursor_changed.emit(row, col)
def reset(self):
self._widget.setCursorPosition(0)
class BaseTextEditExtension(QtCore.QObject):
def __init__(self, widget, get_value, readonly):
QtCore.QObject.__init__(self, widget)
self.widget = widget
self.cursor_position = TextEditCursorPosition(widget, self)
if get_value is None:
get_value = get_stripped
self._get_value = get_value
self._tabwidth = 8
self._readonly = readonly
self._init_flags()
self.init()
def _init_flags(self):
widget = self.widget
widget.setMinimumSize(QtCore.QSize(1, 1))
widget.setWordWrapMode(QtGui.QTextOption.WordWrap)
widget.setLineWrapMode(widget.NoWrap)
widget.setCursorWidth(defs.cursor_width)
if self._readonly:
widget.setReadOnly(True)
widget.setAcceptDrops(False)
widget.setTabChangesFocus(True)
widget.setUndoRedoEnabled(False)
widget.setTextInteractionFlags(Qt.TextSelectableByKeyboard |
Qt.TextSelectableByMouse)
def get(self):
"""Return the raw unicode value from Qt"""
return self.widget.toPlainText()
def value(self):
"""Return a safe value, e.g. a stripped value"""
return self._get_value(self.widget)
def set_value(self, value, block=False):
if block:
blocksig = self.widget.blockSignals(True)
# Save cursor position
offset, selection_text = self.offset_and_selection()
old_value = get(self.widget)
# Update text
self.widget.setPlainText(value)
# Restore cursor
if selection_text and selection_text in value:
# If the old selection exists in the new text then re-select it.
idx = value.index(selection_text)
cursor = self.widget.textCursor()
cursor.setPosition(idx)
cursor.setPosition(idx + len(selection_text),
QtGui.QTextCursor.KeepAnchor)
self.widget.setTextCursor(cursor)
elif value == old_value:
# Otherwise, if the text is identical and there is no selection
# then restore the cursor position.
cursor = self.widget.textCursor()
cursor.setPosition(offset)
self.widget.setTextCursor(cursor)
else:
# If none of the above applied then restore the cursor position.
position = max(0, min(offset, len(value) - 1))
cursor = self.widget.textCursor()
cursor.setPosition(position)
self.widget.setTextCursor(cursor)
cursor = self.widget.textCursor()
cursor.movePosition(QtGui.QTextCursor.StartOfLine)
self.widget.setTextCursor(cursor)
if block:
self.widget.blockSignals(blocksig)
def set_cursor_position(self, new_position):
cursor = self.widget.textCursor()
cursor.setPosition(new_position)
self.widget.setTextCursor(cursor)
def tabwidth(self):
return self._tabwidth
def set_tabwidth(self, width):
self._tabwidth = width
font = self.widget.font()
fm = QtGui.QFontMetrics(font)
pixels = fm.width('M' * width)
self.widget.setTabStopWidth(pixels)
def selected_line(self):
contents = self.value()
cursor = self.widget.textCursor()
offset = min(cursor.position(), len(contents)-1)
while (offset >= 1 and
contents[offset-1] and
contents[offset-1] != '\n'):
offset -= 1
data = contents[offset:]
if '\n' in data:
line, rest = data.split('\n', 1)
else:
line = data
return line
def cursor(self):
return self.widget.textCursor()
def has_selection(self):
return self.cursor().hasSelection()
def offset_and_selection(self):
cursor = self.cursor()
offset = cursor.selectionStart()
selection_text = cursor.selection().toPlainText()
return offset, selection_text
def mouse_press_event(self, event):
# Move the text cursor so that the right-click events operate
# on the current position, not the last left-clicked position.
widget = self.widget
if event.button() == Qt.RightButton:
if not widget.textCursor().hasSelection():
cursor = widget.cursorForPosition(event.pos())
widget.setTextCursor(cursor)
# For extension by sub-classes
def init(self):
"""Called during init for class-specific settings"""
pass
def set_textwidth(self, width):
"""Set the text width"""
pass
def set_linebreak(self, brk):
"""Enable word wrapping"""
pass
class PlainTextEditExtension(BaseTextEditExtension):
def set_linebreak(self, brk):
if brk:
wrapmode = QtWidgets.QPlainTextEdit.WidgetWidth
else:
wrapmode = QtWidgets.QPlainTextEdit.NoWrap
self.widget.setLineWrapMode(wrapmode)
class PlainTextEdit(QtWidgets.QPlainTextEdit):
cursor_changed = Signal(int, int)
leave = Signal()
def __init__(self, parent=None, get_value=None, readonly=False):
QtWidgets.QPlainTextEdit.__init__(self, parent)
self.ext = PlainTextEditExtension(self, get_value, readonly)
self.cursor_position = self.ext.cursor_position
def get(self):
"""Return the raw unicode value from Qt"""
return self.ext.get()
# For compatibility with QTextEdit
def setText(self, value):
self.set_value(value)
def value(self):
"""Return a safe value, e.g. a stripped value"""
return self.ext.value()
def set_value(self, value, block=False):
self.ext.set_value(value, block=block)
def has_selection(self):
return self.ext.has_selection()
def selected_line(self):
return self.ext.selected_line()
def set_tabwidth(self, width):
self.ext.set_tabwidth(width)
def set_textwidth(self, width):
self.ext.set_textwidth(width)
def set_linebreak(self, brk):
self.ext.set_linebreak(brk)
def mousePressEvent(self, event):
self.ext.mouse_press_event(event)
super(PlainTextEdit, self).mousePressEvent(event)
def wheelEvent(self, event):
"""Disable control+wheelscroll text resizing"""
if event.modifiers() & Qt.ControlModifier:
event.ignore()
return
return super(PlainTextEdit, self).wheelEvent(event)
class TextEditExtension(BaseTextEditExtension):
def init(self):
widget = self.widget
widget.setAcceptRichText(False)
def set_linebreak(self, brk):
if brk:
wrapmode = QtWidgets.QTextEdit.FixedColumnWidth
else:
wrapmode = QtWidgets.QTextEdit.NoWrap
self.widget.setLineWrapMode(wrapmode)
def set_textwidth(self, width):
self.widget.setLineWrapColumnOrWidth(width)
class TextEdit(QtWidgets.QTextEdit):
cursor_changed = Signal(int, int)
leave = Signal()
def __init__(self, parent=None, get_value=None, readonly=False):
QtWidgets.QTextEdit.__init__(self, parent)
self.ext = TextEditExtension(self, get_value, readonly)
self.cursor_position = self.ext.cursor_position
def get(self):
"""Return the raw unicode value from Qt"""
return self.ext.get()
def value(self):
"""Return a safe value, e.g. a stripped value"""
return self.ext.value()
def set_value(self, value, block=False):
self.ext.set_value(value, block=block)
def selected_line(self):
return self.ext.selected_line()
def set_tabwidth(self, width):
self.ext.set_tabwidth(width)
def set_textwidth(self, width):
self.ext.set_textwidth(width)
def set_linebreak(self, brk):
self.ext.set_linebreak(brk)
def mousePressEvent(self, event):
self.ext.mouse_press_event(event)
super(TextEdit, self).mousePressEvent(event)
def wheelEvent(self, event):
"""Disable control+wheelscroll text resizing"""
if event.modifiers() & Qt.ControlModifier:
event.ignore()
return
return super(TextEdit, self).wheelEvent(event)
def should_expandtab(self, event):
return event.key() == Qt.Key_Tab and prefs.expandtab()
def expandtab(self):
tabwidth = max(self.ext.tabwidth(), 1)
cursor = self.textCursor()
cursor.insertText(' ' * tabwidth)
def keyPressEvent(self, event):
expandtab = self.should_expandtab(event)
if expandtab:
self.expandtab()
event.accept()
else:
QtWidgets.QTextEdit.keyPressEvent(self, event)
def keyReleaseEvent(self, event):
expandtab = self.should_expandtab(event)
if expandtab:
event.ignore()
else:
QtWidgets.QTextEdit.keyReleaseEvent(self, event)
class TextEditCursorPosition(object):
def __init__(self, widget, ext):
self._widget = widget
self._ext = ext
widget.cursorPositionChanged.connect(self.emit)
def emit(self):
widget = self._widget
ext = self._ext
cursor = widget.textCursor()
position = cursor.position()
txt = widget.get()
before = txt[:position]
row = before.count('\n')
line = before.split('\n')[row]
col = cursor.columnNumber()
col += line[:col].count('\t') * (ext.tabwidth() - 1)
widget.cursor_changed.emit(row+1, col)
def reset(self):
widget = self._widget
cursor = widget.textCursor()
cursor.setPosition(0)
widget.setTextCursor(cursor)
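# Worked example (illustrative): with tabwidth=8, a line of '\tfoo' and the
# cursor after the 'f' gives columnNumber() == 2; emit() then reports column
# 2 + 1 tab * (8 - 1) = 9, i.e. the visual column at an 8-wide tab stop.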
def setup_mono_font(widget):
widget.setFont(qtutils.diff_font())
widget.set_tabwidth(prefs.tabwidth())
class MonoTextEdit(PlainTextEdit):
def __init__(self, parent=None, readonly=False):
PlainTextEdit.__init__(self, parent=parent, readonly=readonly)
setup_mono_font(self)
def get_value_hinted(widget):
text = get_stripped(widget)
hint = get(widget.hint)
if text == hint:
return ''
else:
return text
class HintWidget(QtCore.QObject):
"""Extend a widget to provide hint messages
This primarily exists because setPlaceholderText() is only available
in Qt5, so this class provides consistent behavior across versions.
"""
def __init__(self, widget, hint):
QtCore.QObject.__init__(self, widget)
self._widget = widget
self._hint = hint
self._is_error = False
self.modern = modern = hasattr(widget, 'setPlaceholderText')
if modern:
widget.setPlaceholderText(hint)
# Palette for normal text
QPalette = QtGui.QPalette
palette = widget.palette()
hint_color = palette.color(QPalette.Disabled, QPalette.Text)
error_bg_color = QtGui.QColor(Qt.red).darker()
error_fg_color = QtGui.QColor(Qt.white)
hint_rgb = qtutils.rgb_css(hint_color)
error_bg_rgb = qtutils.rgb_css(error_bg_color)
error_fg_rgb = qtutils.rgb_css(error_fg_color)
env = dict(name=widget.__class__.__name__,
error_fg_rgb=error_fg_rgb,
error_bg_rgb=error_bg_rgb,
hint_rgb=hint_rgb)
self._default_style = ''
self._hint_style = """
%(name)s {
color: %(hint_rgb)s;
}
""" % env
self._error_style = """
%(name)s {
color: %(error_fg_rgb)s;
background-color: %(error_bg_rgb)s;
}
""" % env
def init(self):
"""Defer initialization to avoid circular dependencies during construction"""
if self.modern:
self.widget().setPlaceholderText(self.value())
else:
self.widget().installEventFilter(self)
self.enable(True)
def widget(self):
"""Return the parent text widget"""
return self._widget
def active(self):
"""Return True when hint-mode is active"""
return self.value() == get_stripped(self._widget)
def value(self):
"""Return the current hint text"""
return self._hint
def set_error(self, is_error):
"""Enable/disable error mode"""
self._is_error = is_error
self.refresh()
def set_value(self, hint):
"""Change the hint text"""
if self.modern:
self._hint = hint
self._widget.setPlaceholderText(hint)
else:
# If hint-mode is currently active, re-activate it
active = self.active()
self._hint = hint
if active or self.active():
self.enable(True)
def enable(self, enable):
"""Enable/disable hint-mode"""
if not self.modern:
if enable and self._hint:
self._widget.set_value(self._hint, block=True)
self._widget.cursor_position.reset()
else:
self._widget.clear()
self._update_palette(enable)
def refresh(self):
"""Update the palette to match the current mode"""
self._update_palette(self.active())
def _update_palette(self, hint):
"""Update to palette for normal/error/hint mode"""
if self._is_error:
style = self._error_style
elif not self.modern and hint:
style = self._hint_style
else:
style = self._default_style
self._widget.setStyleSheet(style)
def eventFilter(self, obj, event):
"""Enable/disable hint-mode when focus changes"""
etype = event.type()
if etype == QtCore.QEvent.FocusIn:
self.focus_in()
elif etype == QtCore.QEvent.FocusOut:
self.focus_out()
return False
def focus_in(self):
"""Disable hint-mode when focused"""
widget = self.widget()
if self.active():
self.enable(False)
widget.cursor_position.emit()
def focus_out(self):
"""Re-enable hint-mode when losing focus"""
widget = self.widget()
if not get(widget):
self.enable(True)
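# Hedged usage sketch (illustrative): wiring a hint onto a plain LineEdit,
# mirroring what HintedLineEdit below does internally.
#
# edit = LineEdit(get_value=get_value_hinted)
# edit.hint = HintWidget(edit, N_('Filter...'))
# edit.hint.init()   # installs the event filter on Qt4, placeholder on Qt5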
class HintedPlainTextEdit(PlainTextEdit):
"""A hinted plain text edit"""
def __init__(self, hint, parent=None, readonly=False):
PlainTextEdit.__init__(self, parent=parent,
get_value=get_value_hinted,
readonly=readonly)
self.hint = HintWidget(self, hint)
self.hint.init()
setup_mono_font(self)
# Refresh palettes when text changes
self.textChanged.connect(self.hint.refresh)
def set_value(self, value, block=False):
"""Set the widget text or enable hint mode when empty"""
if value or self.hint.modern:
PlainTextEdit.set_value(self, value, block=block)
else:
self.hint.enable(True)
class HintedTextEdit(TextEdit):
"""A hinted text edit"""
def __init__(self, hint, parent=None, readonly=False):
TextEdit.__init__(self, parent=parent,
get_value=get_value_hinted, readonly=readonly)
self.hint = HintWidget(self, hint)
self.hint.init()
setup_mono_font(self)
# Refresh palettes when text changes
self.textChanged.connect(self.hint.refresh)
def set_value(self, value, block=False):
"""Set the widget text or enable hint mode when empty"""
if value or self.hint.modern:
TextEdit.set_value(self, value, block=block)
else:
self.hint.enable(True)
# The vim-like read-only text view
class VimMixin(object):
def __init__(self, widget):
self.widget = widget
self.Base = widget.Base
# Common vim/unix-ish keyboard actions
self.add_navigation('Up', hotkeys.MOVE_UP,
shift=hotkeys.MOVE_UP_SHIFT)
self.add_navigation('Down', hotkeys.MOVE_DOWN,
shift=hotkeys.MOVE_DOWN_SHIFT)
self.add_navigation('Left', hotkeys.MOVE_LEFT,
shift=hotkeys.MOVE_LEFT_SHIFT)
self.add_navigation('Right', hotkeys.MOVE_RIGHT,
shift=hotkeys.MOVE_RIGHT_SHIFT)
self.add_navigation('WordLeft', hotkeys.WORD_LEFT)
self.add_navigation('WordRight', hotkeys.WORD_RIGHT)
self.add_navigation('StartOfLine', hotkeys.START_OF_LINE)
self.add_navigation('EndOfLine', hotkeys.END_OF_LINE)
qtutils.add_action(widget, 'PageUp',
lambda: widget.page(-widget.height()//2),
hotkeys.SECONDARY_ACTION)
qtutils.add_action(widget, 'PageDown',
lambda: widget.page(widget.height()//2),
hotkeys.PRIMARY_ACTION)
def add_navigation(self, name, hotkey, shift=None):
"""Add a hotkey along with a shift-variant"""
widget = self.widget
direction = getattr(QtGui.QTextCursor, name)
qtutils.add_action(widget, name,
lambda: self.move(direction), hotkey)
if shift:
qtutils.add_action(widget, 'Shift' + name,
lambda: self.move(direction, True), shift)
def move(self, direction, select=False, n=1):
widget = self.widget
cursor = widget.textCursor()
if select:
mode = QtGui.QTextCursor.KeepAnchor
else:
mode = QtGui.QTextCursor.MoveAnchor
if cursor.movePosition(direction, mode, n):
self.set_text_cursor(cursor)
def page(self, offset):
widget = self.widget
rect = widget.cursorRect()
x = rect.x()
y = rect.y() + offset
new_cursor = widget.cursorForPosition(QtCore.QPoint(x, y))
if new_cursor is not None:
self.set_text_cursor(new_cursor)
def set_text_cursor(self, cursor):
widget = self.widget
widget.setTextCursor(cursor)
widget.ensureCursorVisible()
widget.viewport().update()
def keyPressEvent(self, event):
"""Custom keyboard behaviors
The leave() signal is emitted when `Up` is pressed and we're already
at the beginning of the text. This allows the parent widget to
orchestrate some higher-level interaction, such as giving focus to
another widget.
When in the middle of the first line and `Up` is pressed, the cursor
is moved to the beginning of the line.
"""
widget = self.widget
if event.key() == Qt.Key_Up:
cursor = widget.textCursor()
position = cursor.position()
if position == 0:
# The cursor is at the beginning of the line.
# Emit a signal so that the parent can e.g. change focus.
widget.leave.emit()
elif get(widget)[:position].count('\n') == 0:
# The cursor is in the middle of the first line of text.
# We can't go up, so jump to the beginning of the line instead.
# Select the text if shift is pressed.
if event.modifiers() & Qt.ShiftModifier:
mode = QtGui.QTextCursor.KeepAnchor
else:
mode = QtGui.QTextCursor.MoveAnchor
cursor.movePosition(QtGui.QTextCursor.StartOfLine, mode)
widget.setTextCursor(cursor)
return self.Base.keyPressEvent(widget, event)
class VimHintedPlainTextEdit(HintedPlainTextEdit):
"""HintedPlainTextEdit with vim hotkeys
This can only be used in read-only mode.
"""
Base = HintedPlainTextEdit
Mixin = VimMixin
def __init__(self, hint, parent=None):
HintedPlainTextEdit.__init__(self, hint, parent=parent, readonly=True)
self._mixin = self.Mixin(self)
def move(self, direction, select=False, n=1):
return self._mixin.move(direction, select=select, n=n)
def page(self, offset):
return self._mixin.page(offset)
def keyPressEvent(self, event):
return self._mixin.keyPressEvent(event)
class VimTextEdit(MonoTextEdit):
"""Text viewer with vim-like hotkeys
This can only be used in read-only mode.
"""
Base = MonoTextEdit
Mixin = VimMixin
def __init__(self, parent=None):
MonoTextEdit.__init__(self, parent=parent, readonly=True)
self._mixin = self.Mixin(self)
def move(self, direction, select=False, n=1):
return self._mixin.move(direction, select=select, n=n)
def page(self, offset):
return self._mixin.page(offset)
def keyPressEvent(self, event):
return self._mixin.keyPressEvent(event)
class HintedLineEdit(LineEdit):
def __init__(self, hint, parent=None):
LineEdit.__init__(self, parent=parent, get_value=get_value_hinted)
self.hint = HintWidget(self, hint)
self.hint.init()
self.setFont(qtutils.diff_font())
self.textChanged.connect(lambda text: self.hint.refresh())
def text_dialog(text, title):
"""Show a wall of text in a dialog"""
parent = qtutils.active_window()
label = QtWidgets.QLabel(parent)
label.setFont(qtutils.diff_font())
label.setText(text)
label.setMargin(defs.large_margin)
text_flags = Qt.TextSelectableByKeyboard | Qt.TextSelectableByMouse
label.setTextInteractionFlags(text_flags)
widget = QtWidgets.QDialog(parent)
widget.setWindowModality(Qt.WindowModal)
widget.setWindowTitle(title)
scroll = QtWidgets.QScrollArea()
scroll.setWidget(label)
layout = qtutils.hbox(defs.margin, defs.spacing, scroll)
widget.setLayout(layout)
qtutils.add_action(widget, N_('Close'), widget.accept,
Qt.Key_Question, Qt.Key_Enter, Qt.Key_Return)
widget.show()
return widget
class VimTextBrowser(VimTextEdit):
"""Text viewer with line number annotations"""
def __init__(self, parent=None, readonly=False):
VimTextEdit.__init__(self, parent=parent)
self.numbers = LineNumbers(self)
def resizeEvent(self, event):
super(VimTextBrowser, self).resizeEvent(event)
self.numbers.refresh_size()
class TextDecorator(QtWidgets.QWidget):
"""Common functionality for providing line numbers in text widgets"""
def __init__(self, parent):
QtWidgets.QWidget.__init__(self, parent)
self.editor = parent
parent.blockCountChanged.connect(lambda x: self._refresh_viewport())
parent.cursorPositionChanged.connect(self.refresh)
parent.updateRequest.connect(self._refresh_rect)
def refresh(self):
"""Refresh the numbers display"""
rect = self.editor.viewport().rect()
self._refresh_rect(rect, 0)
def _refresh_rect(self, rect, dy):
if dy:
self.scroll(0, dy)
else:
self.update(0, rect.y(), self.width(), rect.height())
if rect.contains(self.editor.viewport().rect()):
self._refresh_viewport()
def _refresh_viewport(self):
self.editor.setViewportMargins(self.width_hint(), 0, 0, 0)
def refresh_size(self):
rect = self.editor.contentsRect()
geom = QtCore.QRect(rect.left(), rect.top(),
self.width_hint(), rect.height())
self.setGeometry(geom)
def sizeHint(self):
return QtCore.QSize(self.width_hint(), 0)
class LineNumbers(TextDecorator):
"""Provide line numbers for QPlainTextEdit widgets"""
def __init__(self, parent):
TextDecorator.__init__(self, parent)
self.highlight_line = -1
def width_hint(self):
document = self.editor.document()
digits = int(math.log(max(1, document.blockCount()), 10))
return defs.margin + self.fontMetrics().width('0') * (digits + 2)
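# Worked example (illustrative): blockCount == 150 gives
# digits == int(math.log(150, 10)) == 2, so the gutter reserves
# defs.margin + 4 character widths -- room for the 3-digit label
# plus one character of padding.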
def set_highlighted(self, line_number):
"""Set the line to highlight"""
self.highlight_line = line_number
def paintEvent(self, event):
"""Paint the line number"""
QPalette = QtGui.QPalette
painter = QtGui.QPainter(self)
palette = self.palette()
painter.fillRect(event.rect(), palette.color(QPalette.Base))
editor = self.editor
content_offset = editor.contentOffset()
block = editor.firstVisibleBlock()
current_block_number = max(0, self.editor.textCursor().blockNumber())
width = self.width()
event_rect_bottom = event.rect().bottom()
highlight = palette.color(QPalette.Highlight)
window = palette.color(QPalette.Window)
disabled = palette.color(QPalette.Disabled, QPalette.Text)
painter.setPen(disabled)
while block.isValid():
block_geom = editor.blockBoundingGeometry(block)
block_top = block_geom.translated(content_offset).top()
if not block.isVisible() or block_top >= event_rect_bottom:
break
rect = block_geom.translated(content_offset).toRect()
block_number = block.blockNumber()
if block_number == self.highlight_line:
painter.fillRect(rect.x(), rect.y(),
width, rect.height(), highlight)
elif block_number == current_block_number:
painter.fillRect(rect.x(), rect.y(),
width, rect.height(), window)
number = '%s' % (block_number + 1)
painter.drawText(rect.x(), rect.y(),
self.width() - (defs.margin * 2),
rect.height(),
Qt.AlignRight | Qt.AlignVCenter,
number)
block = block.next() # pylint: disable=next-method-called
| gpl-2.0 |
code-sauce/tensorflow | tensorflow/contrib/learn/python/learn/ops/embeddings_ops.py | 116 | 3510 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Ops to work with embeddings.
Note: categorical variables are handled via embeddings in many cases.
For example, in the case of words.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
@deprecated('2016-12-01', 'Use `tf.embedding_lookup` instead.')
def embedding_lookup(params, ids, name='embedding_lookup'):
"""Provides a N dimensional version of tf.embedding_lookup.
Ids are flattened to a 1d tensor before being passed to embedding_lookup;
then, they are unflattened to match the original ids shape plus an extra
trailing dimension of the size of the embeddings.
Args:
params: List of tensors of size D0 x D1 x ... x Dn-2 x Dn-1.
ids: N-dimensional tensor of B0 x B1 x .. x Bn-2 x Bn-1.
Must contain indexes into params.
name: Optional name for the op.
Returns:
A tensor of size B0 x B1 x .. x Bn-2 x Bn-1 x D1 x ... x Dn-2 x Dn-1
containing the values from the params tensor(s) for the indices in ids.
Raises:
ValueError: if some parameters are invalid.
"""
with ops.name_scope(name, 'embedding_lookup', [params, ids]):
params = ops.convert_to_tensor(params)
ids = ops.convert_to_tensor(ids)
shape = array_ops_.shape(ids)
ids_flat = array_ops_.reshape(
ids, math_ops.reduce_prod(shape, keep_dims=True))
embeds_flat = nn.embedding_lookup(params, ids_flat, name)
embed_shape = array_ops_.concat([shape, [-1]], 0)
embeds = array_ops_.reshape(embeds_flat, embed_shape)
embeds.set_shape(ids.get_shape().concatenate(params.get_shape()[1:]))
return embeds
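# Hedged usage sketch (illustrative, not part of the original module):
# demonstrates the shape contract documented above, assuming TF1-era
# graph mode to match this file's vintage.
#
# import numpy as np
# import tensorflow as tf
# params = tf.constant(np.arange(30.).reshape(10, 3))  # 10 classes, dim 3
# ids = tf.constant([[1, 3], [5, 7]])                  # 2 x 2 batch of ids
# embeds = embedding_lookup(params, ids)               # static shape (2, 2, 3)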
@deprecated('2016-12-01', 'Use `tf.contrib.layers.embed_sequence` instead.')
def categorical_variable(tensor_in, n_classes, embedding_size, name):
"""Creates an embedding for categorical variable with given number of classes.
Args:
tensor_in: Input tensor with class identifier (can be batch or
N-dimensional).
n_classes: Number of classes.
embedding_size: Size of embedding vector to represent each class.
name: Name of this categorical variable.
Returns:
Tensor of input shape, with additional dimension for embedding.
Example:
Calling categorical_variable([1, 2], 5, 10, "my_cat") will return a 2 x 10
tensor, where each row is a representation of the class.
"""
with vs.variable_scope(name):
embeddings = vs.get_variable(name + '_embeddings',
[n_classes, embedding_size])
return embedding_lookup(embeddings, tensor_in)
| apache-2.0 |
sumedhasingla/VTK | ThirdParty/Twisted/twisted/internet/task.py | 23 | 29498 | # -*- test-case-name: twisted.test.test_task,twisted.test.test_cooperator -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Scheduling utility methods and classes.
"""
from __future__ import division, absolute_import
__metaclass__ = type
import sys
import time
from zope.interface import implementer
from twisted.python import log
from twisted.python import reflect
from twisted.python.failure import Failure
from twisted.internet import base, defer
from twisted.internet.interfaces import IReactorTime
from twisted.internet.error import ReactorNotRunning
class LoopingCall:
"""Call a function repeatedly.
If C{f} returns a deferred, rescheduling will not take place until the
deferred has fired. The result value is ignored.
@ivar f: The function to call.
@ivar a: A tuple of arguments to pass the function.
@ivar kw: A dictionary of keyword arguments to pass to the function.
@ivar clock: A provider of
L{twisted.internet.interfaces.IReactorTime}. The default is
L{twisted.internet.reactor}. Feel free to set this to
something else, but it probably ought to be set *before*
calling L{start}.
@type running: C{bool}
@ivar running: A flag which is C{True} while C{f} is scheduled to be called
(or is currently being called). It is set to C{True} when L{start} is
called and set to C{False} when L{stop} is called or if C{f} raises an
exception. In either case, it will be C{False} by the time the
C{Deferred} returned by L{start} fires its callback or errback.
@type _expectNextCallAt: C{float}
@ivar _expectNextCallAt: The time at which this instance most recently
scheduled itself to run.
@type _realLastTime: C{float}
@ivar _realLastTime: When counting skips, the time at which the skip
counter was last invoked.
@type _runAtStart: C{bool}
@ivar _runAtStart: A flag indicating whether the 'now' argument was passed
to L{LoopingCall.start}.
"""
call = None
running = False
deferred = None
interval = None
_expectNextCallAt = 0.0
_runAtStart = False
starttime = None
def __init__(self, f, *a, **kw):
self.f = f
self.a = a
self.kw = kw
from twisted.internet import reactor
self.clock = reactor
def withCount(cls, countCallable):
"""
An alternate constructor for L{LoopingCall} that makes available the
number of calls which should have occurred since it was last invoked.
Note that this number is an C{int} value; It represents the discrete
number of calls that should have been made. For example, if you are
using a looping call to display an animation with discrete frames, this
number would be the number of frames to advance.
The count is normally 1, but can be higher. For example, if the reactor
is blocked and takes too long to invoke the L{LoopingCall}, a Deferred
returned from a previous call is not fired before an interval has
elapsed, or if the callable itself blocks for longer than an interval,
preventing I{itself} from being called.
@param countCallable: A callable that will be invoked each time the
resulting LoopingCall is run, with an integer specifying the number
of calls that should have been invoked.
@type countCallable: 1-argument callable which takes an C{int}
@return: An instance of L{LoopingCall} with call counting enabled,
which provides the count as the first positional argument.
@rtype: L{LoopingCall}
@since: 9.0
"""
def counter():
now = self.clock.seconds()
lastTime = self._realLastTime
if lastTime is None:
lastTime = self.starttime
if self._runAtStart:
lastTime -= self.interval
self._realLastTime = now
lastInterval = self._intervalOf(lastTime)
thisInterval = self._intervalOf(now)
count = thisInterval - lastInterval
return countCallable(count)
self = cls(counter)
self._realLastTime = None
return self
withCount = classmethod(withCount)
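# Hedged sketch (illustrative): withCount() for a fixed-rate animation, as
# the docstring above describes -- `count` reports how many frames should
# have elapsed, so slow ticks can be skipped over. `animation` is a
# hypothetical object.
#
# def advance(count):
#     animation.skip_frames(count)
#
# loop = LoopingCall.withCount(advance)
# loop.start(1.0 / 60)   # nominally 60 calls per second; count >= 1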
def _intervalOf(self, t):
"""
Determine the number of intervals passed as of the given point in
time.
@param t: The specified time (from the start of the L{LoopingCall}) to
be measured in intervals
@return: The C{int} number of intervals which have passed as of the
given point in time.
"""
elapsedTime = t - self.starttime
intervalNum = int(elapsedTime / self.interval)
return intervalNum
def start(self, interval, now=True):
"""
Start running function every interval seconds.
@param interval: The number of seconds between calls. May be
less than one. Precision will depend on the underlying
platform, the available hardware, and the load on the system.
@param now: If True, run this call right now. Otherwise, wait
until the interval has elapsed before beginning.
@return: A Deferred whose callback will be invoked with
C{self} when C{self.stop} is called, or whose errback will be
invoked when the function raises an exception or returned a
deferred that has its errback invoked.
"""
assert not self.running, ("Tried to start an already running "
"LoopingCall.")
if interval < 0:
raise ValueError("interval must be >= 0")
self.running = True
d = self.deferred = defer.Deferred()
self.starttime = self.clock.seconds()
self._expectNextCallAt = self.starttime
self.interval = interval
self._runAtStart = now
if now:
self()
else:
self._reschedule()
return d
def stop(self):
"""Stop running function.
"""
assert self.running, ("Tried to stop a LoopingCall that was "
"not running.")
self.running = False
if self.call is not None:
self.call.cancel()
self.call = None
d, self.deferred = self.deferred, None
d.callback(self)
def reset(self):
"""
Skip the next iteration and reset the timer.
@since: 11.1
"""
assert self.running, ("Tried to reset a LoopingCall that was "
"not running.")
if self.call is not None:
self.call.cancel()
self.call = None
self._expectNextCallAt = self.clock.seconds()
self._reschedule()
def __call__(self):
def cb(result):
if self.running:
self._reschedule()
else:
d, self.deferred = self.deferred, None
d.callback(self)
def eb(failure):
self.running = False
d, self.deferred = self.deferred, None
d.errback(failure)
self.call = None
d = defer.maybeDeferred(self.f, *self.a, **self.kw)
d.addCallback(cb)
d.addErrback(eb)
def _reschedule(self):
"""
Schedule the next iteration of this looping call.
"""
if self.interval == 0:
self.call = self.clock.callLater(0, self)
return
currentTime = self.clock.seconds()
# Find how long is left until the interval comes around again.
untilNextTime = (self._expectNextCallAt - currentTime) % self.interval
# Make sure it is in the future, in case more than one interval worth
# of time passed since the previous call was made.
nextTime = max(
self._expectNextCallAt + self.interval, currentTime + untilNextTime)
# If the interval falls on the current time exactly, skip it and
# schedule the call for the next interval.
if nextTime == currentTime:
nextTime += self.interval
self._expectNextCallAt = nextTime
self.call = self.clock.callLater(nextTime - currentTime, self)
def __repr__(self):
if hasattr(self.f, '__qualname__'):
func = self.f.__qualname__
elif hasattr(self.f, '__name__'):
func = self.f.__name__
if hasattr(self.f, 'im_class'):
func = self.f.im_class.__name__ + '.' + func
else:
func = reflect.safe_repr(self.f)
return 'LoopingCall<%r>(%s, *%s, **%s)' % (
self.interval, func, reflect.safe_repr(self.a),
reflect.safe_repr(self.kw))
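# Hedged usage sketch (illustrative, not part of Twisted): a periodic
# heartbeat driven by LoopingCall per the class docstring above.
#
# from twisted.internet import reactor, task
#
# def heartbeat():
#     print('ping')
#
# loop = task.LoopingCall(heartbeat)
# d = loop.start(5.0, now=True)      # call immediately, then every 5 seconds
# reactor.callLater(30, loop.stop)   # stop() fires d's callback with `loop`
# reactor.run()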
class SchedulerError(Exception):
"""
The operation could not be completed because the scheduler or one of its
tasks was in an invalid state. This exception should not be raised
directly, but is a superclass of various scheduler-state-related
exceptions.
"""
class SchedulerStopped(SchedulerError):
"""
The operation could not complete because the scheduler was stopped in
progress or was already stopped.
"""
class TaskFinished(SchedulerError):
"""
The operation could not complete because the task was already completed,
stopped, encountered an error or otherwise permanently stopped running.
"""
class TaskDone(TaskFinished):
"""
The operation could not complete because the task was already completed.
"""
class TaskStopped(TaskFinished):
"""
The operation could not complete because the task was stopped.
"""
class TaskFailed(TaskFinished):
"""
The operation could not complete because the task died with an unhandled
error.
"""
class NotPaused(SchedulerError):
"""
This exception is raised when a task is resumed which was not previously
paused.
"""
class _Timer(object):
MAX_SLICE = 0.01
def __init__(self):
self.end = time.time() + self.MAX_SLICE
def __call__(self):
return time.time() >= self.end
_EPSILON = 0.00000001
def _defaultScheduler(x):
from twisted.internet import reactor
return reactor.callLater(_EPSILON, x)
class CooperativeTask(object):
"""
A L{CooperativeTask} is a task object inside a L{Cooperator}, which can be
paused, resumed, and stopped. It can also have its completion (or
termination) monitored.
@see: L{Cooperator.cooperate}
@ivar _iterator: the iterator to iterate when this L{CooperativeTask} is
asked to do work.
@ivar _cooperator: the L{Cooperator} that this L{CooperativeTask}
participates in, which is used to re-insert it upon resume.
@ivar _deferreds: the list of L{defer.Deferred}s to fire when this task
completes, fails, or finishes.
@type _deferreds: C{list}
@type _cooperator: L{Cooperator}
@ivar _pauseCount: the number of times that this L{CooperativeTask} has
been paused; if 0, it is running.
@type _pauseCount: C{int}
@ivar _completionState: The completion-state of this L{CooperativeTask}.
C{None} if the task is not yet completed, an instance of L{TaskStopped}
if C{stop} was called to stop this task early, of L{TaskFailed} if the
application code in the iterator raised an exception which caused it to
terminate, and of L{TaskDone} if it terminated normally via raising
C{StopIteration}.
@type _completionState: L{TaskFinished}
"""
def __init__(self, iterator, cooperator):
"""
A private constructor: to create a new L{CooperativeTask}, see
L{Cooperator.cooperate}.
"""
self._iterator = iterator
self._cooperator = cooperator
self._deferreds = []
self._pauseCount = 0
self._completionState = None
self._completionResult = None
cooperator._addTask(self)
def whenDone(self):
"""
Get a L{defer.Deferred} notification of when this task is complete.
@return: a L{defer.Deferred} that fires with the C{iterator} that this
L{CooperativeTask} was created with when the iterator has been
exhausted (i.e. its C{next} method has raised C{StopIteration}), or
fails with the exception raised by C{next} if it raises some other
exception.
@rtype: L{defer.Deferred}
"""
d = defer.Deferred()
if self._completionState is None:
self._deferreds.append(d)
else:
d.callback(self._completionResult)
return d
def pause(self):
"""
Pause this L{CooperativeTask}. Stop doing work until
L{CooperativeTask.resume} is called. If C{pause} is called more than
once, C{resume} must be called an equal number of times to resume this
task.
@raise TaskFinished: if this task has already finished or completed.
"""
self._checkFinish()
self._pauseCount += 1
if self._pauseCount == 1:
self._cooperator._removeTask(self)
def resume(self):
"""
Resume processing of a paused L{CooperativeTask}.
@raise NotPaused: if this L{CooperativeTask} is not paused.
"""
if self._pauseCount == 0:
raise NotPaused()
self._pauseCount -= 1
if self._pauseCount == 0 and self._completionState is None:
self._cooperator._addTask(self)
def _completeWith(self, completionState, deferredResult):
"""
@param completionState: a L{TaskFinished} exception or a subclass
thereof, indicating what exception should be raised when subsequent
operations are performed.
@param deferredResult: the result to fire all the deferreds with.
"""
self._completionState = completionState
self._completionResult = deferredResult
if not self._pauseCount:
self._cooperator._removeTask(self)
# The Deferreds need to be invoked after all this is completed, because
# a Deferred may want to manipulate other tasks in a Cooperator. For
# example, if you call "stop()" on a cooperator in a callback on a
# Deferred returned from whenDone(), this CooperativeTask must be gone
# from the Cooperator by that point so that _completeWith is not
# invoked reentrantly; that would cause these Deferreds to blow up with
# an AlreadyCalledError, or the _removeTask to fail with a ValueError.
for d in self._deferreds:
d.callback(deferredResult)
def stop(self):
"""
Stop further processing of this task.
@raise TaskFinished: if this L{CooperativeTask} has previously
completed, via C{stop}, completion, or failure.
"""
self._checkFinish()
self._completeWith(TaskStopped(), Failure(TaskStopped()))
def _checkFinish(self):
"""
If this task has been stopped, raise the appropriate subclass of
L{TaskFinished}.
"""
if self._completionState is not None:
raise self._completionState
def _oneWorkUnit(self):
"""
Perform one unit of work for this task, retrieving one item from its
iterator, stopping if there are no further items in the iterator, and
pausing if the result was a L{defer.Deferred}.
"""
try:
result = next(self._iterator)
except StopIteration:
self._completeWith(TaskDone(), self._iterator)
except:
self._completeWith(TaskFailed(), Failure())
else:
if isinstance(result, defer.Deferred):
self.pause()
def failLater(f):
self._completeWith(TaskFailed(), f)
result.addCallbacks(lambda result: self.resume(),
failLater)
class Cooperator(object):
"""
Cooperative task scheduler.
A cooperative task is an iterator where each iteration represents an
atomic unit of work. When the iterator yields, it allows the
L{Cooperator} to decide which of its tasks to execute next. If the
iterator yields a L{defer.Deferred} then work will pause until the
L{defer.Deferred} fires and completes its callback chain.
When a L{Cooperator} has more than one task, it distributes work between
all tasks.
There are two ways to add tasks to a L{Cooperator}, L{cooperate} and
L{coiterate}. L{cooperate} is the more useful of the two, as it returns a
L{CooperativeTask}, which can be L{paused<CooperativeTask.pause>},
L{resumed<CooperativeTask.resume>} and L{waited
on<CooperativeTask.whenDone>}. L{coiterate} has the same effect, but
returns only a L{defer.Deferred} that fires when the task is done.
L{Cooperator} can be used for many things, including but not limited to:
- running one or more computationally intensive tasks without blocking
- limiting parallelism by running a subset of the total tasks
simultaneously
- doing one thing, waiting for a L{Deferred<defer.Deferred>} to fire,
doing the next thing, repeat (i.e. serializing a sequence of
asynchronous tasks)
Multiple L{Cooperator}s do not cooperate with each other, so for most
cases you should use the L{global cooperator<task.cooperate>}.
"""
def __init__(self,
terminationPredicateFactory=_Timer,
scheduler=_defaultScheduler,
started=True):
"""
Create a scheduler-like object to which iterators may be added.
@param terminationPredicateFactory: A no-argument callable which will
be invoked at the beginning of each step and should return a
no-argument callable which will return True when the step should be
terminated. The default factory is time-based and allows iterators to
run for 1/100th of a second at a time.
@param scheduler: A one-argument callable which takes a no-argument
callable and should invoke it at some future point. This will be used
to schedule each step of this Cooperator.
@param started: A boolean which indicates whether iterators should be
stepped as soon as they are added, or if they will be queued up until
L{Cooperator.start} is called.
"""
self._tasks = []
self._metarator = iter(())
self._terminationPredicateFactory = terminationPredicateFactory
self._scheduler = scheduler
self._delayedCall = None
self._stopped = False
self._started = started
def coiterate(self, iterator, doneDeferred=None):
"""
Add an iterator to the list of iterators this L{Cooperator} is
currently running.
Equivalent to L{cooperate}, but returns a L{defer.Deferred} that will
be fired when the task is done.
@param doneDeferred: If specified, this will be the Deferred used as
the completion deferred. It is suggested that you use the default,
which creates a new Deferred for you.
@return: a Deferred that will fire when the iterator finishes.
"""
if doneDeferred is None:
doneDeferred = defer.Deferred()
CooperativeTask(iterator, self).whenDone().chainDeferred(doneDeferred)
return doneDeferred
def cooperate(self, iterator):
"""
Start running the given iterator as a long-running cooperative task, by
calling next() on it as a periodic timed event.
@param iterator: the iterator to invoke.
@return: a L{CooperativeTask} object representing this task.
"""
return CooperativeTask(iterator, self)
def _addTask(self, task):
"""
Add a L{CooperativeTask} object to this L{Cooperator}.
"""
if self._stopped:
self._tasks.append(task) # XXX silly, I know, but _completeWith
# does the inverse
task._completeWith(SchedulerStopped(), Failure(SchedulerStopped()))
else:
self._tasks.append(task)
self._reschedule()
def _removeTask(self, task):
"""
Remove a L{CooperativeTask} from this L{Cooperator}.
"""
self._tasks.remove(task)
# If no work left to do, cancel the delayed call:
if not self._tasks and self._delayedCall:
self._delayedCall.cancel()
self._delayedCall = None
def _tasksWhileNotStopped(self):
"""
Yield all L{CooperativeTask} objects in a loop as long as this
L{Cooperator}'s termination condition has not been met.
"""
terminator = self._terminationPredicateFactory()
while self._tasks:
for t in self._metarator:
yield t
if terminator():
return
self._metarator = iter(self._tasks)
def _tick(self):
"""
Run one scheduler tick.
"""
self._delayedCall = None
for taskObj in self._tasksWhileNotStopped():
taskObj._oneWorkUnit()
self._reschedule()
_mustScheduleOnStart = False
def _reschedule(self):
if not self._started:
self._mustScheduleOnStart = True
return
if self._delayedCall is None and self._tasks:
self._delayedCall = self._scheduler(self._tick)
def start(self):
"""
Begin scheduling steps.
"""
self._stopped = False
self._started = True
if self._mustScheduleOnStart:
del self._mustScheduleOnStart
self._reschedule()
def stop(self):
"""
Stop scheduling steps. Errback the completion Deferreds of all
iterators which have been added and forget about them.
"""
self._stopped = True
for taskObj in self._tasks:
taskObj._completeWith(SchedulerStopped(),
Failure(SchedulerStopped()))
self._tasks = []
if self._delayedCall is not None:
self._delayedCall.cancel()
self._delayedCall = None
@property
def running(self):
"""
        Is this L{Cooperator} currently running?
@return: C{True} if the L{Cooperator} is running, C{False} otherwise.
@rtype: C{bool}
"""
return (self._started and not self._stopped)
_theCooperator = Cooperator()
def coiterate(iterator):
"""
Cooperatively iterate over the given iterator, dividing runtime between it
and all other iterators which have been passed to this function and not yet
exhausted.
@param iterator: the iterator to invoke.
@return: a Deferred that will fire when the iterator finishes.
"""
return _theCooperator.coiterate(iterator)
def cooperate(iterator):
"""
Start running the given iterator as a long-running cooperative task, by
calling next() on it as a periodic timed event.
This is very useful if you have computationally expensive tasks that you
want to run without blocking the reactor. Just break each task up so that
it yields frequently, pass it in here and the global L{Cooperator} will
make sure work is distributed between them without blocking longer than a
single iteration of a single task.
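    For example, a minimal illustrative sketch (C{getPage} is
    L{twisted.web.client.getPage}; work pauses on each yielded Deferred)::

        def fetchPages(urls):
            for url in urls:
                yield getPage(url)

        task.cooperate(fetchPages(urls))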
@param iterator: the iterator to invoke.
@return: a L{CooperativeTask} object representing this task.
"""
return _theCooperator.cooperate(iterator)
@implementer(IReactorTime)
class Clock:
"""
Provide a deterministic, easily-controlled implementation of
L{IReactorTime.callLater}. This is commonly useful for writing
deterministic unit tests for code which schedules events using this API.
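    For example, a minimal sketch::

        clock = Clock()
        clock.callLater(1.5, f)   # f is any no-argument callable
        clock.advance(2.0)        # f has now been run synchronously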
"""
rightNow = 0.0
def __init__(self):
self.calls = []
def seconds(self):
"""
Pretend to be time.time(). This is used internally when an operation
        such as L{IDelayedCall.reset} needs to determine a time value
relative to the current time.
@rtype: C{float}
@return: The time which should be considered the current time.
"""
return self.rightNow
def _sortCalls(self):
"""
Sort the pending calls according to the time they are scheduled.
"""
self.calls.sort(key=lambda a: a.getTime())
def callLater(self, when, what, *a, **kw):
"""
See L{twisted.internet.interfaces.IReactorTime.callLater}.
"""
dc = base.DelayedCall(self.seconds() + when,
what, a, kw,
self.calls.remove,
lambda c: None,
self.seconds)
self.calls.append(dc)
self._sortCalls()
return dc
def getDelayedCalls(self):
"""
See L{twisted.internet.interfaces.IReactorTime.getDelayedCalls}
"""
return self.calls
def advance(self, amount):
"""
Move time on this clock forward by the given amount and run whatever
pending calls should be run.
@type amount: C{float}
@param amount: The number of seconds which to advance this clock's
time.
"""
self.rightNow += amount
self._sortCalls()
while self.calls and self.calls[0].getTime() <= self.seconds():
call = self.calls.pop(0)
call.called = 1
call.func(*call.args, **call.kw)
self._sortCalls()
def pump(self, timings):
"""
Advance incrementally by the given set of times.
@type timings: iterable of C{float}
"""
for amount in timings:
self.advance(amount)
def deferLater(clock, delay, callable, *args, **kw):
"""
Call the given function after a certain period of time has passed.
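    For example, a minimal sketch (C{someFunction} is a placeholder for any
    callable)::

        from twisted.internet import reactor
        d = deferLater(reactor, 2.5, someFunction, arg1, arg2)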
@type clock: L{IReactorTime} provider
@param clock: The object which will be used to schedule the delayed
call.
@type delay: C{float} or C{int}
@param delay: The number of seconds to wait before calling the function.
@param callable: The object to call after the delay.
@param *args: The positional arguments to pass to C{callable}.
@param **kw: The keyword arguments to pass to C{callable}.
@rtype: L{defer.Deferred}
@return: A deferred that fires with the result of the callable when the
specified time has elapsed.
"""
def deferLaterCancel(deferred):
delayedCall.cancel()
d = defer.Deferred(deferLaterCancel)
d.addCallback(lambda ignored: callable(*args, **kw))
delayedCall = clock.callLater(delay, d.callback, None)
return d
def react(main, argv=(), _reactor=None):
"""
Call C{main} and run the reactor until the L{Deferred} it returns fires.
This is intended as the way to start up an application with a well-defined
completion condition. Use it to write clients or one-off asynchronous
operations. Prefer this to calling C{reactor.run} directly, as this
function will also:
- Take care to call C{reactor.stop} once and only once, and at the right
time.
- Log any failures from the C{Deferred} returned by C{main}.
- Exit the application when done, with exit code 0 in case of success and
1 in case of failure. If C{main} fails with a C{SystemExit} error, the
code returned is used.
The following demonstrates the signature of a C{main} function which can be
used with L{react}::
def main(reactor, username, password):
return defer.succeed('ok')
task.react(main, ('alice', 'secret'))
@param main: A callable which returns a L{Deferred}. It should
take the reactor as its first parameter, followed by the elements of
C{argv}.
@param argv: A list of arguments to pass to C{main}. If omitted the
callable will be invoked with no additional arguments.
@param _reactor: An implementation detail to allow easier unit testing. Do
not supply this parameter.
@since: 12.3
"""
if _reactor is None:
from twisted.internet import reactor as _reactor
finished = main(_reactor, *argv)
codes = [0]
stopping = []
_reactor.addSystemEventTrigger('before', 'shutdown', stopping.append, True)
def stop(result, stopReactor):
if stopReactor:
try:
_reactor.stop()
except ReactorNotRunning:
pass
if isinstance(result, Failure):
if result.check(SystemExit) is not None:
code = result.value.code
else:
log.err(result, "main function encountered error")
code = 1
codes[0] = code
def cbFinish(result):
if stopping:
stop(result, False)
else:
_reactor.callWhenRunning(stop, result, True)
finished.addBoth(cbFinish)
_reactor.run()
sys.exit(codes[0])
__all__ = [
'LoopingCall',
'Clock',
'SchedulerStopped', 'Cooperator', 'coiterate',
'deferLater', 'react']
| bsd-3-clause |
azaghal/ansible | test/lib/ansible_test/_internal/cloud/scaleway.py | 49 | 1991 | """Scaleway plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..util import (
ConfigParser,
display,
)
class ScalewayCloudProvider(CloudProvider):
"""Checks if a configuration file has been passed or fixtures are going to be used for testing"""
def __init__(self, args):
"""
:type args: TestConfig
"""
super(ScalewayCloudProvider, self).__init__(args)
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
super(ScalewayCloudProvider, self).filter(targets, exclude)
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(ScalewayCloudProvider, self).setup()
if os.path.isfile(self.config_static_path):
self.config_path = self.config_static_path
self.managed = False
class ScalewayCloudEnvironment(CloudEnvironment):
"""
    Updates the integration test environment after delegation. Will set up the config file as a parameter.
"""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
parser = ConfigParser()
parser.read(self.config_path)
env_vars = dict(
SCW_API_KEY=parser.get('default', 'key'),
SCW_ORG=parser.get('default', 'org')
)
display.sensitive.add(env_vars['SCW_API_KEY'])
ansible_vars = dict(
scw_org=parser.get('default', 'org'),
)
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
)
| gpl-3.0 |
jyotikamboj/container | django/contrib/redirects/middleware.py | 109 | 1861 | from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.contrib.redirects.models import Redirect
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured
from django import http
class RedirectFallbackMiddleware(object):
# Defined as class-level attributes to be subclassing-friendly.
response_gone_class = http.HttpResponseGone
response_redirect_class = http.HttpResponsePermanentRedirect
def __init__(self):
if not apps.is_installed('django.contrib.sites'):
raise ImproperlyConfigured(
"You cannot use RedirectFallbackMiddleware when "
"django.contrib.sites is not installed."
)
def process_response(self, request, response):
# No need to check for a redirect for non-404 responses.
if response.status_code != 404:
return response
full_path = request.get_full_path()
current_site = get_current_site(request)
r = None
try:
r = Redirect.objects.get(site=current_site, old_path=full_path)
except Redirect.DoesNotExist:
pass
if settings.APPEND_SLASH and not request.path.endswith('/'):
# Try appending a trailing slash.
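            # The slash is inserted before any query string, e.g.
            # '/example?page=2' becomes '/example/?page=2'.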
path_len = len(request.path)
full_path = full_path[:path_len] + '/' + full_path[path_len:]
try:
r = Redirect.objects.get(site=current_site, old_path=full_path)
except Redirect.DoesNotExist:
pass
if r is not None:
if r.new_path == '':
return self.response_gone_class()
return self.response_redirect_class(r.new_path)
# No redirect was found. Return the response.
return response
| mit |
python-bonobo/bonobo | bonobo/config/configurables.py | 2 | 7319 | from bonobo.errors import AbstractError
from bonobo.util import get_name, iscontextprocessor, isoption, sortedlist
__all__ = ["Configurable"]
get_creation_counter = lambda v: v._creation_counter
class ConfigurableMeta(type):
"""
Metaclass for Configurables that will add options to a special __options__ dict.
"""
def __init__(cls, what, bases=None, dict=None):
super().__init__(what, bases, dict)
cls.__processors = sortedlist()
cls.__processors_cache = None
cls.__methods = sortedlist()
cls.__options = sortedlist()
cls.__names = set()
# cls.__kwoptions = []
for typ in cls.__mro__:
for name, value in filter(lambda x: isoption(x[1]), typ.__dict__.items()):
if iscontextprocessor(value):
cls.__processors.insort((value._creation_counter, value))
continue
if not value.name:
value.name = name
                if name not in cls.__names:
cls.__names.add(name)
cls.__options.insort((not value.positional, value._creation_counter, name, value))
        # Docstring formatting
_options_doc = []
for _positional, _counter, _name, _value in cls.__options:
_param = _name
if _value.type:
_param = get_name(_value.type) + " " + _param
prefix = ":param {}: ".format(_param)
for lineno, line in enumerate((_value.__doc__ or "").split("\n")):
_options_doc.append((" " * len(prefix) if lineno else prefix) + line)
cls.__doc__ = "\n\n".join(map(str.strip, filter(None, (cls.__doc__, "\n".join(_options_doc)))))
@property
def __options__(cls):
return ((name, option) for _, _, name, option in cls.__options)
@property
def __options_dict__(cls):
return dict(cls.__options__)
@property
def __processors__(cls):
if cls.__processors_cache is None:
cls.__processors_cache = [processor for _, processor in cls.__processors]
return cls.__processors_cache
def __repr__(self):
return " ".join(("<Configurable", super(ConfigurableMeta, self).__repr__().split(" ", 1)[1]))
try:
import _functools
except ImportError:
import functools
PartiallyConfigured = functools.partial
else:
class PartiallyConfigured(_functools.partial):
@property # TODO XXX cache this
def _options_values(self):
""" Simulate option values for partially configured objects. """
try:
return self.__options_values
except AttributeError:
self.__options_values = {**self.keywords}
position = 0
for name, option in self.func.__options__:
if not option.positional:
break # no positional left
if name in self.keywords:
continue # already fulfilled
self.__options_values[name] = self.args[position] if len(self.args) >= position + 1 else None
position += 1
return self.__options_values
def __getattr__(self, item):
_dict = self.func.__options_dict__
if item in _dict:
return _dict[item].__get__(self, self.func)
return getattr(self.func, item)
class Configurable(metaclass=ConfigurableMeta):
"""
Generic class for configurable objects. Configurable objects have a dictionary of "options" descriptors that defines
the configuration schema of the type.
"""
def __new__(cls, *args, _final=False, **kwargs):
"""
Custom instance builder. If not all options are fulfilled, will return a :class:`PartiallyConfigured` instance
which is just a :class:`functools.partial` object that behaves like a :class:`Configurable` instance.
        The special `_final` argument can be used to force a final instance to be created, or an error raised if options
are missing.
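        Illustrative sketch (assuming the ``Option`` descriptor from
        :mod:`bonobo.config`)::

            class Greet(Configurable):
                greeting = Option(str, positional=True, required=True)

            partial = Greet()          # missing option -> PartiallyConfigured
            final = partial("hello")   # now a fully configured instance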
:param args:
:param _final: bool
:param kwargs:
:return: Configurable or PartiallyConfigured
"""
options = tuple(cls.__options__)
# compute missing options, given the kwargs.
missing = set()
for name, option in options:
            if option.required and option.name not in kwargs:
missing.add(name)
# transform positional arguments in keyword arguments if possible.
position = 0
for name, option in options:
if not option.positional:
break # option orders make all positional options first, job done.
if not isoption(getattr(cls, name)):
missing.discard(name)
continue
if len(args) <= position:
break # no more positional arguments given.
position += 1
missing.discard(name)
# complain if there is more options than possible.
extraneous = set(kwargs.keys()) - (set(next(zip(*options))) if len(options) else set())
if len(extraneous):
raise TypeError(
"{}() got {} unexpected option{}: {}.".format(
cls.__name__,
len(extraneous),
"s" if len(extraneous) > 1 else "",
", ".join(map(repr, sorted(extraneous))),
)
)
# missing options? we'll return a partial instance to finish the work later, unless we're required to be
# "final".
if len(missing):
if _final:
raise TypeError(
"{}() missing {} required option{}: {}.".format(
cls.__name__,
len(missing),
"s" if len(missing) > 1 else "",
", ".join(map(repr, sorted(missing))),
)
)
return PartiallyConfigured(cls, *args, **kwargs)
return super(Configurable, cls).__new__(cls)
def __init__(self, *args, **kwargs):
# initialize option's value dictionary, used by descriptor implementation (see Option).
self._options_values = {**kwargs}
# set option values.
for name, value in kwargs.items():
setattr(self, name, value)
position = 0
for name, option in self.__options__:
if not option.positional:
break # option orders make all positional options first
            # value was overridden? Skip.
maybe_value = getattr(type(self), name)
if not isoption(maybe_value):
continue
if len(args) <= position:
break
if name in self._options_values:
raise ValueError("Already got a value for option {}".format(name))
setattr(self, name, args[position])
position += 1
def __call__(self, *args, **kwargs):
raise AbstractError(self.__call__)
@property
def __options__(self):
return type(self).__options__
@property
def __processors__(self):
return type(self).__processors__
| apache-2.0 |
rcordovano/autopsy | release_scripts/localization_scripts/test/test_csvutil.py | 3 | 1591 | import codecs
import os
import unittest
from typing import TypeVar, List
from csvutil import records_to_csv, csv_to_records
from test.unittestutil import get_output_path
class CsvUtilTest(unittest.TestCase):
T = TypeVar('T')
def assert_equal_arr(self, a: List[T], b: List[T]):
self.assertEqual(len(a), len(b), 'arrays are not equal length')
for i in range(0, len(a)):
if isinstance(a[i], list) and isinstance(b[i], list):
self.assert_equal_arr(a[i], b[i])
else:
self.assertEqual(a[i], b[i], "Items: {0} and {1} at index {2} are not equal.".format(a[i], b[i], i))
def test_read_write(self):
data = [['header1', 'header2', 'header3', 'additional header'],
['data1', 'data2', 'data3'],
['', 'data2-1', 'data2-2']]
os.makedirs(get_output_path(), exist_ok=True)
test_path = get_output_path('test.csv')
records_to_csv(test_path, data)
byte_inf = min(32, os.path.getsize(test_path))
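        # The UTF-8 BOM is only 3 bytes (EF BB BF), so 32 bytes is plenty.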
with open(test_path, 'rb') as bom_test_file:
raw = bom_test_file.read(byte_inf)
if not raw.startswith(codecs.BOM_UTF8):
self.fail("written csv does not have appropriate BOM")
read_records_no_header, no_header = csv_to_records(test_path, header_row=False)
self.assert_equal_arr(read_records_no_header, data)
read_rows, header = csv_to_records(test_path, header_row=True)
self.assert_equal_arr(header, data[0])
self.assert_equal_arr(read_rows, [data[1], data[2]])
| apache-2.0 |
vponomaryov/rally | rally/plugins/openstack/scenarios/ceilometer/samples.py | 1 | 1179 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.benchmark.scenarios import base
from rally.benchmark import validation
from rally import consts
from rally.plugins.openstack.scenarios.ceilometer import utils as ceiloutils
class CeilometerSamples(ceiloutils.CeilometerScenario):
"""Benchmark scenarios for Ceilometer Samples API."""
@validation.required_services(consts.Service.CEILOMETER)
@validation.required_openstack(users=True)
@base.scenario()
def list_samples(self):
"""Fetch all samples.
        This scenario fetches a list of all samples.
"""
self._list_samples()
| apache-2.0 |
Frodox/buildbot | master/buildbot/www/hooks/bitbucketserver.py | 2 | 6256 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# Copyright Mamba Team
from __future__ import absolute_import
from __future__ import print_function
import json
from twisted.python import log
from buildbot.util import bytes2NativeString
GIT_BRANCH_REF = "refs/heads/{}"
GIT_MERGE_REF = "refs/pull-requests/{}/merge"
GIT_TAG_REF = "refs/tags/{}"
_HEADER_EVENT = b'X-Event-Key'
class BitbucketServerEventHandler(object):
def __init__(self, master, options=None):
if options is None:
options = {}
self.master = master
if not isinstance(options, dict):
options = {}
self.options = options
self._codebase = self.options.get('codebase', None)
def process(self, request):
payload = self._get_payload(request)
event_type = request.getHeader(_HEADER_EVENT)
event_type = bytes2NativeString(event_type)
log.msg("Processing event {header}: {event}"
.format(header=_HEADER_EVENT, event=event_type))
event_type = event_type.replace(":", "_")
handler = getattr(self, 'handle_{}'.format(event_type), None)
if handler is None:
raise ValueError('Unknown event: {}'.format(event_type))
return handler(payload)
def _get_payload(self, request):
content = request.content.read()
content = bytes2NativeString(content)
content_type = request.getHeader(b'Content-Type')
content_type = bytes2NativeString(content_type)
if content_type.startswith('application/json'):
payload = json.loads(content)
else:
raise ValueError('Unknown content type: {}'
.format(content_type))
log.msg("Payload: {}".format(payload))
return payload
def handle_repo_push(self, payload):
changes = []
project = payload['repository']['project']['name']
repo_url = payload['repository']['links']['self'][0]['href']
        # Note: str.rstrip() strips a character set, not a suffix, so slice
        # off the trailing 'browse' path component explicitly.
        if repo_url.endswith('browse'):
            repo_url = repo_url[:-len('browse')]
for payload_change in payload['push']['changes']:
if payload_change['new']:
age = 'new'
category = 'push'
else: # when new is null the ref is deleted
age = 'old'
category = 'ref-deleted'
commit_hash = payload_change[age]['target']['hash']
if payload_change[age]['type'] == 'branch':
branch = GIT_BRANCH_REF.format(payload_change[age]['name'])
elif payload_change[age]['type'] == 'tag':
branch = GIT_TAG_REF.format(payload_change[age]['name'])
change = {
'revision': commit_hash,
'revlink': '{}commits/{}'.format(repo_url, commit_hash),
'repository': repo_url,
'author': '{} <{}>'.format(payload['actor']['displayName'],
payload['actor']['username']),
'comments': 'Bitbucket Server commit {}'.format(commit_hash),
'branch': branch,
'project': project,
'category': category
}
if callable(self._codebase):
change['codebase'] = self._codebase(payload)
elif self._codebase is not None:
change['codebase'] = self._codebase
changes.append(change)
return (changes, payload['repository']['scmId'])
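    # For reference, an abridged (illustrative, not exhaustive) repo:push
    # payload of the shape consumed above:
    #
    #     {"actor": {"displayName": "...", "username": "..."},
    #      "repository": {"scmId": "git",
    #                     "project": {"name": "..."},
    #                     "links": {"self": [{"href": ".../browse"}]}},
    #      "push": {"changes": [{"new": {"type": "branch", "name": "...",
    #                                    "target": {"hash": "..."}},
    #                            "old": null}]}}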
def handle_pullrequest_created(self, payload):
return self.handle_pullrequest(
payload,
GIT_MERGE_REF.format(int(payload['pullrequest']['id'])),
"pull-created")
def handle_pullrequest_updated(self, payload):
return self.handle_pullrequest(
payload,
GIT_MERGE_REF.format(int(payload['pullrequest']['id'])),
"pull-updated")
def handle_pullrequest_fulfilled(self, payload):
return self.handle_pullrequest(
payload,
GIT_BRANCH_REF.format(
payload['pullrequest']['toRef']['branch']['name']),
"pull-fulfilled")
def handle_pullrequest_rejected(self, payload):
return self.handle_pullrequest(
payload,
GIT_BRANCH_REF.format(
payload['pullrequest']['fromRef']['branch']['name']),
"pull-rejected")
def handle_pullrequest(self, payload, refname, category):
pr_number = int(payload['pullrequest']['id'])
repo_url = payload['repository']['links']['self'][0]['href']
        # As above: slice off the trailing 'browse' path component rather
        # than using str.rstrip(), which strips a character set.
        if repo_url.endswith('browse'):
            repo_url = repo_url[:-len('browse')]
change = {
'revision': payload['pullrequest']['fromRef']['commit']['hash'],
'revlink': payload['pullrequest']['link'],
'repository': repo_url,
'author': '{} <{}>'.format(payload['actor']['displayName'],
payload['actor']['username']),
'comments': 'Bitbucket Server Pull Request #{}'.format(pr_number),
'branch': refname,
'project': payload['repository']['project']['name'],
'category': category,
'properties': {'pullrequesturl': payload['pullrequest']['link']}
}
if callable(self._codebase):
change['codebase'] = self._codebase(payload)
elif self._codebase is not None:
change['codebase'] = self._codebase
return [change], payload['repository']['scmId']
def getChanges(self, request):
return self.process(request)
bitbucketserver = BitbucketServerEventHandler
| gpl-2.0 |
ocefpaf/cartopy | lib/cartopy/io/img_tiles.py | 1 | 22922 | # (C) British Crown Copyright 2011 - 2019, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Implements image tile identification and fetching from various sources.
The Matplotlib interface can make use of tile objects (defined below) via the
:meth:`cartopy.mpl.geoaxes.GeoAxes.add_image` method. For example, to add a
:class:`MapQuest Open Aerial tileset <MapQuestOpenAerial>` to an existing axes
at zoom level 2, do ``ax.add_image(MapQuestOpenAerial(), 2)``. An example of
using tiles in this way can be found at the
:ref:`sphx_glr_gallery_eyja_volcano.py` example.
"""
from __future__ import (absolute_import, division, print_function)
from abc import ABCMeta, abstractmethod
import concurrent.futures
import warnings
from PIL import Image
import shapely.geometry as sgeom
import numpy as np
import six
import cartopy
import cartopy.crs as ccrs
class GoogleWTS(six.with_metaclass(ABCMeta, object)):
"""
Implement web tile retrieval using the Google WTS coordinate system.
A "tile" in this class refers to the coordinates (x, y, z).
"""
_MAX_THREADS = 24
def __init__(self, desired_tile_form='RGB',
user_agent='CartoPy/' + cartopy.__version__):
self.imgs = []
self.crs = ccrs.Mercator.GOOGLE
self.desired_tile_form = desired_tile_form
self.user_agent = user_agent
# some providers like osm need a user_agent in the request issue #1341
# osm may reject requests if there are too many of them, in which case
# a change of user_agent may fix the issue.
def image_for_domain(self, target_domain, target_z):
tiles = []
def fetch_tile(tile):
try:
img, extent, origin = self.get_image(tile)
except IOError:
# Some services 404 for tiles that aren't supposed to be
# there (e.g. out of range).
raise
img = np.array(img)
x = np.linspace(extent[0], extent[1], img.shape[1])
y = np.linspace(extent[2], extent[3], img.shape[0])
return img, x, y, origin
with concurrent.futures.ThreadPoolExecutor(
max_workers=self._MAX_THREADS) as executor:
futures = []
for tile in self.find_images(target_domain, target_z):
futures.append(executor.submit(fetch_tile, tile))
for future in concurrent.futures.as_completed(futures):
try:
img, x, y, origin = future.result()
tiles.append([img, x, y, origin])
except IOError:
pass
img, extent, origin = _merge_tiles(tiles)
return img, extent, origin
def _find_images(self, target_domain, target_z, start_tile=(0, 0, 0)):
"""Target domain is a shapely polygon in native coordinates."""
assert isinstance(target_z, int) and target_z >= 0, ('target_z must '
'be an integer '
'>=0.')
# Recursively drill down to the images at the target zoom.
x0, x1, y0, y1 = self._tileextent(start_tile)
domain = sgeom.box(x0, y0, x1, y1)
if domain.intersects(target_domain):
if start_tile[2] == target_z:
yield start_tile
else:
for tile in self._subtiles(start_tile):
for result in self._find_images(target_domain, target_z,
start_tile=tile):
yield result
find_images = _find_images
def subtiles(self, x_y_z):
x, y, z = x_y_z
# Google tile specific (i.e. up->down).
for xi in range(0, 2):
for yi in range(0, 2):
yield x * 2 + xi, y * 2 + yi, z + 1
_subtiles = subtiles
def tile_bbox(self, x, y, z, y0_at_north_pole=True):
"""
Return the ``(x0, x1), (y0, y1)`` bounding box for the given x, y, z
tile position.
Parameters
----------
x
The x tile coordinate in the Google tile numbering system.
y
The y tile coordinate in the Google tile numbering system.
z
The z tile coordinate in the Google tile numbering system.
y0_at_north_pole: optional
Boolean representing whether the numbering of the y coordinate
starts at the north pole (as is the convention for Google tiles)
or not (in which case it will start at the south pole, as is the
convention for TMS). Defaults to True.
"""
n = 2 ** z
        assert 0 <= x <= (n - 1), ("Tile's x index is out of range. Upper "
                                   "limit %s. Got %s" % (n - 1, x))
        assert 0 <= y <= (n - 1), ("Tile's y index is out of range. Upper "
                                   "limit %s. Got %s" % (n - 1, y))
x0, x1 = self.crs.x_limits
y0, y1 = self.crs.y_limits
# Compute the box height and width in native coordinates
# for this zoom level.
box_h = (y1 - y0) / n
box_w = (x1 - x0) / n
# Compute the native x & y extents of the tile.
n_xs = x0 + (x + np.arange(0, 2, dtype=np.float64)) * box_w
n_ys = y0 + (y + np.arange(0, 2, dtype=np.float64)) * box_h
if y0_at_north_pole:
n_ys = -1 * n_ys[::-1]
return n_xs, n_ys
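    # Illustrative note: at zoom z the native extent is an n-by-n grid with
    # n = 2 ** z, so each tile spans (x1 - x0) / n by (y1 - y0) / n of the
    # limits reported by self.crs.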
def tileextent(self, x_y_z):
"""Return extent tuple ``(x0,x1,y0,y1)`` in Mercator coordinates."""
x, y, z = x_y_z
x_lim, y_lim = self.tile_bbox(x, y, z, y0_at_north_pole=True)
return tuple(x_lim) + tuple(y_lim)
_tileextent = tileextent
@abstractmethod
def _image_url(self, tile):
pass
def get_image(self, tile):
if six.PY3:
from urllib.request import urlopen, Request, HTTPError, URLError
else:
from urllib2 import urlopen, Request, HTTPError, URLError
url = self._image_url(tile)
try:
request = Request(url, headers={"User-Agent": self.user_agent})
fh = urlopen(request)
im_data = six.BytesIO(fh.read())
fh.close()
img = Image.open(im_data)
except (HTTPError, URLError) as err:
print(err)
img = Image.fromarray(np.full((256, 256, 3), (250, 250, 250),
dtype=np.uint8))
img = img.convert(self.desired_tile_form)
return img, self.tileextent(tile), 'lower'
class GoogleTiles(GoogleWTS):
def __init__(self, desired_tile_form='RGB', style="street",
url=('https://mts0.google.com/vt/lyrs={style}'
'@177000000&hl=en&src=api&x={x}&y={y}&z={z}&s=G')):
"""
Parameters
----------
desired_tile_form: optional
Defaults to 'RGB'.
style: optional
The style for the Google Maps tiles. One of 'street',
'satellite', 'terrain', and 'only_streets'. Defaults to 'street'.
url: optional
URL pointing to a tile source and containing {x}, {y}, and {z}.
Such as: ``'https://server.arcgisonline.com/ArcGIS/rest/services/\
World_Shaded_Relief/MapServer/tile/{z}/{y}/{x}.jpg'``
"""
styles = ["street", "satellite", "terrain", "only_streets"]
style = style.lower()
self.url = url
if style not in styles:
msg = "Invalid style '%s'. Valid styles: %s" % \
(style, ", ".join(styles))
raise ValueError(msg)
self.style = style
# The 'satellite' and 'terrain' styles require pillow with a jpeg
# decoder.
if self.style in ["satellite", "terrain"] and \
not hasattr(Image.core, "jpeg_decoder") or \
not Image.core.jpeg_decoder:
msg = "The '%s' style requires pillow with jpeg decoding support."
raise ValueError(msg % self.style)
return super(GoogleTiles, self).__init__(
desired_tile_form=desired_tile_form)
def _image_url(self, tile):
style_dict = {
"street": "m",
"satellite": "s",
"terrain": "t",
"only_streets": "h"}
url = self.url.format(
style=style_dict[self.style],
x=tile[0], X=tile[0],
y=tile[1], Y=tile[1],
z=tile[2], Z=tile[2])
return url
class MapQuestOSM(GoogleWTS):
# https://developer.mapquest.com/web/products/open/map for terms of use
# https://devblog.mapquest.com/2016/06/15/
# modernization-of-mapquest-results-in-changes-to-open-tile-access/
# this now requires a sign up to a plan
def _image_url(self, tile):
x, y, z = tile
url = 'https://otile1.mqcdn.com/tiles/1.0.0/osm/%s/%s/%s.jpg' % (
z, x, y)
mqdevurl = ('https://devblog.mapquest.com/2016/06/15/'
'modernization-of-mapquest-results-in-changes'
'-to-open-tile-access/')
        warnings.warn('{} will require a log in and will likely'
                      ' fail. See {} for more details.'.format(url, mqdevurl))
return url
class MapQuestOpenAerial(GoogleWTS):
# https://developer.mapquest.com/web/products/open/map for terms of use
# The following attribution should be included in the resulting image:
# "Portions Courtesy NASA/JPL-Caltech and U.S. Depart. of Agriculture,
# Farm Service Agency"
def _image_url(self, tile):
x, y, z = tile
url = 'https://oatile1.mqcdn.com/tiles/1.0.0/sat/%s/%s/%s.jpg' % (
z, x, y)
return url
class OSM(GoogleWTS):
# https://operations.osmfoundation.org/policies/tiles/ for terms of use
def _image_url(self, tile):
x, y, z = tile
url = 'https://a.tile.openstreetmap.org/%s/%s/%s.png' % (z, x, y)
return url
class Stamen(GoogleWTS):
"""
Retrieves tiles from maps.stamen.com. Styles include
``terrain-background``, ``terrain``, ``toner`` and ``watercolor``.
For a full reference on the styles available please see
http://maps.stamen.com. Of particular note are the sub-styles
that are made available (e.g. ``terrain`` and ``terrain-background``).
To determine the name of the particular [sub-]style you want,
follow the link on http://maps.stamen.com to your desired style and
observe the style name in the URL. Your style name will be in the
form of: ``http://maps.stamen.com/{STYLE_NAME}/#9/37/-122``.
Except otherwise noted, the Stamen map tile sets are copyright Stamen
Design, under a Creative Commons Attribution (CC BY 3.0) license.
Please see the attribution notice at http://maps.stamen.com on how to
attribute this imagery.
"""
def __init__(self, style='toner', desired_tile_form='RGB'):
super(Stamen, self).__init__(desired_tile_form=desired_tile_form)
self.style = style
def _image_url(self, tile):
return ('http://tile.stamen.com/{self.style}/{z}/{x}/{y}.png'
.format(self=self, x=tile[0], y=tile[1], z=tile[2]))
class StamenTerrain(Stamen):
"""
**DEPRECATED:** This class is deprecated. Please use
``Stamen('terrain-background')`` instead.
Terrain tiles defined for the continental United States, and include land
color and shaded hills. The land colors are a custom palette developed by
Gem Spear for the National Atlas 1km land cover data set, which defines
twenty-four land classifications including five kinds of forest,
combinations of shrubs, grasses and crops, and a few tundras and wetlands.
The colors are at their highest contrast when fully zoomed-out to the
whole U.S., and they slowly fade out to pale off-white as you zoom in to
leave room for foreground data and break up the weirdness of large areas
of flat, dark green.
References
----------
* http://mike.teczno.com/notes/osm-us-terrain-layer/background.html
* http://maps.stamen.com/
* https://wiki.openstreetmap.org/wiki/List_of_OSM_based_Services
* https://github.com/migurski/DEM-Tools
"""
def __init__(self):
warnings.warn(
"The StamenTerrain class was deprecated in v0.17. "
"Please use Stamen('terrain-background') instead.",
DeprecationWarning,
stacklevel=2)
# NOTE: This subclass of Stamen exists for legacy reasons.
# No further Stamen subclasses will be accepted as
# they can easily be created in user code with Stamen(style_name).
return super(StamenTerrain, self).__init__(style='terrain-background')
class MapboxTiles(GoogleWTS):
"""
Implement web tile retrieval from Mapbox.
For terms of service, see https://www.mapbox.com/tos/.
"""
def __init__(self, access_token, map_id):
"""
Set up a new Mapbox tiles instance.
Access to Mapbox web services requires an access token and a map ID.
See https://www.mapbox.com/api-documentation/ for details.
Parameters
----------
access_token
A valid Mapbox API access token.
map_id
An ID for a publicly accessible map (provided by Mapbox).
This is the map whose tiles will be retrieved through this process.
"""
self.access_token = access_token
self.map_id = map_id
super(MapboxTiles, self).__init__()
def _image_url(self, tile):
x, y, z = tile
url = ('https://api.mapbox.com/v4/mapbox.{id}/{z}/{x}/{y}.png'
'?access_token={token}'.format(z=z, y=y, x=x,
id=self.map_id,
token=self.access_token))
return url
class MapboxStyleTiles(GoogleWTS):
"""
Implement web tile retrieval from a user-defined Mapbox style. For more
details on Mapbox styles, see
https://www.mapbox.com/studio-manual/overview/map-styling/.
For terms of service, see https://www.mapbox.com/tos/.
"""
def __init__(self, access_token, username, map_id):
"""
Set up a new instance to retrieve tiles from a Mapbox style.
Access to Mapbox web services requires an access token and a map ID.
See https://www.mapbox.com/api-documentation/ for details.
Parameters
----------
access_token
A valid Mapbox API access token.
username
The username for the Mapbox user who defined the Mapbox style.
map_id
A map ID for a map defined by a Mapbox style. This is the map whose
tiles will be retrieved through this process. Note that this style
may be private and if your access token does not have permissions
to view this style, then map tile retrieval will fail.
"""
self.access_token = access_token
self.username = username
self.map_id = map_id
super(MapboxStyleTiles, self).__init__()
def _image_url(self, tile):
x, y, z = tile
url = ('https://api.mapbox.com/styles/v1/'
'{user}/{mapid}/tiles/256/{z}/{x}/{y}'
'?access_token={token}'.format(z=z, y=y, x=x,
user=self.username,
mapid=self.map_id,
token=self.access_token))
return url
class QuadtreeTiles(GoogleWTS):
"""
Implement web tile retrieval using the Microsoft WTS quadkey coordinate
system.
A "tile" in this class refers to a quadkey such as "1", "14" or "141"
    where the length of the quadkey is the zoom level in Google Tile terms.
"""
def _image_url(self, tile):
url = ('http://ecn.dynamic.t1.tiles.virtualearth.net/comp/'
'CompositionHandler/{tile}?mkt=en-'
'gb&it=A,G,L&shading=hill&n=z'.format(tile=tile))
return url
def tms_to_quadkey(self, tms, google=False):
quadKey = ""
x, y, z = tms
# this algorithm works with google tiles, rather than tms, so convert
# to those first.
if not google:
y = (2 ** z - 1) - y
for i in range(z, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (x & mask) != 0:
digit += 1
if (y & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
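    # Illustrative check (Google numbering): tile (x=3, y=1, z=2) maps to
    # quadkey '13', i.e. tms_to_quadkey((3, 1, 2), google=True) == '13'.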
def quadkey_to_tms(self, quadkey, google=False):
# algorithm ported from
# https://msdn.microsoft.com/en-us/library/bb259689.aspx
assert isinstance(quadkey, six.string_types), \
'quadkey must be a string'
x = y = 0
z = len(quadkey)
for i in range(z, 0, -1):
mask = 1 << (i - 1)
if quadkey[z - i] == '0':
pass
elif quadkey[z - i] == '1':
x |= mask
elif quadkey[z - i] == '2':
y |= mask
elif quadkey[z - i] == '3':
x |= mask
y |= mask
else:
raise ValueError('Invalid QuadKey digit '
'sequence.' + str(quadkey))
# the algorithm works to google tiles, so convert to tms
if not google:
y = (2 ** z - 1) - y
return (x, y, z)
def subtiles(self, quadkey):
for i in range(4):
yield quadkey + str(i)
def tileextent(self, quadkey):
x_y_z = self.quadkey_to_tms(quadkey, google=True)
return GoogleWTS.tileextent(self, x_y_z)
def find_images(self, target_domain, target_z, start_tile=None):
"""
Find all the quadtrees at the given target zoom, in the given
target domain.
target_z must be a value >= 1.
"""
if target_z == 0:
raise ValueError('The empty quadtree cannot be returned.')
if start_tile is None:
start_tiles = ['0', '1', '2', '3']
else:
start_tiles = [start_tile]
for start_tile in start_tiles:
start_tile = self.quadkey_to_tms(start_tile, google=True)
for tile in GoogleWTS.find_images(self, target_domain, target_z,
start_tile=start_tile):
yield self.tms_to_quadkey(tile, google=True)
class OrdnanceSurvey(GoogleWTS):
"""
Implement web tile retrieval from Ordnance Survey map data.
To use this tile image source you will need to obtain an
API key from Ordnance Survey.
For more details on Ordnance Survey layer styles, see
https://apidocs.os.uk/docs/map-styles.
For the API framework agreement, see
https://developer.ordnancesurvey.co.uk/os-api-framework-agreement.
"""
# API Documentation: https://apidocs.os.uk/docs/os-maps-wmts
def __init__(self, apikey, layer='Road', desired_tile_form='RGB'):
"""
Parameters
----------
apikey: required
The authentication key provided by OS to query the maps API
layer: optional
The style of the Ordnance Survey map tiles. One of 'Outdoor',
'Road', 'Light', 'Night', 'Leisure'. Defaults to 'Road'.
Details about the style of layer can be found at:
- https://apidocs.os.uk/docs/layer-information
- https://apidocs.os.uk/docs/map-styles
desired_tile_form: optional
Defaults to 'RGB'.
"""
super(OrdnanceSurvey, self).__init__(
desired_tile_form=desired_tile_form)
self.apikey = apikey
if layer not in ['Outdoor', 'Road', 'Light', 'Night', 'Leisure']:
raise ValueError('Invalid layer {}'.format(layer))
self.layer = layer
def _image_url(self, tile):
x, y, z = tile
url = ('https://api2.ordnancesurvey.co.uk/'
'mapping_api/v1/service/wmts?'
'key={apikey}&height=256&width=256&tilematrixSet=EPSG%3A3857&'
'version=1.0.0&style=true&layer={layer}%203857&'
'SERVICE=WMTS&REQUEST=GetTile&format=image%2Fpng&'
'TileMatrix=EPSG%3A3857%3A{z}&TileRow={y}&TileCol={x}')
return url.format(z=z, y=y, x=x,
apikey=self.apikey,
layer=self.layer)
def _merge_tiles(tiles):
"""Return a single image, merging the given images."""
if not tiles:
raise ValueError('A non-empty list of tiles should '
'be provided to merge.')
xset = [set(x) for i, x, y, _ in tiles]
yset = [set(y) for i, x, y, _ in tiles]
xs = xset[0]
xs.update(*xset[1:])
ys = yset[0]
ys.update(*yset[1:])
xs = sorted(xs)
ys = sorted(ys)
other_len = tiles[0][0].shape[2:]
img = np.zeros((len(ys), len(xs)) + other_len, dtype=np.uint8) - 1
for tile_img, x, y, origin in tiles:
y_first, y_last = y[0], y[-1]
yi0, yi1 = np.where((y_first == ys) | (y_last == ys))[0]
if origin == 'upper':
yi0 = tile_img.shape[0] - yi0 - 1
yi1 = tile_img.shape[0] - yi1 - 1
start, stop, step = yi0, yi1, 1 if yi0 < yi1 else -1
if step == 1 and stop == img.shape[0] - 1:
stop = None
elif step == -1 and stop == 0:
stop = None
else:
stop += step
y_slice = slice(start, stop, step)
xi0, xi1 = np.where((x[0] == xs) | (x[-1] == xs))[0]
start, stop, step = xi0, xi1, 1 if xi0 < xi1 else -1
if step == 1 and stop == img.shape[1] - 1:
stop = None
elif step == -1 and stop == 0:
stop = None
else:
stop += step
x_slice = slice(start, stop, step)
img_slice = (y_slice, x_slice, Ellipsis)
if origin == 'lower':
tile_img = tile_img[::-1, ::]
img[img_slice] = tile_img
return img, [min(xs), max(xs), min(ys), max(ys)], 'lower'
| lgpl-3.0 |
dnlm92/chokoretto | temp/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/big5prober.py | 2931 | 1684 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
| mit |
dparlevliet/zelenka-report-storage | server-db/twisted/test/plugin_basic.py | 62 | 1029 | # Copyright (c) 2005 Divmod, Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# Don't change the docstring, it's part of the tests
"""
I'm a test drop-in. The plugin system's unit tests use me. No one
else should.
"""
from zope.interface import classProvides
from twisted.plugin import IPlugin
from twisted.test.test_plugin import ITestPlugin, ITestPlugin2
class TestPlugin:
"""
A plugin used solely for testing purposes.
"""
classProvides(ITestPlugin,
IPlugin)
def test1():
pass
test1 = staticmethod(test1)
class AnotherTestPlugin:
"""
Another plugin used solely for testing purposes.
"""
classProvides(ITestPlugin2,
IPlugin)
def test():
pass
test = staticmethod(test)
class ThirdTestPlugin:
"""
Another plugin used solely for testing purposes.
"""
classProvides(ITestPlugin2,
IPlugin)
def test():
pass
test = staticmethod(test)
| lgpl-3.0 |
fbossy/SickRage | sickbeard/clients/transmission_client.py | 16 | 5249 | # Author: Mr_Orange <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import json
from base64 import b64encode
import sickbeard
from .generic import GenericClient
class TransmissionAPI(GenericClient):
def __init__(self, host=None, username=None, password=None):
super(TransmissionAPI, self).__init__('Transmission', host, username, password)
if not self.host.endswith('/'):
self.host = self.host + '/'
if self.rpcurl.startswith('/'):
self.rpcurl = self.rpcurl[1:]
if self.rpcurl.endswith('/'):
self.rpcurl = self.rpcurl[:-1]
self.url = self.host + self.rpcurl + '/rpc'
def _get_auth(self):
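        # Transmission's RPC interface is CSRF-protected: the first request
        # is answered with 409 Conflict carrying an X-Transmission-Session-Id
        # token, which must be echoed on every subsequent request.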
post_data = json.dumps({'method': 'session-get', })
try:
self.response = self.session.post(self.url, data=post_data.encode('utf-8'), timeout=120,
verify=sickbeard.TORRENT_VERIFY_CERT)
self.auth = re.search('X-Transmission-Session-Id:\s*(\w+)', self.response.text).group(1)
except:
return None
self.session.headers.update({'x-transmission-session-id': self.auth})
#Validating Transmission authorization
post_data = json.dumps({'arguments': {},
'method': 'session-get',
})
self._request(method='post', data=post_data)
return self.auth
def _add_torrent_uri(self, result):
arguments = {'filename': result.url,
'paused': 1 if sickbeard.TORRENT_PAUSED else 0,
'download-dir': sickbeard.TORRENT_PATH
}
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-add',
})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
def _add_torrent_file(self, result):
arguments = {'metainfo': b64encode(result.content),
'paused': 1 if sickbeard.TORRENT_PAUSED else 0,
'download-dir': sickbeard.TORRENT_PATH
}
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-add',
})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
def _set_torrent_ratio(self, result):
ratio = None
if result.ratio:
ratio = result.ratio
mode = 0
if ratio:
if float(ratio) == -1:
ratio = 0
mode = 2
elif float(ratio) >= 0:
ratio = float(ratio)
mode = 1 # Stop seeding at seedRatioLimit
arguments = {'ids': [result.hash],
'seedRatioLimit': ratio,
'seedRatioMode': mode
}
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-set',
})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
def _set_torrent_seed_time(self, result):
if sickbeard.TORRENT_SEED_TIME and sickbeard.TORRENT_SEED_TIME != -1:
time = int(60 * float(sickbeard.TORRENT_SEED_TIME))
arguments = {'ids': [result.hash],
'seedIdleLimit': time,
'seedIdleMode': 1
}
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-set',
})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
else:
return True
def _set_torrent_priority(self, result):
arguments = {'ids': [result.hash]}
if result.priority == -1:
arguments['priority-low'] = []
elif result.priority == 1:
# set high priority for all files in torrent
arguments['priority-high'] = []
# move torrent to the top if the queue
arguments['queuePosition'] = 0
if sickbeard.TORRENT_HIGH_BANDWIDTH:
arguments['bandwidthPriority'] = 1
else:
arguments['priority-normal'] = []
post_data = json.dumps({'arguments': arguments,
'method': 'torrent-set',
})
self._request(method='post', data=post_data)
return self.response.json()['result'] == "success"
api = TransmissionAPI()
| gpl-3.0 |
titasakgm/brc-stock | openerp/addons/report_geraldo/lib/geraldo/site/newsite/django_1_0/tests/regressiontests/datatypes/models.py | 13 | 2338 | """
This is a basic model to test saving and loading boolean and date-related
types, which in the past were problematic for some database backends.
"""
from django.db import models
from django.conf import settings
class Donut(models.Model):
name = models.CharField(max_length=100)
is_frosted = models.BooleanField(default=False)
has_sprinkles = models.NullBooleanField()
baked_date = models.DateField(null=True)
baked_time = models.TimeField(null=True)
consumed_at = models.DateTimeField(null=True)
class Meta:
ordering = ('consumed_at',)
def __str__(self):
return self.name
__test__ = {'API_TESTS': """
# No donuts are in the system yet.
>>> Donut.objects.all()
[]
>>> d = Donut(name='Apple Fritter')
# Ensure we're getting True and False, not 0 and 1
>>> d.is_frosted
False
>>> d.has_sprinkles
>>> d.has_sprinkles = True
>>> d.has_sprinkles == True
True
>>> d.save()
>>> d2 = Donut.objects.all()[0]
>>> d2
<Donut: Apple Fritter>
>>> d2.is_frosted == False
True
>>> d2.has_sprinkles == True
True
>>> import datetime
>>> d2.baked_date = datetime.date(year=1938, month=6, day=4)
>>> d2.baked_time = datetime.time(hour=5, minute=30)
>>> d2.consumed_at = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
>>> d2.save()
>>> d3 = Donut.objects.all()[0]
>>> d3.baked_date
datetime.date(1938, 6, 4)
>>> d3.baked_time
datetime.time(5, 30)
>>> d3.consumed_at
datetime.datetime(2007, 4, 20, 16, 19, 59)
# Year boundary tests (ticket #3689)
>>> d = Donut(name='Date Test 2007', baked_date=datetime.datetime(year=2007, month=12, day=31), consumed_at=datetime.datetime(year=2007, month=12, day=31, hour=23, minute=59, second=59))
>>> d.save()
>>> d1 = Donut(name='Date Test 2006', baked_date=datetime.datetime(year=2006, month=1, day=1), consumed_at=datetime.datetime(year=2006, month=1, day=1))
>>> d1.save()
>>> Donut.objects.filter(baked_date__year=2007)
[<Donut: Date Test 2007>]
>>> Donut.objects.filter(baked_date__year=2006)
[<Donut: Date Test 2006>]
>>> Donut.objects.filter(consumed_at__year=2007).order_by('name')
[<Donut: Apple Fritter>, <Donut: Date Test 2007>]
>>> Donut.objects.filter(consumed_at__year=2006)
[<Donut: Date Test 2006>]
>>> Donut.objects.filter(consumed_at__year=2005)
[]
>>> Donut.objects.filter(consumed_at__year=2008)
[]
"""}
| agpl-3.0 |
meghana1995/sympy | sympy/strategies/rl.py | 20 | 4295 | """ Generic Rules for SymPy
This file assumes knowledge of Basic and little else.
"""
from __future__ import print_function, division
from sympy.utilities.iterables import sift
from .util import new
# Functions that create rules
def rm_id(isid, new=new):
""" Create a rule to remove identities
isid - fn :: x -> Bool --- whether or not this element is an identity
>>> from sympy.strategies import rm_id
>>> from sympy import Basic
>>> remove_zeros = rm_id(lambda x: x==0)
>>> remove_zeros(Basic(1, 0, 2))
Basic(1, 2)
    >>> remove_zeros(Basic(0, 0)) # If only identities then we keep one
Basic(0)
See Also:
unpack
"""
def ident_remove(expr):
""" Remove identities """
ids = list(map(isid, expr.args))
if sum(ids) == 0: # No identities. Common case
return expr
elif sum(ids) != len(ids): # there is at least one non-identity
return new(expr.__class__,
*[arg for arg, x in zip(expr.args, ids) if not x])
else:
return new(expr.__class__, expr.args[0])
return ident_remove
def glom(key, count, combine):
""" Create a rule to conglomerate identical args
>>> from sympy.strategies import glom
>>> from sympy import Add
>>> from sympy.abc import x
>>> key = lambda x: x.as_coeff_Mul()[1]
>>> count = lambda x: x.as_coeff_Mul()[0]
>>> combine = lambda cnt, arg: cnt * arg
>>> rl = glom(key, count, combine)
>>> rl(Add(x, -x, 3*x, 2, 3, evaluate=False))
3*x + 5
Wait, how are key, count and combine supposed to work?
>>> key(2*x)
x
>>> count(2*x)
2
>>> combine(2, x)
2*x
"""
def conglomerate(expr):
""" Conglomerate together identical args x + x -> 2x """
groups = sift(expr.args, key)
counts = dict((k, sum(map(count, args))) for k, args in groups.items())
newargs = [combine(cnt, mat) for mat, cnt in counts.items()]
if set(newargs) != set(expr.args):
return new(type(expr), *newargs)
else:
return expr
return conglomerate
def sort(key, new=new):
""" Create a rule to sort by a key function
>>> from sympy.strategies import sort
>>> from sympy import Basic
>>> sort_rl = sort(str)
>>> sort_rl(Basic(3, 1, 2))
Basic(1, 2, 3)
"""
def sort_rl(expr):
return new(expr.__class__, *sorted(expr.args, key=key))
return sort_rl
def distribute(A, B):
""" Turns an A containing Bs into a B of As
where A, B are container types
>>> from sympy.strategies import distribute
>>> from sympy import Add, Mul, symbols
>>> x, y = symbols('x,y')
>>> dist = distribute(Mul, Add)
>>> expr = Mul(2, x+y, evaluate=False)
>>> expr
2*(x + y)
>>> dist(expr)
2*x + 2*y
"""
def distribute_rl(expr):
for i, arg in enumerate(expr.args):
if isinstance(arg, B):
first, b, tail = expr.args[:i], expr.args[i], expr.args[i+1:]
return B(*[A(*(first + (arg,) + tail)) for arg in b.args])
return expr
return distribute_rl
def subs(a, b):
""" Replace expressions exactly """
def subs_rl(expr):
if expr == a:
return b
else:
return expr
return subs_rl
# Functions that are rules
def unpack(expr):
""" Rule to unpack singleton args
>>> from sympy.strategies import unpack
>>> from sympy import Basic
>>> unpack(Basic(2))
2
"""
if len(expr.args) == 1:
return expr.args[0]
else:
return expr
def flatten(expr, new=new):
""" Flatten T(a, b, T(c, d), T2(e)) to T(a, b, c, d, T2(e)) """
cls = expr.__class__
args = []
for arg in expr.args:
if arg.__class__ == cls:
args.extend(arg.args)
else:
args.append(arg)
return new(expr.__class__, *args)
def rebuild(expr):
""" Rebuild a SymPy tree
This function recursively calls constructors in the expression tree.
This forces canonicalization and removes ugliness introduced by the use of
Basic.__new__
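    For example (illustrative):

    >>> from sympy.strategies.rl import rebuild
    >>> from sympy import Add
    >>> from sympy.abc import x
    >>> rebuild(Add(x, x, evaluate=False))
    2*x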
"""
try:
return type(expr)(*list(map(rebuild, expr.args)))
except Exception:
return expr
| bsd-3-clause |
Chilledheart/chromium | chrome/common/extensions/docs/server2/api_models.py | 37 | 7028 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
from compiled_file_system import Cache, SingleFile, Unicode
from extensions_paths import API_PATHS
from features_bundle import HasParent, GetParentName
from file_system import FileNotFoundError
from future import All, Future, Race
from operator import itemgetter
from path_util import Join
from platform_util import PlatformToExtensionType
from schema_processor import SchemaProcessor, SchemaProcessorFactory
from third_party.json_schema_compiler.json_schema import DeleteNodes
from third_party.json_schema_compiler.model import Namespace, UnixName
def GetNodeCategories():
'''Returns a tuple of the possible categories a node may belong to.
'''
return ('types', 'functions', 'events', 'properties')
class ContentScriptAPI(object):
'''Represents an API available to content scripts.
|name| is the name of the API or API node this object represents.
|restrictedTo| is a list of dictionaries representing the nodes
of this API that are available to content scripts, or None if the
entire API is available to content scripts.
'''
def __init__(self, name):
self.name = name
self.restrictedTo = None
def __eq__(self, o):
return self.name == o.name and self.restrictedTo == o.restrictedTo
def __ne__(self, o):
return not (self == o)
def __repr__(self):
return ('<ContentScriptAPI name=%s, restrictedTo=%s>' %
(self.name, self.restrictedTo))
def __str__(self):
return repr(self)
class APIModels(object):
'''Tracks APIs and their Models.
'''
def __init__(self,
features_bundle,
compiled_fs_factory,
file_system,
object_store_creator,
platform,
schema_processor_factory):
self._features_bundle = features_bundle
self._platform = PlatformToExtensionType(platform)
self._model_cache = compiled_fs_factory.Create(
file_system, self._CreateAPIModel, APIModels, category=self._platform)
self._object_store = object_store_creator.Create(APIModels)
self._schema_processor = Future(callback=lambda:
schema_processor_factory.Create(False))
@Cache
@SingleFile
@Unicode
def _CreateAPIModel(self, path, data):
def does_not_include_platform(node):
return ('extension_types' in node and
node['extension_types'] != 'all' and
self._platform not in node['extension_types'])
schema = self._schema_processor.Get().Process(path, data)[0]
if not schema:
raise ValueError('No schema for %s' % path)
return Namespace(DeleteNodes(
schema, matcher=does_not_include_platform), path)
def GetNames(self):
# API names appear alongside some of their methods/events/etc in the
# features file. APIs are those which either implicitly or explicitly have
# no parent feature (e.g. app, app.window, and devtools.inspectedWindow are
# APIs; runtime.onConnectNative is not).
api_features = self._features_bundle.GetAPIFeatures().Get()
return [name for name, feature in api_features.iteritems()
if not HasParent(name, feature, api_features)]
def _GetPotentialPathsForModel(self, api_name):
'''Returns the list of file system paths that the model for |api_name|
might be located at.
'''
# By default |api_name| is assumed to be given without a path or extension,
# so combinations of known paths and extension types will be searched.
api_extensions = ('.json', '.idl')
api_paths = API_PATHS
# Callers sometimes include a file extension and/or prefix path with the
# |api_name| argument. We believe them and narrow the search space
# accordingly.
name, ext = posixpath.splitext(api_name)
if ext in api_extensions:
api_extensions = (ext,)
api_name = name
for api_path in api_paths:
if api_name.startswith(api_path):
api_name = api_name[len(api_path):]
api_paths = (api_path,)
break
# API names are given as declarativeContent and app.window but file names
# will be declarative_content and app_window.
file_name = UnixName(api_name).replace('.', '_')
    # Devtools APIs are in API/devtools/ rather than API/, and have the
    # 'devtools_' prefix removed from their file names.
basename = posixpath.basename(file_name)
if 'devtools_' in basename:
file_name = posixpath.join(
'devtools', file_name.replace(basename,
                                    basename.replace('devtools_', '')))
return [Join(path, file_name + ext) for ext in api_extensions
for path in api_paths]
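  # Illustration (names and paths are assumptions, not exhaustive): for
  # api_name 'app.window' the candidates pair each path in API_PATHS with
  # 'app_window.json' and 'app_window.idl'; for 'devtools.inspectedWindow'
  # the file component becomes 'devtools/inspected_window'.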
def GetModel(self, api_name):
futures = [self._model_cache.GetFromFile(path)
for path in self._GetPotentialPathsForModel(api_name)]
return Race(futures, except_pass=(FileNotFoundError, ValueError))
def GetContentScriptAPIs(self):
'''Creates a dict of APIs and nodes supported by content scripts in
this format:
{
'extension': '<ContentScriptAPI name='extension',
restrictedTo=[{'node': 'onRequest'}]>',
...
}
'''
content_script_apis_future = self._object_store.Get('content_script_apis')
api_features_future = self._features_bundle.GetAPIFeatures()
def resolve():
content_script_apis = content_script_apis_future.Get()
if content_script_apis is not None:
return content_script_apis
api_features = api_features_future.Get()
content_script_apis = {}
for name, feature in api_features.iteritems():
if 'content_script' not in feature.get('contexts', ()):
continue
parent = GetParentName(name, feature, api_features)
if parent is None:
content_script_apis[name] = ContentScriptAPI(name)
else:
# Creates a dict for the individual node.
node = {'node': name[len(parent) + 1:]}
if parent not in content_script_apis:
content_script_apis[parent] = ContentScriptAPI(parent)
if content_script_apis[parent].restrictedTo:
content_script_apis[parent].restrictedTo.append(node)
else:
content_script_apis[parent].restrictedTo = [node]
self._object_store.Set('content_script_apis', content_script_apis)
return content_script_apis
return Future(callback=resolve)
def Refresh(self):
futures = [self.GetModel(name) for name in self.GetNames()]
return All(futures, except_pass=(FileNotFoundError, ValueError))
def IterModels(self):
future_models = [(name, self.GetModel(name)) for name in self.GetNames()]
for name, future_model in future_models:
try:
model = future_model.Get()
except FileNotFoundError:
continue
if model:
yield name, model
| bsd-3-clause |
Yannig/ansible-modules-core | cloud/amazon/rds_subnet_group.py | 17 | 5033 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds_subnet_group
version_added: "1.5"
short_description: manage RDS database subnet groups
description:
- Creates, modifies, and deletes RDS database subnet groups. This module has a dependency on python-boto >= 2.5.
options:
state:
description:
- Specifies whether the subnet should be present or absent.
required: true
default: present
aliases: []
choices: [ 'present' , 'absent' ]
name:
description:
- Database subnet group identifier.
required: true
default: null
aliases: []
description:
description:
- Database subnet group description. Only set when a new group is added.
required: false
default: null
aliases: []
subnets:
description:
- List of subnet IDs that make up the database subnet group.
required: false
default: null
aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: ['aws_region', 'ec2_region']
author: Scott Anderson
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Add or change a subnet group
- rds_subnet_group:
state: present
name: norwegian-blue
description: My Fancy Ex Parrot Subnet Group
subnets:
- subnet-aaaaaaaa
- subnet-bbbbbbbb
# Remove a subnet group
- rds_subnet_group:
state: absent
name: norwegian-blue
'''
try:
import boto.rds
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state = dict(required=True, choices=['present', 'absent']),
name = dict(required=True),
description = dict(required=False),
subnets = dict(required=False, type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
group_name = module.params.get('name').lower()
group_description = module.params.get('description')
    group_subnets = module.params.get('subnets') or []
if state == 'present':
for required in ['name', 'description', 'subnets']:
if not module.params.get(required):
module.fail_json(msg = str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'subnets']:
if module.params.get(not_allowed):
module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
# Retrieve any AWS settings from the environment.
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if not region:
module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
try:
conn = boto.rds.connect_to_region(region, **aws_connect_kwargs)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
try:
changed = False
exists = False
try:
matching_groups = conn.get_all_db_subnet_groups(group_name, max_records=100)
exists = len(matching_groups) > 0
except BotoServerError, e:
if e.error_code != 'DBSubnetGroupNotFoundFault':
module.fail_json(msg = e.error_message)
if state == 'absent':
if exists:
conn.delete_db_subnet_group(group_name)
changed = True
else:
        if not exists:
            new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets)
            # report the creation so callers see changed=True
            changed = True
        else:
            changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
            # boto does not report whether anything actually changed here, so
            # conservatively flag the modification as a change
            changed = True
except BotoServerError, e:
module.fail_json(msg = e.error_message)
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
zstackio/zstack-woodpecker | integrationtest/vm/vpc/test_two_vpc_two_vm_connection.py | 2 | 2156 | '''
@author: FangSun
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.host_operations as host_ops
import random
import os
import zstackwoodpecker.operations.volume_operations as vol_ops
import time
from itertools import izip
VPC1_VLAN, VPC1_VXLAN = ['l3VlanNetwork2', 'l3VxlanNetwork12']
VPC2_VLAN, VPC2_VXLAN = ['l3VlanNetwork3', 'l3VxlanNetwork13']
vpc_l3_list = [(VPC1_VLAN, VPC1_VXLAN), (VPC2_VLAN, VPC2_VXLAN)]
vpc_name_list =['vpc1', 'vpc2']
case_flavor = dict(vm1_l3_vlan_vm2_l3_vlan= dict(vm1l3=VPC1_VLAN, vm2l3=VPC2_VLAN),
vm1_l3_vxlan_vm2_l3_vxlan= dict(vm1l3=VPC1_VXLAN, vm2l3=VPC2_VXLAN),
vm1_l3_vlan_vm2_l3_vxlan= dict(vm1l3=VPC1_VLAN, vm2l3=VPC2_VXLAN),
)
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
vr_list= []
def test():
flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
test_util.test_dsc("create vpc vrouter and attach vpc l3 to vpc")
for vpc_name in vpc_name_list:
vr_list.append(test_stub.create_vpc_vrouter(vpc_name))
for vr, l3_list in izip(vr_list, vpc_l3_list):
test_stub.attach_l3_to_vpc_vr(vr, l3_list)
vm1, vm2 = [test_stub.create_vm_with_random_offering(vm_name='vpc_vm_{}'.format(name), l3_name=name) for name in (flavor['vm1l3'], flavor['vm2l3'])]
[test_obj_dict.add_vm(vm) for vm in (vm1,vm2)]
[vm.check() for vm in (vm1,vm2)]
test_util.test_dsc("test two vm connectivity")
[test_stub.run_command_in_vm(vm.get_vm(), 'iptables -F') for vm in (vm1,vm2)]
test_stub.check_icmp_between_vms(vm1, vm2, expected_result='FAIL')
test_stub.check_tcp_between_vms(vm1, vm2, [], ["22"])
test_lib.lib_error_cleanup(test_obj_dict)
test_stub.remove_all_vpc_vrouter()
def env_recover():
test_lib.lib_error_cleanup(test_obj_dict)
test_stub.remove_all_vpc_vrouter()
| apache-2.0 |
speedovation/CrashReporter-Qt-Breakpad | breakpad/src/tools/python/filter_syms.py | 76 | 8032 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Normalizes and de-duplicates paths within Breakpad symbol files.
When using DWARF for storing debug symbols, some file information will be
stored relative to the current working directory of the current compilation
unit, and may be further relativized based upon how the file was #included.
This helper can be used to parse the Breakpad symbol file generated from such
DWARF files and normalize and de-duplicate the FILE records found within,
updating any references to the FILE records in the other record types.
"""
import macpath
import ntpath
import optparse
import os
import posixpath
import sys
class BreakpadParseError(Exception):
"""Unsupported Breakpad symbol record exception class."""
pass
class SymbolFileParser(object):
"""Parser for Breakpad symbol files.
The format of these files is documented at
https://code.google.com/p/google-breakpad/wiki/SymbolFiles
"""
def __init__(self, input_stream, output_stream, ignored_prefixes=None,
path_handler=os.path):
"""Inits a SymbolFileParser to read symbol records from |input_stream| and
write the processed output to |output_stream|.
|ignored_prefixes| contains a list of optional path prefixes that
should be stripped from the final, normalized path outputs.
For example, if the Breakpad symbol file had all paths starting with a
common prefix, such as:
FILE 1 /b/build/src/foo.cc
FILE 2 /b/build/src/bar.cc
Then adding "/b/build/src" as an ignored prefix would result in an output
file that contained:
FILE 1 foo.cc
FILE 2 bar.cc
Note that |ignored_prefixes| does not necessarily contain file system
paths, as the contents of the DWARF DW_AT_comp_dir attribute is dependent
upon the host system and compiler, and may contain additional information
such as hostname or compiler version.
"""
self.unique_files = {}
self.duplicate_files = {}
self.input_stream = input_stream
self.output_stream = output_stream
self.ignored_prefixes = ignored_prefixes or []
self.path_handler = path_handler
def Process(self):
"""Processes the Breakpad symbol file."""
for line in self.input_stream:
parsed = self._ParseRecord(line.rstrip())
if parsed:
self.output_stream.write(parsed + '\n')
def _ParseRecord(self, record):
"""Parses a single Breakpad symbol record - a single line from the symbol
file.
Returns:
The modified string to write to the output file, or None if no line
should be written.
"""
record_type = record.partition(' ')[0]
if record_type == 'FILE':
return self._ParseFileRecord(record)
elif self._IsLineRecord(record_type):
return self._ParseLineRecord(record)
else:
# Simply pass the record through unaltered.
return record
def _NormalizePath(self, path):
"""Normalizes a file path to its canonical form.
As this may not execute on the machine or file system originally
responsible for compilation, it may be necessary to further correct paths
for symlinks, junctions, or other such file system indirections.
Returns:
A unique, canonical representation for the the file path.
"""
return self.path_handler.normpath(path)
def _AdjustPath(self, path):
"""Adjusts the supplied path after performing path de-duplication.
This may be used to perform secondary adjustments, such as removing a
common prefix, such as "/D/build", or replacing the file system path with
information from the version control system.
Returns:
The actual path to use when writing the FILE record.
"""
return path[len(filter(path.startswith,
self.ignored_prefixes + [''])[0]):]
def _ParseFileRecord(self, file_record):
"""Parses and corrects a FILE record."""
file_info = file_record[5:].split(' ', 3)
if len(file_info) > 2:
raise BreakpadParseError('Unsupported FILE record: ' + file_record)
file_index = int(file_info[0])
file_name = self._NormalizePath(file_info[1])
existing_file_index = self.unique_files.get(file_name)
if existing_file_index is None:
self.unique_files[file_name] = file_index
file_info[1] = self._AdjustPath(file_name)
return 'FILE ' + ' '.join(file_info)
else:
self.duplicate_files[file_index] = existing_file_index
return None
def _IsLineRecord(self, record_type):
"""Determines if the current record type is a Line record"""
    try:
      # Line records begin with a hex address, so any record type that
      # parses as hexadecimal is treated as a line record.
      int(record_type, 16)
    except (ValueError, TypeError):
      return False
    return True
def _ParseLineRecord(self, line_record):
"""Parses and corrects a Line record."""
line_info = line_record.split(' ', 5)
if len(line_info) > 4:
raise BreakpadParseError('Unsupported Line record: ' + line_record)
file_index = int(line_info[3])
line_info[3] = str(self.duplicate_files.get(file_index, file_index))
return ' '.join(line_info)
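# Usage sketch (paths and the prefix are illustrative, not real build output):
#
#   import StringIO
#   symbols = ('MODULE Linux x86_64 DEADBEEF binary\n'
#              'FILE 1 /b/build/src/foo.cc\n'
#              'FILE 2 /b/build/./src/foo.cc\n')
#   out = StringIO.StringIO()
#   SymbolFileParser(StringIO.StringIO(symbols), out,
#                    ignored_prefixes=['/b/build/']).Process()
#   # out now holds the MODULE record plus a single 'FILE 1 src/foo.cc';
#   # the normalized duplicate FILE 2 is dropped, and later line records
#   # referencing index 2 would be remapped onto index 1.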
def main():
option_parser = optparse.OptionParser()
option_parser.add_option("-p", "--prefix",
action="append", dest="prefixes", type="string",
default=[],
help="A path prefix that should be removed from "
"all FILE lines. May be repeated to specify "
"multiple prefixes.")
option_parser.add_option("-t", "--path_type",
action="store", type="choice", dest="path_handler",
choices=['win32', 'posix'],
help="Indicates how file paths should be "
"interpreted. The default is to treat paths "
"the same as the OS running Python (eg: "
"os.path)")
options, args = option_parser.parse_args()
if args:
option_parser.error('Unknown argument: %s' % args)
path_handler = { 'win32': ntpath,
'posix': posixpath }.get(options.path_handler, os.path)
try:
symbol_parser = SymbolFileParser(sys.stdin, sys.stdout, options.prefixes,
path_handler)
symbol_parser.Process()
except BreakpadParseError, e:
print >> sys.stderr, 'Got an error while processing symbol file'
print >> sys.stderr, str(e)
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 |
hobson/pug-dj | pug/dj/sqlserver.py | 1 | 7833 | from django.db import connections #, transaction
from collections import OrderedDict, namedtuple
from django.conf import settings
from django.db import DatabaseError
from traceback import print_exc
from ansi.constants import field_type_name
def dictfetchall(cursor):
"""Returns all rows from a cursor as a dict (rather than a headerless table)
From Django Documentation: https://docs.djangoproject.com/en/dev/topics/db/sql/
"""
desc = cursor.description
return [dict(zip([col[0] for col in desc], row)) for row in cursor.fetchall()]
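# Usage sketch (the 'default' alias and the query are illustrative):
#   from django.db import connections
#   cursor = connections['default'].cursor()
#   cursor.execute('SELECT 1 AS one, 2 AS two')
#   dictfetchall(cursor)  # -> [{'one': 1, 'two': 2}]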
def dicts_from_table(table, keys=None):
"""Returns a list of dict() objects, one for each row in a list of lists (table)"""
lod = []
for row in table:
if not keys:
keys = row
            # require every header key to be a non-blank string ('and', not 'or',
            # which also avoids calling .strip() on None)
            if not all((k not in (None, '') and bool(k.strip())) for k in keys):
keys = None
continue
lod += [OrderedDict((k, v) for k, v in zip(keys, row))]
return lod
def count(cursor, field, table, db, distinct=False):
    sql = 'USE %s; SELECT COUNT(%s%s) FROM %s;' % (db, 'DISTINCT ' if distinct else '', field, table)
    # dictfetchall needs the cursor itself (for cursor.description), not the fetched rows
    cursor.execute(sql)
    return dictfetchall(cursor)
def namedtuples_from_table(table, keys=None):
lont = []
for row in table:
if not keys:
keys = row
            # same non-blank-key check as dicts_from_table ('and', not 'or')
            if not all((k not in (None, '') and bool(k.strip())) for k in keys):
keys = None
NamedTuple = namedtuple('NamedTuple', ' '.join([k.strip() for k in keys]))
#print keys
#print NamedTuple
continue
lont += [NamedTuple(*row)]
#print lont
return lont
# FIXME: this gives a much different answer than django ORM queries used in inspectdb
def column_properties_sql(table_name):
"""SQL query string for retrieving column names and properties for a sqlserver db table
from marc_s at http://stackoverflow.com/a/2418665/623735"""
sql = """
SELECT
c.name 'Column Name',
t.Name 'Data type',
c.max_length 'Max Length',
c.precision ,
c.scale ,
c.is_nullable,
ISNULL(i.is_primary_key, 0) 'Primary Key'
FROM
sys.columns c
INNER JOIN
sys.types t ON c.system_type_id = t.system_type_id
LEFT OUTER JOIN
sys.index_columns ic ON ic.object_id = c.object_id AND ic.column_id = c.column_id
LEFT OUTER JOIN
sys.indexes i ON ic.object_id = i.object_id AND ic.index_id = i.index_id
WHERE
c.object_id = OBJECT_ID('""" + str(table_name) + "')"
#print sql
return sql
def make_cursor(cursor='default'):
# a connection might need to be converted to a cursor, cursors have a noncallable attribute "cursor" so watch out
if hasattr(cursor, 'cursor') and callable(cursor.cursor):
cursor = cursor.cursor()
# a db_alias might need to be converted to a cursor
elif isinstance(cursor, basestring) and cursor in settings.DATABASES:
cursor = connections[cursor].cursor()
return cursor
def datatype(dbtype, description, cursor):
"""Google AppEngine Helper to convert a data type into a string."""
dt = cursor.db.introspection.get_field_type(dbtype, description)
if type(dt) is tuple:
return dt[0]
else:
return dt
def get_meta_table(cursor='default', table=None, verbosity=0):
cursor = make_cursor(cursor)
# from dev branch of Django
# FieldInfo = namedtuple('FieldInfo','name type_code display_size internal_size precision scale null_ok')
#pep249 http://www.python.org/dev/peps/pep-0249
#0: name (mandatory)
#1: type_code (mandatory)
#2: display_size (optional/nullable)
#3: internal_size (optional/nullable)
#4: precision (optional/nullable)
#5: scale (optional/nullable)
#6: null_ok (optional/nullable)
#pep249 implementation by psycopg.cursor.description
#0: name: the name of the column returned.
#1: type_code: the PostgreSQL OID of the column. You can use the pg_type system table to get more informations about the type. This is the value used by Psycopg to decide what Python type use to represent the value. See also Type casting of SQL types into Python objects.
#2: display_size: the actual length of the column in bytes. Obtaining this value is computationally intensive, so it is always None unless the PSYCOPG_DISPLAY_SIZE parameter is set at compile time. See also PQgetlength.
#3: internal_size: the size in bytes of the column associated to this column on the server. Set to a negative value for variable-size types See also PQfsize.
#4: precision: total number of significant digits in columns of type NUMERIC. None for other types.
#5: scale: count of decimal digits in the fractional part in columns of type NUMERIC. None for other types.
#6: null_ok: always None as not easy to retrieve from the libpq.
#pyodbc.cursor.columns implementation:
#0: table_cat
#1: table_schem
#2: table_name
#3: column_name
#4: data_type
#5: type_name
#6: column_size
#7: buffer_length
#8: decimal_digits
#9: num_prec_radix
#10: nullable
#11: remarks
#12: column_def
#13: sql_data_type
#14: sql_datetime_sub
#15: char_octet_length
#16: ordinal_position
#17: is_nullable: One of SQL_NULLABLE, SQL_NO_NULLS, SQL_NULLS_UNKNOWN.
#augmented pep249 provided here(TBD)
#7: primary_key (True if the column value is used as an index or primary key)
#8: num_distinct (number of distinct values)
#9: num_null (number of null values)
#10: max_value
#11: min_value
# # custom microsoft SQL query of metadata (works poorly in comparison to others)
# if cursor.cursor.__class__.__module__.endswith('odbc.base'):
# ans += [[c[0], c[1], c[2], c[2], c[3], c[4], c[5], c[6]] for c in cursor.execute(column_properties_sql(table)).fetchall()]
# # pyodbc
# elif hasattr(cursor, 'columns') and callable(cursor.columns):
# ans += [[c[3], c[4], None, c[6], c[6], c[8], c[10]] for c in cursor.columns(table=table)]
# # psycopg
# else:
if cursor.cursor.__class__.__module__.endswith('odbc.base'):
meta_table = [[c[0], c[1], c[2], c[2], c[3], c[4], c[5], c[6]] for c in cursor.execute(column_properties_sql(table)).fetchall()]
else:
try:
meta_table = list(list(row) for row in cursor.db.introspection.get_table_description(cursor, table))
except DatabaseError, e:
meta_table = []
if verbosity > 0:
print_exc()
print "DatabaseError: Unable to find meta data for table %r using cursor %r (db_alias %s) because of %s." % (table, cursor, cursor.db.alias, e)
return meta_table
for i, row in enumerate(meta_table):
meta_table[i][1] = field_type_name.get(row[1], '').lower() or row[1]
ans = [('name', 'type', 'display_size', 'internal_size', 'precision', 'scale', 'null_ok', 'primary_key')]
ans += [list(c) + [None] for c in meta_table]
if verbosity > 2:
print ans
return ans
DATATYPE_TO_FIELDTYPE = {'int': 'IntegerField', 'float': 'FloatField', 'text': 'TextField', 'char': 'CharField', 'Decimal': 'DecimalField'}
def datatype_to_fieldtype(datatype):
return DATATYPE_TO_FIELDTYPE.get(datatype, 'TextField')
def get_meta_tuples(cursor='default', table=None, verbosity=0):
return namedtuples_from_table(get_meta_table(cursor=cursor, table=table, verbosity=verbosity))
def get_meta_dicts(cursor='default', table=None, verbosity=0):
return dicts_from_table(get_meta_table(cursor=cursor, table=table, verbosity=verbosity))
def primary_keys(cursor='default', table=None):
list_of_fields = get_meta_tuples(cursor=cursor, table=table)
    return [field.name for field in list_of_fields if field.primary_key]
| mit |
alexcuellar/odoo | addons/website/models/website.py | 25 | 35943 | # -*- coding: utf-8 -*-
import cStringIO
import contextlib
import datetime
import hashlib
import inspect
import logging
import math
import mimetypes
import unicodedata
import os
import re
import time
import urlparse
from PIL import Image
from sys import maxint
import werkzeug
# optional python-slugify import (https://github.com/un33k/python-slugify)
try:
import slugify as slugify_lib
except ImportError:
slugify_lib = None
import openerp
from openerp.osv import orm, osv, fields
from openerp.tools import html_escape as escape, ustr, image_resize_and_sharpen, image_save_for_web
from openerp.tools.safe_eval import safe_eval
from openerp.addons.web.http import request
logger = logging.getLogger(__name__)
def url_for(path_or_uri, lang=None):
if isinstance(path_or_uri, unicode):
path_or_uri = path_or_uri.encode('utf-8')
current_path = request.httprequest.path
if isinstance(current_path, unicode):
current_path = current_path.encode('utf-8')
location = path_or_uri.strip()
force_lang = lang is not None
url = urlparse.urlparse(location)
if request and not url.netloc and not url.scheme and (url.path or force_lang):
location = urlparse.urljoin(current_path, location)
lang = lang or request.context.get('lang')
langs = [lg[0] for lg in request.website.get_languages()]
if (len(langs) > 1 or force_lang) and is_multilang_url(location, langs):
ps = location.split('/')
if ps[1] in langs:
# Replace the language only if we explicitly provide a language to url_for
if force_lang:
ps[1] = lang
# Remove the default language unless it's explicitly provided
elif ps[1] == request.website.default_lang_code:
ps.pop(1)
# Insert the context language or the provided language
elif lang != request.website.default_lang_code or force_lang:
ps.insert(1, lang)
location = '/'.join(ps)
return location.decode('utf-8')
def is_multilang_url(local_url, langs=None):
if not langs:
langs = [lg[0] for lg in request.website.get_languages()]
spath = local_url.split('/')
# if a language is already in the path, remove it
if spath[1] in langs:
spath.pop(1)
local_url = '/'.join(spath)
try:
# Try to match an endpoint in werkzeug's routing table
url = local_url.split('?')
path = url[0]
query_string = url[1] if len(url) > 1 else None
router = request.httprequest.app.get_db_router(request.db).bind('')
        # Force the match method to POST; Odoo routes declare methods
        # ['POST'] or ['GET', 'POST'], so POST matches both kinds.
func = router.match(path, method='POST', query_args=query_string)[0]
return (func.routing.get('website', False) and
func.routing.get('multilang', func.routing['type'] == 'http'))
except Exception:
return False
def slugify(s, max_length=None):
""" Transform a string to a slug that can be used in a url path.
This method will first try to do the job with python-slugify if present.
Otherwise it will process string by stripping leading and ending spaces,
converting unicode chars to ascii, lowering all chars and replacing spaces
and underscore with hyphen "-".
:param s: str
:param max_length: int
:rtype: str
"""
s = ustr(s)
if slugify_lib:
        # There are two different "slugify" packages; only python-slugify
        # is supported here.
try:
return slugify_lib.slugify(s, max_length=max_length)
except TypeError:
pass
uni = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode('ascii')
slug = re.sub('[\W_]', ' ', uni).strip().lower()
slug = re.sub('[-\s]+', '-', slug)
return slug[:max_length]
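# Usage sketch (fallback path, i.e. python-slugify not installed):
#   slugify(u"My First Page!")                 # -> 'my-first-page'
#   slugify(u"\xc9t\xe9 2014", max_length=6)   # -> 'ete-20'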
def slug(value):
if isinstance(value, orm.browse_record):
# [(id, name)] = value.name_get()
id, name = value.id, value.display_name
else:
# assume name_search result tuple
id, name = value
slugname = slugify(name or '').strip().strip('-')
if not slugname:
return str(id)
return "%s-%d" % (slugname, id)
# NOTE: as the pattern is used as it for the ModelConverter (ir_http.py), do not use any flags
_UNSLUG_RE = re.compile(r'(?:(\w{1,2}|\w[A-Za-z0-9-_]+?\w)-)?(-?\d+)(?=$|/)')
def unslug(s):
"""Extract slug and id from a string.
Always return un 2-tuple (str|None, int|None)
"""
m = _UNSLUG_RE.match(s)
if not m:
return None, None
return m.group(1), int(m.group(2))
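# Usage sketch: ``slug`` and ``unslug`` are (partial) inverses:
#   slug((42, u"My First Page"))  # -> 'my-first-page-42'
#   unslug('my-first-page-42')    # -> ('my-first-page', 42)
#   unslug('42')                  # -> (None, 42)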
def urlplus(url, params):
return werkzeug.Href(url)(params or None)
class website(osv.osv):
def _get_menu_website(self, cr, uid, ids, context=None):
# IF a menu is changed, update all websites
return self.search(cr, uid, [], context=context)
def _get_menu(self, cr, uid, ids, name, arg, context=None):
root_domain = [('parent_id', '=', False)]
menus = self.pool.get('website.menu').search(cr, uid, root_domain, order='id', context=context)
menu = menus and menus[0] or False
return dict( map(lambda x: (x, menu), ids) )
_name = "website" # Avoid website.website convention for conciseness (for new api). Got a special authorization from xmo and rco
_description = "Website"
_columns = {
'name': fields.char('Domain'),
'company_id': fields.many2one('res.company', string="Company"),
'language_ids': fields.many2many('res.lang', 'website_lang_rel', 'website_id', 'lang_id', 'Languages'),
'default_lang_id': fields.many2one('res.lang', string="Default language", required=True),
'default_lang_code': fields.related('default_lang_id', 'code', type="char", string="Default language code", store=True),
'social_twitter': fields.char('Twitter Account'),
'social_facebook': fields.char('Facebook Account'),
'social_github': fields.char('GitHub Account'),
'social_linkedin': fields.char('LinkedIn Account'),
'social_youtube': fields.char('Youtube Account'),
'social_googleplus': fields.char('Google+ Account'),
'google_analytics_key': fields.char('Google Analytics Key'),
'user_id': fields.many2one('res.users', string='Public User'),
'partner_id': fields.related('user_id','partner_id', type='many2one', relation='res.partner', string='Public Partner'),
'menu_id': fields.function(_get_menu, relation='website.menu', type='many2one', string='Main Menu',
store= {
'website.menu': (_get_menu_website, ['sequence','parent_id','website_id'], 10)
})
}
_defaults = {
'company_id': lambda self,cr,uid,c: self.pool['ir.model.data'].xmlid_to_res_id(cr, openerp.SUPERUSER_ID, 'base.main_company'),
}
# cf. Wizard hack in website_views.xml
def noop(self, *args, **kwargs):
pass
def write(self, cr, uid, ids, vals, context=None):
self._get_languages.clear_cache(self)
return super(website, self).write(cr, uid, ids, vals, context)
def new_page(self, cr, uid, name, template='website.default_page', ispage=True, context=None):
context = context or {}
imd = self.pool.get('ir.model.data')
view = self.pool.get('ir.ui.view')
template_module, template_name = template.split('.')
# completely arbitrary max_length
page_name = slugify(name, max_length=50)
page_xmlid = "%s.%s" % (template_module, page_name)
try:
# existing page
imd.get_object_reference(cr, uid, template_module, page_name)
except ValueError:
# new page
_, template_id = imd.get_object_reference(cr, uid, template_module, template_name)
page_id = view.copy(cr, uid, template_id, context=context)
page = view.browse(cr, uid, page_id, context=context)
page.write({
'arch': page.arch.replace(template, page_xmlid),
'name': page_name,
'page': ispage,
})
imd.create(cr, uid, {
'name': page_name,
'module': template_module,
'model': 'ir.ui.view',
'res_id': page_id,
'noupdate': True
}, context=context)
return page_xmlid
def page_for_name(self, cr, uid, ids, name, module='website', context=None):
# whatever
return '%s.%s' % (module, slugify(name, max_length=50))
def page_exists(self, cr, uid, ids, name, module='website', context=None):
try:
name = (name or "").replace("/page/website.", "").replace("/page/", "")
if not name:
return False
return self.pool["ir.model.data"].get_object_reference(cr, uid, module, name)
except:
return False
@openerp.tools.ormcache(skiparg=3)
def _get_languages(self, cr, uid, id):
website = self.browse(cr, uid, id)
return [(lg.code, lg.name) for lg in website.language_ids]
def get_languages(self, cr, uid, ids, context=None):
return self._get_languages(cr, uid, ids[0])
def get_alternate_languages(self, cr, uid, ids, req=None, context=None):
langs = []
if req is None:
req = request.httprequest
default = self.get_current_website(cr, uid, context=context).default_lang_code
shorts = []
def get_url_localized(router, lang):
arguments = dict(request.endpoint_arguments)
for k, v in arguments.items():
if isinstance(v, orm.browse_record):
arguments[k] = v.with_context(lang=lang)
return router.build(request.endpoint, arguments)
router = request.httprequest.app.get_db_router(request.db).bind('')
for code, name in self.get_languages(cr, uid, ids, context=context):
lg_path = ('/' + code) if code != default else ''
lg = code.split('_')
shorts.append(lg[0])
uri = request.endpoint and get_url_localized(router, code) or request.httprequest.path
if req.query_string:
uri += '?' + req.query_string
lang = {
'hreflang': ('-'.join(lg)).lower(),
'short': lg[0],
'href': req.url_root[0:-1] + lg_path + uri,
}
langs.append(lang)
for lang in langs:
if shorts.count(lang['short']) == 1:
lang['hreflang'] = lang['short']
return langs
def get_current_website(self, cr, uid, context=None):
# TODO: Select website, currently hard coded
return self.pool['website'].browse(cr, uid, 1, context=context)
def is_publisher(self, cr, uid, ids, context=None):
Access = self.pool['ir.model.access']
is_website_publisher = Access.check(cr, uid, 'ir.ui.view', 'write', False, context=context)
return is_website_publisher
def is_user(self, cr, uid, ids, context=None):
Access = self.pool['ir.model.access']
return Access.check(cr, uid, 'ir.ui.menu', 'read', False, context=context)
def get_template(self, cr, uid, ids, template, context=None):
if isinstance(template, (int, long)):
view_id = template
else:
if '.' not in template:
template = 'website.%s' % template
module, xmlid = template.split('.', 1)
model, view_id = request.registry["ir.model.data"].get_object_reference(cr, uid, module, xmlid)
return self.pool["ir.ui.view"].browse(cr, uid, view_id, context=context)
def _render(self, cr, uid, ids, template, values=None, context=None):
# TODO: remove this. (just kept for backward api compatibility for saas-3)
return self.pool['ir.ui.view'].render(cr, uid, template, values=values, context=context)
def render(self, cr, uid, ids, template, values=None, status_code=None, context=None):
# TODO: remove this. (just kept for backward api compatibility for saas-3)
return request.render(template, values, uid=uid)
def pager(self, cr, uid, ids, url, total, page=1, step=30, scope=5, url_args=None, context=None):
# Compute Pager
page_count = int(math.ceil(float(total) / step))
page = max(1, min(int(page if str(page).isdigit() else 1), page_count))
scope -= 1
pmin = max(page - int(math.floor(scope/2)), 1)
pmax = min(pmin + scope, page_count)
if pmax - pmin < scope:
pmin = pmax - scope if pmax - scope > 0 else 1
def get_url(page):
_url = "%s/page/%s" % (url, page) if page > 1 else url
if url_args:
_url = "%s?%s" % (_url, werkzeug.url_encode(url_args))
return _url
return {
"page_count": page_count,
"offset": (page - 1) * step,
"page": {
'url': get_url(page),
'num': page
},
"page_start": {
'url': get_url(pmin),
'num': pmin
},
"page_previous": {
'url': get_url(max(pmin, page - 1)),
'num': max(pmin, page - 1)
},
"page_next": {
'url': get_url(min(pmax, page + 1)),
'num': min(pmax, page + 1)
},
"page_end": {
'url': get_url(pmax),
'num': pmax
},
"pages": [
{'url': get_url(page), 'num': page}
for page in xrange(pmin, pmax+1)
]
}
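    # Illustration (arguments assumed): pager(cr, uid, ids, '/shop', total=120,
    # page=3, step=20, scope=5) gives page_count == 6, offset == 40, and pages
    # 1..5 whose urls are '/shop' and '/shop/page/2' .. '/shop/page/5'.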
def rule_is_enumerable(self, rule):
""" Checks that it is possible to generate sensible GET queries for
a given rule (if the endpoint matches its own requirements)
:type rule: werkzeug.routing.Rule
:rtype: bool
"""
endpoint = rule.endpoint
methods = endpoint.routing.get('methods') or ['GET']
converters = rule._converters.values()
if not ('GET' in methods
and endpoint.routing['type'] == 'http'
and endpoint.routing['auth'] in ('none', 'public')
and endpoint.routing.get('website', False)
and all(hasattr(converter, 'generate') for converter in converters)
and endpoint.routing.get('website')):
return False
        # don't list routes whose arguments have neither a default value nor a converter
spec = inspect.getargspec(endpoint.method.original_func)
# remove self and arguments having a default value
defaults_count = len(spec.defaults or [])
args = spec.args[1:(-defaults_count or None)]
# check that all args have a converter
return all( (arg in rule._converters) for arg in args)
def enumerate_pages(self, cr, uid, ids, query_string=None, context=None):
""" Available pages in the website/CMS. This is mostly used for links
generation and can be overridden by modules setting up new HTML
controllers for dynamic pages (e.g. blog).
By default, returns template views marked as pages.
:param str query_string: a (user-provided) string, fetches pages
matching the string
:returns: a list of mappings with two keys: ``name`` is the displayable
name of the resource (page), ``url`` is the absolute URL
of the same.
:rtype: list({name: str, url: str})
"""
router = request.httprequest.app.get_db_router(request.db)
# Force enumeration to be performed as public user
url_set = set()
for rule in router.iter_rules():
if not self.rule_is_enumerable(rule):
continue
converters = rule._converters or {}
if query_string and not converters and (query_string not in rule.build([{}], append_unknown=False)[1]):
continue
values = [{}]
convitems = converters.items()
# converters with a domain are processed after the other ones
            gd = lambda x: hasattr(x[1], 'domain') and (x[1].domain != '[]')
convitems.sort(lambda x, y: cmp(gd(x), gd(y)))
for (i,(name, converter)) in enumerate(convitems):
newval = []
for val in values:
query = i==(len(convitems)-1) and query_string
for v in converter.generate(request.cr, uid, query=query, args=val, context=context):
newval.append( val.copy() )
v[name] = v['loc']
del v['loc']
newval[-1].update(v)
values = newval
for value in values:
domain_part, url = rule.build(value, append_unknown=False)
page = {'loc': url}
for key,val in value.items():
if key.startswith('__'):
page[key[2:]] = val
if url in ('/sitemap.xml',):
continue
if url in url_set:
continue
url_set.add(url)
yield page
def search_pages(self, cr, uid, ids, needle=None, limit=None, context=None):
name = (needle or "").replace("/page/website.", "").replace("/page/", "")
name = slugify(name, max_length=50)
res = []
for page in self.enumerate_pages(cr, uid, ids, query_string=name, context=context):
res.append(page)
if len(res) == limit:
break
return res
def kanban(self, cr, uid, ids, model, domain, column, template, step=None, scope=None, orderby=None, context=None):
step = step and int(step) or 10
scope = scope and int(scope) or 5
orderby = orderby or "name"
get_args = dict(request.httprequest.args or {})
model_obj = self.pool[model]
relation = model_obj._columns.get(column)._obj
relation_obj = self.pool[relation]
get_args.setdefault('kanban', "")
kanban = get_args.pop('kanban')
kanban_url = "?%s&kanban=" % werkzeug.url_encode(get_args)
pages = {}
for col in kanban.split(","):
if col:
col = col.split("-")
pages[int(col[0])] = int(col[1])
objects = []
for group in model_obj.read_group(cr, uid, domain, ["id", column], groupby=column):
obj = {}
# browse column
relation_id = group[column][0]
obj['column_id'] = relation_obj.browse(cr, uid, relation_id)
obj['kanban_url'] = kanban_url
for k, v in pages.items():
if k != relation_id:
obj['kanban_url'] += "%s-%s" % (k, v)
# pager
number = model_obj.search(cr, uid, group['__domain'], count=True)
obj['page_count'] = int(math.ceil(float(number) / step))
obj['page'] = pages.get(relation_id) or 1
if obj['page'] > obj['page_count']:
obj['page'] = obj['page_count']
offset = (obj['page']-1) * step
obj['page_start'] = max(obj['page'] - int(math.floor((scope-1)/2)), 1)
obj['page_end'] = min(obj['page_start'] + (scope-1), obj['page_count'])
# view data
obj['domain'] = group['__domain']
obj['model'] = model
obj['step'] = step
obj['orderby'] = orderby
# browse objects
object_ids = model_obj.search(cr, uid, group['__domain'], limit=step, offset=offset, order=orderby)
obj['object_ids'] = model_obj.browse(cr, uid, object_ids)
objects.append(obj)
values = {
'objects': objects,
'range': range,
'template': template,
}
return request.website._render("website.kanban_contain", values)
def kanban_col(self, cr, uid, ids, model, domain, page, template, step, orderby, context=None):
html = ""
model_obj = self.pool[model]
domain = safe_eval(domain)
step = int(step)
offset = (int(page)-1) * step
object_ids = model_obj.search(cr, uid, domain, limit=step, offset=offset, order=orderby)
object_ids = model_obj.browse(cr, uid, object_ids)
for object_id in object_ids:
html += request.website._render(template, {'object_id': object_id})
return html
def _image_placeholder(self, response):
# file_open may return a StringIO. StringIO can be closed but are
# not context managers in Python 2 though that is fixed in 3
with contextlib.closing(openerp.tools.misc.file_open(
os.path.join('web', 'static', 'src', 'img', 'placeholder.png'),
mode='rb')) as f:
response.data = f.read()
return response.make_conditional(request.httprequest)
def _image(self, cr, uid, model, id, field, response, max_width=maxint, max_height=maxint, cache=None, context=None):
""" Fetches the requested field and ensures it does not go above
(max_width, max_height), resizing it if necessary.
Resizing is bypassed if the object provides a $field_big, which will
be interpreted as a pre-resized version of the base field.
If the record is not found or does not have the requested field,
returns a placeholder image via :meth:`~._image_placeholder`.
Sets and checks conditional response parameters:
* :mailheader:`ETag` is always set (and checked)
* :mailheader:`Last-Modified is set iif the record has a concurrency
field (``__last_update``)
The requested field is assumed to be base64-encoded image data in
all cases.
"""
Model = self.pool[model]
id = int(id)
ids = None
if Model.check_access_rights(cr, uid, 'read', raise_exception=False):
ids = Model.search(cr, uid,
[('id', '=', id)], context=context)
if not ids and 'website_published' in Model._fields:
ids = Model.search(cr, openerp.SUPERUSER_ID,
[('id', '=', id), ('website_published', '=', True)], context=context)
if not ids:
return self._image_placeholder(response)
concurrency = '__last_update'
[record] = Model.read(cr, openerp.SUPERUSER_ID, [id],
[concurrency, field],
context=context)
if concurrency in record:
server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
try:
response.last_modified = datetime.datetime.strptime(
record[concurrency], server_format + '.%f')
except ValueError:
# just in case we have a timestamp without microseconds
response.last_modified = datetime.datetime.strptime(
record[concurrency], server_format)
# Field does not exist on model or field set to False
if not record.get(field):
# FIXME: maybe a field which does not exist should be a 404?
return self._image_placeholder(response)
response.set_etag(hashlib.sha1(record[field]).hexdigest())
response.make_conditional(request.httprequest)
if cache:
response.cache_control.max_age = cache
response.expires = int(time.time() + cache)
# conditional request match
if response.status_code == 304:
return response
data = record[field].decode('base64')
image = Image.open(cStringIO.StringIO(data))
response.mimetype = Image.MIME[image.format]
filename = '%s_%s.%s' % (model.replace('.', '_'), id, str(image.format).lower())
response.headers['Content-Disposition'] = 'inline; filename="%s"' % filename
if (not max_width) and (not max_height):
response.data = data
return response
w, h = image.size
max_w = int(max_width) if max_width else maxint
max_h = int(max_height) if max_height else maxint
if w < max_w and h < max_h:
response.data = data
else:
size = (max_w, max_h)
img = image_resize_and_sharpen(image, size, preserve_aspect_ratio=True)
image_save_for_web(img, response.stream, format=image.format)
# invalidate content-length computed by make_conditional as
# writing to response.stream does not do it (as of werkzeug 0.9.3)
del response.headers['Content-Length']
return response
def image_url(self, cr, uid, record, field, size=None, context=None):
"""Returns a local url that points to the image field of a given browse record."""
model = record._name
sudo_record = record.sudo()
id = '%s_%s' % (record.id, hashlib.sha1(sudo_record.write_date or sudo_record.create_date or '').hexdigest()[0:7])
size = '' if size is None else '/%s' % size
return '/website/image/%s/%s/%s%s' % (model, id, field, size)
class website_menu(osv.osv):
_name = "website.menu"
_description = "Website Menu"
_columns = {
'name': fields.char('Menu', required=True, translate=True),
'url': fields.char('Url'),
'new_window': fields.boolean('New Window'),
'sequence': fields.integer('Sequence'),
# TODO: support multiwebsite once done for ir.ui.views
'website_id': fields.many2one('website', 'Website'),
'parent_id': fields.many2one('website.menu', 'Parent Menu', select=True, ondelete="cascade"),
'child_id': fields.one2many('website.menu', 'parent_id', string='Child Menus'),
'parent_left': fields.integer('Parent Left', select=True),
'parent_right': fields.integer('Parent Right', select=True),
}
def __defaults_sequence(self, cr, uid, context):
menu = self.search_read(cr, uid, [(1,"=",1)], ["sequence"], limit=1, order="sequence DESC", context=context)
return menu and menu[0]["sequence"] or 0
_defaults = {
'url': '',
'sequence': __defaults_sequence,
'new_window': False,
}
_parent_store = True
_parent_order = 'sequence'
_order = "sequence"
# would be better to take a menu_id as argument
def get_tree(self, cr, uid, website_id, context=None):
def make_tree(node):
menu_node = dict(
id=node.id,
name=node.name,
url=node.url,
new_window=node.new_window,
sequence=node.sequence,
parent_id=node.parent_id.id,
children=[],
)
for child in node.child_id:
menu_node['children'].append(make_tree(child))
return menu_node
menu = self.pool.get('website').browse(cr, uid, website_id, context=context).menu_id
return make_tree(menu)
def save(self, cr, uid, website_id, data, context=None):
def replace_id(old_id, new_id):
for menu in data['data']:
if menu['id'] == old_id:
menu['id'] = new_id
if menu['parent_id'] == old_id:
menu['parent_id'] = new_id
to_delete = data['to_delete']
if to_delete:
self.unlink(cr, uid, to_delete, context=context)
for menu in data['data']:
mid = menu['id']
if isinstance(mid, str):
new_id = self.create(cr, uid, {'name': menu['name']}, context=context)
replace_id(mid, new_id)
for menu in data['data']:
self.write(cr, uid, [menu['id']], menu, context=context)
return True
class ir_attachment(osv.osv):
_inherit = "ir.attachment"
def _website_url_get(self, cr, uid, ids, name, arg, context=None):
result = {}
for attach in self.browse(cr, uid, ids, context=context):
if attach.url:
result[attach.id] = attach.url
else:
result[attach.id] = self.pool['website'].image_url(cr, uid, attach, 'datas')
return result
def _datas_checksum(self, cr, uid, ids, name, arg, context=None):
result = dict.fromkeys(ids, False)
attachments = self.read(cr, uid, ids, ['res_model'], context=context)
view_attachment_ids = [attachment['id'] for attachment in attachments if attachment['res_model'] == 'ir.ui.view']
for attach in self.read(cr, uid, view_attachment_ids, ['res_model', 'res_id', 'type', 'datas'], context=context):
result[attach['id']] = self._compute_checksum(attach)
return result
def _compute_checksum(self, attachment_dict):
if attachment_dict.get('res_model') == 'ir.ui.view'\
and not attachment_dict.get('res_id') and not attachment_dict.get('url')\
and attachment_dict.get('type', 'binary') == 'binary'\
and attachment_dict.get('datas'):
return hashlib.new('sha1', attachment_dict['datas']).hexdigest()
return None
def _datas_big(self, cr, uid, ids, name, arg, context=None):
result = dict.fromkeys(ids, False)
if context and context.get('bin_size'):
return result
for record in self.browse(cr, uid, ids, context=context):
if record.res_model != 'ir.ui.view' or not record.datas: continue
try:
result[record.id] = openerp.tools.image_resize_image_big(record.datas)
except IOError: # apparently the error PIL.Image.open raises
pass
return result
_columns = {
'datas_checksum': fields.function(_datas_checksum, size=40,
string="Datas checksum", type='char', store=True, select=True),
'website_url': fields.function(_website_url_get, string="Attachment URL", type='char'),
'datas_big': fields.function (_datas_big, type='binary', store=True,
string="Resized file content"),
'mimetype': fields.char('Mime Type', readonly=True),
}
def _add_mimetype_if_needed(self, values):
if values.get('datas_fname'):
values['mimetype'] = mimetypes.guess_type(values.get('datas_fname'))[0] or 'application/octet-stream'
def create(self, cr, uid, values, context=None):
chk = self._compute_checksum(values)
if chk:
match = self.search(cr, uid, [('datas_checksum', '=', chk)], context=context)
if match:
return match[0]
self._add_mimetype_if_needed(values)
return super(ir_attachment, self).create(
cr, uid, values, context=context)
def write(self, cr, uid, ids, values, context=None):
self._add_mimetype_if_needed(values)
return super(ir_attachment, self).write(cr, uid, ids, values, context=context)
def try_remove(self, cr, uid, ids, context=None):
""" Removes a web-based image attachment if it is used by no view
(template)
Returns a dict mapping attachments which would not be removed (if any)
mapped to the views preventing their removal
"""
Views = self.pool['ir.ui.view']
attachments_to_remove = []
# views blocking removal of the attachment
removal_blocked_by = {}
for attachment in self.browse(cr, uid, ids, context=context):
# in-document URLs are html-escaped, a straight search will not
# find them
url = escape(attachment.website_url)
ids = Views.search(cr, uid, ["|", ('arch', 'like', '"%s"' % url), ('arch', 'like', "'%s'" % url)], context=context)
if ids:
removal_blocked_by[attachment.id] = Views.read(
cr, uid, ids, ['name'], context=context)
else:
attachments_to_remove.append(attachment.id)
if attachments_to_remove:
self.unlink(cr, uid, attachments_to_remove, context=context)
return removal_blocked_by
class res_partner(osv.osv):
_inherit = "res.partner"
def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
partner = self.browse(cr, uid, ids[0], context=context)
params = {
'center': '%s, %s %s, %s' % (partner.street or '', partner.city or '', partner.zip or '', partner.country_id and partner.country_id.name_get()[0][1] or ''),
'size': "%sx%s" % (height, width),
'zoom': zoom,
'sensor': 'false',
}
return urlplus('//maps.googleapis.com/maps/api/staticmap' , params)
def google_map_link(self, cr, uid, ids, zoom=10, context=None):
partner = self.browse(cr, uid, ids[0], context=context)
params = {
'q': '%s, %s %s, %s' % (partner.street or '', partner.city or '', partner.zip or '', partner.country_id and partner.country_id.name_get()[0][1] or ''),
'z': zoom,
}
return urlplus('https://maps.google.com/maps' , params)
class res_company(osv.osv):
_inherit = "res.company"
def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
partner = self.browse(cr, openerp.SUPERUSER_ID, ids[0], context=context).partner_id
return partner and partner.google_map_img(zoom, width, height, context=context) or None
def google_map_link(self, cr, uid, ids, zoom=8, context=None):
partner = self.browse(cr, openerp.SUPERUSER_ID, ids[0], context=context).partner_id
return partner and partner.google_map_link(zoom, context=context) or None
class base_language_install(osv.osv_memory):
_inherit = "base.language.install"
_columns = {
'website_ids': fields.many2many('website', string='Websites to translate'),
}
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
defaults = super(base_language_install, self).default_get(cr, uid, fields, context)
website_id = context.get('params', {}).get('website_id')
if website_id:
if 'website_ids' not in defaults:
defaults['website_ids'] = []
defaults['website_ids'].append(website_id)
return defaults
def lang_install(self, cr, uid, ids, context=None):
if context is None:
context = {}
action = super(base_language_install, self).lang_install(cr, uid, ids, context)
language_obj = self.browse(cr, uid, ids)[0]
website_ids = [website.id for website in language_obj['website_ids']]
lang_id = self.pool['res.lang'].search(cr, uid, [('code', '=', language_obj['lang'])])
if website_ids and lang_id:
data = {'language_ids': [(4, lang_id[0])]}
self.pool['website'].write(cr, uid, website_ids, data)
params = context.get('params', {})
if 'url_return' in params:
return {
'url': params['url_return'].replace('[lang]', language_obj['lang']),
'type': 'ir.actions.act_url',
'target': 'self'
}
return action
class website_seo_metadata(osv.Model):
_name = 'website.seo.metadata'
_description = 'SEO metadata'
_columns = {
'website_meta_title': fields.char("Website meta title", translate=True),
'website_meta_description': fields.text("Website meta description", translate=True),
'website_meta_keywords': fields.char("Website meta keywords", translate=True),
}
# vim:et:
| agpl-3.0 |
thiriel/maps | venv/lib/python2.7/site-packages/django/views/generic/create_update.py | 87 | 8928 | from django.forms.models import ModelFormMetaclass, ModelForm
from django.template import RequestContext, loader
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.core.xheaders import populate_xheaders
from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured
from django.utils.translation import ugettext
from django.contrib.auth.views import redirect_to_login
from django.views.generic import GenericViewError
from django.contrib import messages
import warnings
warnings.warn(
'Function-based generic views have been deprecated; use class-based views instead.',
DeprecationWarning
)
def apply_extra_context(extra_context, context):
"""
Adds items from extra_context dict to context. If a value in extra_context
is callable, then it is called and the result is added to context.
"""
for key, value in extra_context.iteritems():
if callable(value):
context[key] = value()
else:
context[key] = value
def get_model_and_form_class(model, form_class):
"""
Returns a model and form class based on the model and form_class
parameters that were passed to the generic view.
If ``form_class`` is given then its associated model will be returned along
with ``form_class`` itself. Otherwise, if ``model`` is given, ``model``
itself will be returned along with a ``ModelForm`` class created from
``model``.
"""
if form_class:
return form_class._meta.model, form_class
if model:
# The inner Meta class fails if model = model is used for some reason.
tmp_model = model
# TODO: we should be able to construct a ModelForm without creating
# and passing in a temporary inner class.
class Meta:
model = tmp_model
class_name = model.__name__ + 'Form'
form_class = ModelFormMetaclass(class_name, (ModelForm,), {'Meta': Meta})
return model, form_class
raise GenericViewError("Generic view must be called with either a model or"
" form_class argument.")
def redirect(post_save_redirect, obj):
"""
Returns a HttpResponseRedirect to ``post_save_redirect``.
``post_save_redirect`` should be a string, and can contain named string-
substitution place holders of ``obj`` field names.
If ``post_save_redirect`` is None, then redirect to ``obj``'s URL returned
by ``get_absolute_url()``. If ``obj`` has no ``get_absolute_url`` method,
then raise ImproperlyConfigured.
This function is meant to handle the post_save_redirect parameter to the
``create_object`` and ``update_object`` views.
"""
if post_save_redirect:
return HttpResponseRedirect(post_save_redirect % obj.__dict__)
elif hasattr(obj, 'get_absolute_url'):
return HttpResponseRedirect(obj.get_absolute_url())
else:
raise ImproperlyConfigured(
"No URL to redirect to. Either pass a post_save_redirect"
" parameter to the generic view or define a get_absolute_url"
" method on the Model.")
def lookup_object(model, object_id, slug, slug_field):
"""
Return the ``model`` object with the passed ``object_id``. If
``object_id`` is None, then return the object whose ``slug_field``
equals the passed ``slug``. If ``slug`` and ``slug_field`` are not passed,
then raise Http404 exception.
"""
lookup_kwargs = {}
if object_id:
lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
elif slug and slug_field:
lookup_kwargs['%s__exact' % slug_field] = slug
else:
raise GenericViewError(
"Generic view must be called with either an object_id or a"
" slug/slug_field.")
try:
return model.objects.get(**lookup_kwargs)
except ObjectDoesNotExist:
raise Http404("No %s found for %s"
% (model._meta.verbose_name, lookup_kwargs))
def create_object(request, model=None, template_name=None,
template_loader=loader, extra_context=None, post_save_redirect=None,
login_required=False, context_processors=None, form_class=None):
"""
Generic object-creation function.
Templates: ``<app_label>/<model_name>_form.html``
Context:
form
the form for the object
"""
if extra_context is None: extra_context = {}
if login_required and not request.user.is_authenticated():
return redirect_to_login(request.path)
model, form_class = get_model_and_form_class(model, form_class)
if request.method == 'POST':
form = form_class(request.POST, request.FILES)
if form.is_valid():
new_object = form.save()
msg = ugettext("The %(verbose_name)s was created successfully.") %\
{"verbose_name": model._meta.verbose_name}
messages.success(request, msg, fail_silently=True)
return redirect(post_save_redirect, new_object)
else:
form = form_class()
# Create the template, context, response
if not template_name:
template_name = "%s/%s_form.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'form': form,
}, context_processors)
apply_extra_context(extra_context, c)
return HttpResponse(t.render(c))
def update_object(request, model=None, object_id=None, slug=None,
slug_field='slug', template_name=None, template_loader=loader,
extra_context=None, post_save_redirect=None, login_required=False,
context_processors=None, template_object_name='object',
form_class=None):
"""
Generic object-update function.
Templates: ``<app_label>/<model_name>_form.html``
Context:
form
the form for the object
object
the original object being edited
"""
if extra_context is None: extra_context = {}
if login_required and not request.user.is_authenticated():
return redirect_to_login(request.path)
model, form_class = get_model_and_form_class(model, form_class)
obj = lookup_object(model, object_id, slug, slug_field)
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=obj)
if form.is_valid():
obj = form.save()
msg = ugettext("The %(verbose_name)s was updated successfully.") %\
{"verbose_name": model._meta.verbose_name}
messages.success(request, msg, fail_silently=True)
return redirect(post_save_redirect, obj)
else:
form = form_class(instance=obj)
if not template_name:
template_name = "%s/%s_form.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'form': form,
template_object_name: obj,
}, context_processors)
apply_extra_context(extra_context, c)
response = HttpResponse(t.render(c))
populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.attname))
return response
def delete_object(request, model, post_delete_redirect, object_id=None,
slug=None, slug_field='slug', template_name=None,
template_loader=loader, extra_context=None, login_required=False,
context_processors=None, template_object_name='object'):
"""
Generic object-delete function.
    The given template will be used to confirm deletion if this view is
    fetched using GET; for safety, deletion will only be performed if this
view is POSTed.
Templates: ``<app_label>/<model_name>_confirm_delete.html``
Context:
object
the original object being deleted
"""
if extra_context is None: extra_context = {}
if login_required and not request.user.is_authenticated():
return redirect_to_login(request.path)
obj = lookup_object(model, object_id, slug, slug_field)
if request.method == 'POST':
obj.delete()
msg = ugettext("The %(verbose_name)s was deleted.") %\
{"verbose_name": model._meta.verbose_name}
messages.success(request, msg, fail_silently=True)
return HttpResponseRedirect(post_delete_redirect)
else:
if not template_name:
template_name = "%s/%s_confirm_delete.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
template_object_name: obj,
}, context_processors)
apply_extra_context(extra_context, c)
response = HttpResponse(t.render(c))
populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.attname))
return response
| bsd-3-clause |
almarklein/visvis.dev | examples/embeddingInQt4.py | 5 | 1816 | #!/usr/bin/env python
"""
This example illustrates embedding a visvis figure in a Qt application.
"""
from PyQt4 import QtGui, QtCore
import visvis as vv
# Create a visvis app instance, which wraps a qt4 application object.
# This needs to be done *before* instantiating the main window.
app = vv.use('qt4')
class MainWindow(QtGui.QWidget):
def __init__(self, *args):
QtGui.QWidget.__init__(self, *args)
# Make a panel with a button
self.panel = QtGui.QWidget(self)
but = QtGui.QPushButton(self.panel)
but.setText('Push me')
# Make figure using "self" as a parent
self.fig = vv.backends.backend_qt4.Figure(self)
# Make sizer and embed stuff
self.sizer = QtGui.QHBoxLayout(self)
self.sizer.addWidget(self.panel, 1)
self.sizer.addWidget(self.fig._widget, 2)
# Make callback
but.pressed.connect(self._Plot)
# Apply sizers
self.setLayout(self.sizer)
# Finish
self.resize(560, 420)
self.setWindowTitle('Embedding in Qt')
self.show()
def _Plot(self):
# Make sure our figure is the active one.
# If only one figure, this is not necessary.
#vv.figure(self.fig.nr)
# Clear it
vv.clf()
# Plot
vv.plot([1,2,3,1,6])
vv.legend(['this is a line'])
#self.fig.DrawNow()
# Two ways to create the application and start the main loop
if True:
# The visvis way. Will run in interactive mode when used in IEP or IPython.
app.Create()
m = MainWindow()
app.Run()
else:
# The native way.
qtApp = QtGui.QApplication([''])
m = MainWindow()
qtApp.exec_()
| bsd-3-clause |
golivieri/ansible | lib/ansible/runner/action_plugins/async.py | 6 | 1940 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.runner.return_data import ReturnData
class ActionModule(object):
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
''' transfer the given module name, plus the async module, then run it '''
if self.runner.noop_on_check(inject):
return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
# shell and command module are the same
if module_name == 'shell':
module_name = 'command'
module_args += " #USE_SHELL"
if tmp.find("tmp") == -1:
tmp = self.runner._make_tmp_path(conn)
(module_path, is_new_style, shebang) = self.runner._copy_module(conn, tmp, module_name, module_args, inject, complex_args=complex_args)
self.runner._low_level_exec_command(conn, "chmod a+rx %s" % module_path, tmp)
return self.runner._execute_module(conn, tmp, 'async_wrapper', module_args,
async_module=module_path,
async_jid=self.runner.generated_jid,
async_limit=self.runner.background,
inject=inject
)
| gpl-3.0 |
rajathkumarmp/robocomp | tools/rcmaster/test/test.py | 1 | 6886 | import pexpect, sys
from os import environ , path
from time import sleep
from copy import deepcopy
import unittest
class Comp:
def __init__(self, name, cmd):
self.init_name = name
self.name = self.init_name
self.cmd = cmd
        self.status = 0
self.output = ''
self.extra_params = ''
self.process = pexpect.spawn('true')
self.process.kill(0)
def setName(self, name):
'''
        set the name of the component
'''
self.name = name
def add_param(self, param, append=True):
'''
add an extra param to be passed while calling the component
'''
if append:
self.extra_params = self.extra_params + " " + param
else:
self.extra_params = param
def start(self):
        '''
start the comp
'''
self.status = 0
self.output = ''
fcmd = self.cmd + " --Ice.ProgramName="+ self.name + self.extra_params
fcmd = fcmd.split(" ")
# print fcmd
self.process = pexpect.spawn(fcmd[0],fcmd[1:],timeout=None,env=environ.copy())
def stop(self):
'''
stop the comp
'''
self.process.sendcontrol('c')
self.process.kill(0)
def getOutput(self):
'''
get all output till now
'''
l = self.process.buffer
self.output = self.output + str(l)
return self.output
def getCurrentOutput(self):
'''
get top output line in buffer
'''
line = str(self.process.readline())
self.output = self.output + line
return line.strip()
def checkInOutput(self, texts, timeOut=30, pattern=False):
'''
        wait <timeOut> seconds until the text appears
        @ pattern : treat the given texts as regular expressions
        @ texts : list or string of patterns/texts to be matched
'''
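        # e.g. comp.checkInOutput(['Component Started'], 5) waits up to 5 s
        # for that exact text; with pattern=True the entries are handed to
        # pexpect as regular expressions instead.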
try:
if pattern:
index = self.process.expect(texts, timeOut)
else:
index = self.process.expect_exact(texts, timeOut)
self.output = self.output + self.process.before + self.process.after
if index == 0:
return True
elif index == 1:
return False
except pexpect.EOF:
return False
except pexpect.TIMEOUT:
return False
return False
def getStatus(self):
'''
get comp running status
'''
self.status = self.process.isalive()
return self.status
def reset(self):
'''
reset to the init state
'''
self.stop()
self.extra_params = ''
self.setName(self.init_name)
class RCmasterTestCase(unittest.TestCase):
def setUp(self):
self.comps = dict()
# cdirPath = path.dirname(path.abspath(__file__))
self.comps["master"] = Comp("rcmaster","../src/rcmaster.py --Ice.Config=../etc/config")
self.comps["comp1"] = Comp("client1","clients/client1/src/client1.py --Ice.Config=clients/client1/etc/config")
self.comps["comp3"] = Comp("client3","clients/client3/src/client3.py --Ice.Config=clients/client3/etc/config")
self.comps["comp32"] = Comp("client32","clients/client3/src/client3.py --Ice.Config=clients/client3/etc/config")
self.comps["comp4"] = Comp("client4","clients/client4/src/client4.py --Ice.Config=clients/client4/etc/config")
def tearDown(self):
# stop all running comps
for name, comp in self.comps.iteritems():
if comp.getStatus() == True:
comp.stop()
# @unittest.skip("test skipping")
def test_rcmaster_start(self):
self.comps["master"].start()
op1 = self.comps["master"].checkInOutput(["Starting rcmaster"],10)
self.assertTrue(op1,msg="master cant start")
# @unittest.skip("test skipping")
def test_component_registration(self):
self.comps["master"].start()
op1 = self.comps["master"].checkInOutput(["Starting rcmaster"],10)
self.assertTrue(op1,msg="master cant start")
self.comps["comp3"].start()
op2 = self.comps["comp3"].checkInOutput(["Component Started"],5)
self.assertTrue(op2,msg="component cant start")
op2 = self.comps["master"].checkInOutput(["name = client3"],5)
self.assertTrue(op2,msg="component not registred")
# @unittest.skip("test skipping")
def test_multiple_same_interface(self):
self.comps["master"].start()
op1 = self.comps["master"].checkInOutput(["Starting rcmaster"],10)
self.assertTrue(op1,msg="master cant start")
self.comps["comp1"].start()
op1 = self.comps["comp1"].checkInOutput(["waiting for"],10)
self.assertTrue(op1,msg="comp1 not waiting for test interface")
self.comps["comp3"].setName("client31")
self.comps["comp3"].start()
op1 = self.comps["comp3"].checkInOutput(["Component Started"],5)
self.assertTrue(op1,msg="component cant start")
op2 = self.comps["master"].checkInOutput(["name = client31"],5)
self.assertTrue(op2,msg="client31 not registred")
self.comps["comp32"].start()
op1 = self.comps["comp32"].checkInOutput(["Component Started"],5)
self.assertTrue(op1,msg="component cant start")
op2 = self.comps["master"].checkInOutput(["name = client32"],5)
self.assertTrue(op2,msg="client32 not registred")
op1 = self.comps["comp3"].checkInOutput(["hello from client1"],10)
op2 = self.comps["comp32"].checkInOutput(["hello from client1"],10)
self.assertTrue(op1,msg="cant recive msg from comp1")
self.comps["comp3"].stop()
op1 = self.comps["comp1"].checkInOutput(["waiting for"],10)
self.assertTrue(op1,msg="comp1 not waiting for test interface after reset")
def test_caching(self):
# self.comps["master"].start()
# op1 = self.comps["master"].checkInOutput(["Starting rcmaster"],10)
# self.assertTrue(op1,msg="master cant start")
# self.comps["comp3"].start()
# op2 = self.comps["comp3"].checkInOutput(["Component Started"],5)
# self.assertTrue(op2,msg="component cant start")
# op2 = self.comps["master"].checkInOutput(["name = client3"],5)
# self.assertTrue(op2,msg="component not registred")
pass
if __name__ == '__main__':
unittest.main()
# comps = dict()
# comps["master"] = Comp("rcmaster","../src/rcmaster.py --Ice.Config=../etc/config")
# comps["master"].add_param("--rcmaster.cachettyl=2000")
# comps["master"].start()
# print "st ",comps["master"].getStatus()
# op = comps["master"].checkInOutput(["Starting rcmaster"],10)
# print op
# comps["master"].stop()
| gpl-3.0 |
cmcantalupo/geopm | integration/test/test_geopmagent.py | 1 | 6341 | #!/usr/bin/env python
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import unittest
import subprocess
import json
import geopm_context
import geopmpy.io
import geopmpy.hash
class TestIntegrationGeopmagent(unittest.TestCase):
''' Tests of geopmagent.'''
def setUp(self):
self.exec_name = 'geopmagent'
self.skip_warning_string = 'Incompatible CPU frequency driver/governor'
def check_output(self, args, expected):
try:
with subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
proc.wait()
for exp in expected:
line = proc.stdout.readline()
while self.skip_warning_string.encode() in line or line == b'\n':
line = proc.stdout.readline()
self.assertIn(exp.encode(), line)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def check_json_output(self, args, expected):
try:
with subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
proc.wait()
line = proc.stdout.readline()
while self.skip_warning_string.encode() in line or line == b'\n':
line = proc.stdout.readline()
try:
out_json = json.loads(line.decode())
except ValueError:
self.fail('Could not convert json string: {}\n'.format(line))
self.assertEqual(expected, out_json)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def check_no_error(self, args):
try:
with subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:
proc.wait()
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
proc.stdout.close()
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def test_geopmagent_command_line(self):
'''
Check that geopmagent commandline arguments work.
'''
# no args
agent_names = ['monitor', 'power_balancer', 'power_governor',
'frequency_map']
self.check_output([], agent_names)
# help message
self.check_output(['--help'], ['Usage'])
# version
self.check_no_error(['--version'])
# agent policy and sample names
for agent in agent_names:
self.check_output(['--agent', agent],
['Policy', 'Sample'])
# policy file
self.check_json_output(['--agent', 'monitor', '--policy', 'None'],
{})
self.check_json_output(['--agent', 'power_governor', '--policy', '150'],
{'POWER_PACKAGE_LIMIT_TOTAL': 150})
# default value policy
self.check_json_output(['--agent', 'power_governor', '--policy', 'NAN'],
{'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})
self.check_json_output(['--agent', 'power_governor', '--policy', 'nan'],
{'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})
# unspecified policy values are accepted
self.check_json_output(['--agent', 'power_balancer', '--policy', '150'],
{'POWER_PACKAGE_LIMIT_TOTAL': 150})
# hashing works for frequency map agent
self.check_json_output(['--agent', 'frequency_map', '--policy', '1e9,nan,hello,2e9'],
{'FREQ_DEFAULT': 1e9, 'FREQ_UNCORE': 'NAN',
'HASH_0': geopmpy.hash.crc32_str('hello'), 'FREQ_0': 2e9})
# errors
self.check_output(['--agent', 'power_governor', '--policy', 'None'],
['not a valid floating-point number', 'Invalid argument'])
self.check_output(['--agent', 'monitor', '--policy', '300'],
['agent takes no parameters', 'Invalid argument'])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
lzweopard/carve | external/gtest-1.5.0/test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def UsesColor(term, color_env_var, color_flag):
"""Runs gtest_color_test_ and returns its exit code."""
SetEnvVar('TERM', term)
SetEnvVar(COLOR_ENV_VAR, color_env_var)
if color_flag is None:
args = []
else:
args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
p = gtest_test_utils.Subprocess([COMMAND] + args)
return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
def testNoEnvVarNoFlag(self):
"""Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', None, None))
self.assert_(not UsesColor('emacs', None, None))
self.assert_(not UsesColor('xterm-mono', None, None))
self.assert_(not UsesColor('unknown', None, None))
self.assert_(not UsesColor(None, None, None))
self.assert_(UsesColor('linux', None, None))
self.assert_(UsesColor('cygwin', None, None))
self.assert_(UsesColor('xterm', None, None))
self.assert_(UsesColor('xterm-color', None, None))
self.assert_(UsesColor('xterm-256color', None, None))
def testFlagOnly(self):
"""Tests the case when there's --gtest_color but not GTEST_COLOR."""
self.assert_(not UsesColor('dumb', None, 'no'))
self.assert_(not UsesColor('xterm-color', None, 'no'))
if not IS_WINDOWS:
self.assert_(not UsesColor('emacs', None, 'auto'))
self.assert_(UsesColor('xterm', None, 'auto'))
self.assert_(UsesColor('dumb', None, 'yes'))
self.assert_(UsesColor('xterm', None, 'yes'))
def testEnvVarOnly(self):
"""Tests the case when there's GTEST_COLOR but not --gtest_color."""
self.assert_(not UsesColor('dumb', 'no', None))
self.assert_(not UsesColor('xterm-color', 'no', None))
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', 'auto', None))
self.assert_(UsesColor('xterm-color', 'auto', None))
self.assert_(UsesColor('dumb', 'yes', None))
self.assert_(UsesColor('xterm-color', 'yes', None))
def testEnvVarAndFlag(self):
"""Tests the case when there are both GTEST_COLOR and --gtest_color."""
self.assert_(not UsesColor('xterm-color', 'no', 'no'))
self.assert_(UsesColor('dumb', 'no', 'yes'))
self.assert_(UsesColor('xterm-color', 'no', 'auto'))
def testAliasesOfYesAndNo(self):
"""Tests using aliases in specifying --gtest_color."""
self.assert_(UsesColor('dumb', None, 'true'))
self.assert_(UsesColor('dumb', None, 'YES'))
self.assert_(UsesColor('dumb', None, 'T'))
self.assert_(UsesColor('dumb', None, '1'))
self.assert_(not UsesColor('xterm', None, 'f'))
self.assert_(not UsesColor('xterm', None, 'false'))
self.assert_(not UsesColor('xterm', None, '0'))
self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-3.0 |
shaftoe/home-assistant | homeassistant/components/zwave/node_entity.py | 1 | 5871 | """Entity class that represents Z-Wave node."""
import logging
from homeassistant.core import callback
from homeassistant.const import ATTR_BATTERY_LEVEL, ATTR_WAKEUP
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
from .const import ATTR_NODE_ID, DOMAIN, COMMAND_CLASS_WAKE_UP
from .util import node_name
_LOGGER = logging.getLogger(__name__)
ATTR_QUERY_STAGE = 'query_stage'
ATTR_AWAKE = 'is_awake'
ATTR_READY = 'is_ready'
ATTR_FAILED = 'is_failed'
ATTR_PRODUCT_NAME = 'product_name'
ATTR_MANUFACTURER_NAME = 'manufacturer_name'
STAGE_COMPLETE = 'Complete'
_REQUIRED_ATTRIBUTES = [
ATTR_QUERY_STAGE, ATTR_AWAKE, ATTR_READY, ATTR_FAILED,
'is_info_received', 'max_baud_rate', 'is_zwave_plus']
_OPTIONAL_ATTRIBUTES = ['capabilities', 'neighbors', 'location']
_COMM_ATTRIBUTES = [
'sentCnt', 'sentFailed', 'retries', 'receivedCnt', 'receivedDups',
'receivedUnsolicited', 'sentTS', 'receivedTS', 'lastRequestRTT',
'averageRequestRTT', 'lastResponseRTT', 'averageResponseRTT']
ATTRIBUTES = _REQUIRED_ATTRIBUTES + _OPTIONAL_ATTRIBUTES
class ZWaveBaseEntity(Entity):
"""Base class for Z-Wave Node and Value entities."""
def __init__(self):
"""Initialize the base Z-Wave class."""
self._update_scheduled = False
def maybe_schedule_update(self):
"""Maybe schedule state update.
        If the value changed after the device was created but before
        setup_platform was called, skip updating the state.
"""
if self.hass and not self._update_scheduled:
self.hass.add_job(self._schedule_update)
@callback
def _schedule_update(self):
"""Schedule delayed update."""
if self._update_scheduled:
return
@callback
def do_update():
"""Really update."""
self.hass.async_add_job(self.async_update_ha_state)
self._update_scheduled = False
self._update_scheduled = True
self.hass.loop.call_later(0.1, do_update)
def sub_status(status, stage):
"""Format sub-status."""
return '{} ({})'.format(status, stage) if stage else status
class ZWaveNodeEntity(ZWaveBaseEntity):
"""Representation of a Z-Wave node."""
def __init__(self, node, network):
"""Initialize node."""
# pylint: disable=import-error
super().__init__()
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
self._network = network
self.node = node
self.node_id = self.node.node_id
self._name = node_name(self.node)
self._product_name = node.product_name
self._manufacturer_name = node.manufacturer_name
self.entity_id = "{}.{}_{}".format(
DOMAIN, slugify(self._name), self.node_id)
self._attributes = {}
self.wakeup_interval = None
self.location = None
self.battery_level = None
dispatcher.connect(
self.network_node_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
dispatcher.connect(self.network_node_changed, ZWaveNetwork.SIGNAL_NODE)
dispatcher.connect(
self.network_node_changed, ZWaveNetwork.SIGNAL_NOTIFICATION)
def network_node_changed(self, node=None, args=None):
"""Handle a changed node on the network."""
if node and node.node_id != self.node_id:
return
if args is not None and 'nodeId' in args and \
args['nodeId'] != self.node_id:
return
self.node_changed()
def get_node_statistics(self):
"""Retrieve statistics from the node."""
return self._network.manager.getNodeStatistics(
self._network.home_id, self.node_id)
def node_changed(self):
"""Update node properties."""
attributes = {}
stats = self.get_node_statistics()
for attr in ATTRIBUTES:
value = getattr(self.node, attr)
if attr in _REQUIRED_ATTRIBUTES or value:
attributes[attr] = value
for attr in _COMM_ATTRIBUTES:
attributes[attr] = stats[attr]
if self.node.can_wake_up():
for value in self.node.get_values(COMMAND_CLASS_WAKE_UP).values():
self.wakeup_interval = value.data
break
else:
self.wakeup_interval = None
self.battery_level = self.node.get_battery_level()
self._attributes = attributes
self.maybe_schedule_update()
@property
def state(self):
"""Return the state."""
if ATTR_READY not in self._attributes:
return None
stage = ''
if not self._attributes[ATTR_READY]:
# If node is not ready use stage as sub-status.
stage = self._attributes[ATTR_QUERY_STAGE]
if self._attributes[ATTR_FAILED]:
return sub_status('Dead', stage)
if not self._attributes[ATTR_AWAKE]:
return sub_status('Sleeping', stage)
if self._attributes[ATTR_READY]:
return sub_status('Ready', stage)
return stage
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attrs = {
ATTR_NODE_ID: self.node_id,
ATTR_MANUFACTURER_NAME: self._manufacturer_name,
ATTR_PRODUCT_NAME: self._product_name,
}
attrs.update(self._attributes)
if self.battery_level is not None:
attrs[ATTR_BATTERY_LEVEL] = self.battery_level
if self.wakeup_interval is not None:
attrs[ATTR_WAKEUP] = self.wakeup_interval
return attrs
| apache-2.0 |
jrleja/bsfh | misc/timings_pyfsps.py | 3 | 4274 | #compare a lookup table of spectra at ages and metallicities to
#calls to fsps.sps.get_spectrum() for different metallicities
import time, os, subprocess, re, sys
import numpy as np
#import matplotlib.pyplot as pl
import fsps
from prospect import sources as sps_basis
from prospect.models import sedmodel
def run_command(cmd):
"""
Open a child process, and return its exit status and stdout.
"""
child = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out = [s for s in child.stdout]
w = child.wait()
return os.WEXITSTATUS(w), out
# Check to make sure that the required environment variable is present.
try:
ev = os.environ["SPS_HOME"]
except KeyError:
raise ImportError("You need to have the SPS_HOME environment variable")
# Check the SVN revision number.
cmd = ["svnversion", ev]
stat, out = run_command(" ".join(cmd))
fsps_vers = int(re.match("^([0-9])+", out[0]).group(0))
sps = fsps.StellarPopulation(zcontinuous=True)
print('FSPS version = {}'.format(fsps_vers))
print('Zs={0}, N_lambda={1}'.format(sps.zlegend, len(sps.wavelengths)))
print('single age')
def spec_from_fsps(z, t, s):
t0 = time.time()
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
wave, spec = sps.get_spectrum(peraa=True, tage = sps.params['tage'])
#print(spec.shape)
return time.time()-t0
def mags_from_fsps(z, t, s):
t0 = time.time()
    sps.params['zred'] = t
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
mags = sps.get_mags(tage = sps.params['tage'], redshift=0.0)
#print(spec.shape)
return time.time()-t0
def spec_from_ztinterp(z, t, s):
t0 = time.time()
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
sps.params['imf3'] = s
spec, m, l = sps.ztinterp(sps.params['logzsol'], sps.params['tage'], peraa=True)
#print(spec.shape)
return time.time()-t0
if sys.argv[1] == 'mags':
from_fsps = mags_from_fsps
print('timing get_mags')
print('nbands = {}'.format(len(sps.get_mags(tage=1.0))))
elif sys.argv[1] == 'spec':
from_fsps = spec_from_fsps
print('timing get_spectrum')
elif sys.argv[1] == 'ztinterp':
from_fsps = spec_from_ztinterp
print('timing get_spectrum')
elif sys.argv[1] == 'sedpy':
from sedpy import observate
nbands = len(sps.get_mags(tage=1.0))
fnames = nbands * ['sdss_r0']
filters = observate.load_filters(fnames)
def mags_from_sedpy(z, t, s):
t0 = time.time()
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
wave, spec = sps.get_spectrum(peraa=True,
tage = sps.params['tage'])
mags = observate.getSED(wave, spec, filters)
return time.time()-t0
from_fsps = mags_from_sedpy
sps.params['add_neb_emission'] = False
sps.params['smooth_velocity'] = True
sps.params['sfh'] = 0
ntry = 30
zz = np.random.uniform(-1,0,ntry)
tt = np.random.uniform(0.1,4,ntry)
ss = np.random.uniform(1,2.5,ntry)
#make sure all z's already compiled
_ = [from_fsps(z, 1.0, 0.0) for z in [-1, -0.8, -0.6, -0.4, -0.2, 0.0]]
all_dur = []
print('no neb emission:')
dur_many = np.zeros(ntry)
for i in xrange(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], ss[i])
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
print('no neb emission, no smooth:')
dur_many = np.zeros(ntry)
for i in xrange(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], 0.0)
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
sps.params['add_neb_emission'] = True
print('neb emission:')
dur_many = np.zeros(ntry)
for i in xrange(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], ss[i])
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
print('neb emission, no smooth:')
dur_many = np.zeros(ntry)
for i in xrange(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], 0.0)
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
| mit |
acarmel/CouchPotatoServer | couchpotato/core/media/_base/providers/torrent/kickasstorrents.py | 7 | 7085 | import re
import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.variable import tryInt, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
log = CPLog(__name__)
class Base(TorrentMagnetProvider):
urls = {
'detail': '%s/%s',
'search': '%s/%s-i%s/',
}
cat_ids = [
(['cam'], ['cam']),
(['telesync'], ['ts', 'tc']),
(['screener', 'tvrip'], ['screener']),
(['x264', '720p', '1080p', 'blu-ray', 'hdrip'], ['bd50', '1080p', '720p', 'brrip']),
(['dvdrip'], ['dvdrip']),
(['dvd'], ['dvdr']),
]
http_time_between_calls = 1 # Seconds
cat_backup_id = None
proxy_list = [
'https://kat.cr',
'http://katproxy.com',
]
def _search(self, media, quality, results):
data = self.getHTMLData(self.urls['search'] % (self.getDomain(), 'm', getIdentifier(media).replace('tt', '')))
if data:
cat_ids = self.getCatId(quality)
table_order = ['name', 'size', None, 'age', 'seeds', 'leechers']
try:
html = BeautifulSoup(data)
resultdiv = html.find('div', attrs = {'class': 'tabs'})
for result in resultdiv.find_all('div', recursive = False):
if result.get('id').lower().strip('tab-') not in cat_ids:
continue
try:
for temp in result.find_all('tr'):
                            if 'firstr' in (temp.get('class') or []) or not temp.get('id'):
continue
new = {}
nr = 0
for td in temp.find_all('td'):
column_name = table_order[nr]
if column_name:
if column_name == 'name':
link = td.find('div', {'class': 'torrentname'}).find_all('a')[2]
new['id'] = temp.get('id')[-7:]
new['name'] = link.text
new['url'] = td.find('a', 'imagnet')['href']
new['detail_url'] = self.urls['detail'] % (self.getDomain(), link['href'][1:])
new['verified'] = True if td.find('a', 'iverify') else False
new['score'] = 100 if new['verified'] else 0
                                    elif column_name == 'size':
                                        new['size'] = self.parseSize(td.text)
                                    elif column_name == 'age':
                                        new['age'] = self.ageToDays(td.text)
                                    elif column_name == 'seeds':
                                        new['seeders'] = tryInt(td.text)
                                    elif column_name == 'leechers':
new['leechers'] = tryInt(td.text)
nr += 1
# Only store verified torrents
if self.conf('only_verified') and not new['verified']:
continue
results.append(new)
except:
log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc())
except AttributeError:
log.debug('No search results found.')
def ageToDays(self, age_str):
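        # Converts strings such as '3 weeks' into an approximate day count,
        # e.g. '3 weeks' -> 21 and '2 months' -> 61 (a month counts as 30.5
        # days, a year as 365).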
age = 0
        age_str = age_str.replace(u'\xa0', ' ')  # normalise non-breaking spaces
regex = '(\d*.?\d+).(sec|hour|day|week|month|year)+'
matches = re.findall(regex, age_str)
for match in matches:
nr, size = match
mult = 1
if size == 'week':
mult = 7
elif size == 'month':
mult = 30.5
elif size == 'year':
mult = 365
age += tryInt(nr) * mult
return tryInt(age)
def isEnabled(self):
return super(Base, self).isEnabled() and self.getDomain()
def correctProxy(self, data):
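        # Heuristic sketch: a genuine KickAss page is expected to contain the
        # text 'search query'; an error page served by a broken proxy will not.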
return 'search query' in data.lower()
config = [{
'name': 'kickasstorrents',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'KickAssTorrents',
'description': '<a href="https://kat.ph/">KickAssTorrents</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACD0lEQVR42pXK20uTcRjA8d/fsJsuap0orBuFlm3hir3JJvQOVmuwllN20Lb2isI2nVHKjBqrCWYaNnNuBrkSWxglhDVJOkBdSWUOq5FgoiOrMdRJ2xPPxW+8OUf1ge/FcyCUSVe2qedK5U/OxNTTXRNXEQ52Glb4O6dNEfK1auJkvRY7+/zxnQbA/D596laXcY3OWOiaIX2393SGznUmxkUo/YkDgqHemuzobQ7+NV+reo5Q1mqp68GABdY3+/EloO+JeN4tEqiFU8f3CwhyWo9E7wfMgI0ELTDx0AvjIxcgvZoC9P7NMN7yMmrFeoKa68rfDfmrARsNN0Ihr55cx59ctZWSiwS5bLKpwW4dYJH+M/B6/CYszE0BFZ+egG+Ln+HRoBN/cpl1pV6COIMkOnBVA/w+fXgGKJVM4LxhumMleoL06hJ3wKcCfl+/TAKKx17gnFePRwkqxR4BQSpFkbCrrQJueI7mWpyfATQ9OQY43+uv/+PutBycJ3y2qn2x7jY50GJvnwLKZjOwspyE5I8F4N+1yr1uwqcs3ym63Hwo29EiAyzUWQVr6WVAS4lZCPutQG/2GtES2YiW3d3XflYKtL72kzAcdEDHeSa3czeIMyyz/TApRKvcFfE0isHbJMnrHCf6xTLb1ORvWNlWo91cvHrJUQo0o6ZoRi7dIiT/g2WEDi27Iyov21xMCvgNfXvtwIACfHwAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': True,
},
{
'name': 'domain',
'advanced': True,
'label': 'Proxy server',
'description': 'Domain for requests, keep empty to let CouchPotato pick.',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'only_verified',
'advanced': True,
'type': 'bool',
'default': False,
'description': 'Only search for verified releases.'
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
},
],
}]
| gpl-3.0 |
lindycoder/fake-switches | fake_switches/terminal/ssh.py | 4 | 2985 | # Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.conch import recvline
from fake_switches.terminal import TerminalController
class SwitchSSHShell(recvline.HistoricRecvLine):
def __init__(self, user, switch_core):
self.user = user
self.switch_core = switch_core
self.session = None
self.awaiting_keystroke = None
# Hack to get rid of magical characters that reset the screen / clear / goto position 0, 0
def initializeScreen(self):
self.mode = 'insert'
def connectionMade(self):
recvline.HistoricRecvLine.connectionMade(self)
self.session = self.switch_core.launch("ssh", SshTerminalController(
shell=self
))
def lineReceived(self, line):
line = line.decode()
still_listening = self.session.receive(line)
if not still_listening:
self.terminal.loseConnection()
def keystrokeReceived(self, keyID, modifier):
if keyID in self._printableChars:
if self.awaiting_keystroke is not None:
args = self.awaiting_keystroke[1] + [keyID.decode()]
cmd = self.awaiting_keystroke[0]
cmd(*args)
return
super(SwitchSSHShell, self).keystrokeReceived(keyID, modifier)
# replacing behavior of twisted/conch/recvline.py:205
def characterReceived(self, ch, moreCharactersComing):
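        # When replace_input is set (e.g. '*' while a password prompt is
        # active), typed characters are echoed masked instead of verbatim.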
command_processor = self.get_actual_processor()
if command_processor.replace_input is False:
self.terminal.write(ch)
else:
self.terminal.write((len(ch) * command_processor.replace_input).encode())
if self.mode == 'insert':
self.lineBuffer.insert(self.lineBufferIndex, ch)
else:
self.lineBuffer[self.lineBufferIndex:self.lineBufferIndex+1] = [ch]
self.lineBufferIndex += 1
def get_actual_processor(self):
proc = self.session.command_processor
while proc.sub_processor is not None:
proc = proc.sub_processor
return proc
class SshTerminalController(TerminalController):
def __init__(self, shell):
self.shell = shell
def write(self, text):
self.shell.terminal.write(text.encode())
def add_any_key_handler(self, callback, *params):
self.shell.awaiting_keystroke = (callback, list(params))
def remove_any_key_handler(self):
self.shell.awaiting_keystroke = None
| apache-2.0 |
saullocastro/compmech | theory/func/bardell/print_bardell_integrals_to_fortran.py | 2 | 2549 | import os
import glob
from ast import literal_eval
import numpy as np
import sympy
from sympy import pi, sin, cos, var
var('xi1, xi2')
var('x1t, x1r, x2t, x2r')
var('y1t, y1r, y2t, y2r')
subs = {
}
def List(*e):
return list(e)
printstr_12 = ''
printstr_full = ''
for filepath in glob.glob(r'.\bardell_integrals_mathematica\fortran_*.txt'):
print(filepath)
with open(filepath) as f:
filename = os.path.basename(filepath)
names = filename[:-4].split('_')
lines = [line.strip() for line in f.readlines()]
string = ''.join(lines)
string = string.replace('\\','')
tmp = eval(string)
if '_12' in filepath:
name = '_'.join(names[1:3])
else:
name = names[1]
matrix = sympy.Matrix(np.atleast_2d(tmp)).evalf()
if '_12' in filepath:
printstr = 'SUBROUTINE integral_%s(xi1, xi2, i, j, x1t, x1r, x2t, x2r, y1t, y1r, y2t, y2r, out)\n' % name
printstr += ' REAL*8, INTENT(IN) :: xi1, xi2, x1t, x1r, x2t, x2r, y1t, y1r, y2t, y2r\n'
else:
printstr = 'SUBROUTINE integral_%s(i, j, x1t, x1r, x2t, x2r, y1t, y1r, y2t, y2r, out)\n' % name
printstr += ' REAL*8, INTENT(IN) :: x1t, x1r, x2t, x2r, y1t, y1r, y2t, y2r\n'
printstr += ' INTEGER, INTENT(IN) :: i, j\n'
printstr += ' REAL*8, INTENT(OUT) :: out\n'
printstr += ' out = 0\n'
printstr += ' SELECT CASE (i)\n'
for i in range(matrix.shape[0]):
activerow = False
for j in range(matrix.shape[1]):
if matrix[i, j] == 0:
continue
if not activerow:
activerow = True
printstr += ' CASE (%d)\n' % (i+1)
printstr += ' SELECT CASE (j)\n'
printstr += ' CASE (%d)\n' % (j+1)
printstr += ' out = %s\n' % str(matrix[i, j])
printstr += ' RETURN\n'
printstr += ' END SELECT\n'
printstr += ' END SELECT\n'
printstr += 'END SUBROUTINE integral_%s\n\n\n' % name
if '_12' in filepath:
printstr_12 += printstr
else:
printstr_full += printstr
with open('.\\bardell_integrals_fortran\\bardell_integrals_fortran_12.txt', 'w') as f:
f.write(printstr_12)
with open('.\\bardell_integrals_fortran\\bardell_integrals_fortran_full.txt', 'w') as f:
f.write(printstr_full)
| bsd-3-clause |
ClinicalGraphics/scikit-image | doc/examples/xx_applications/plot_morphology.py | 6 | 8329 | """
=======================
Morphological Filtering
=======================
Morphological image processing is a collection of non-linear operations related
to the shape or morphology of features in an image, such as boundaries,
skeletons, etc. In any given technique, we probe an image with a small shape or
template called a structuring element, which defines the region of interest or
neighborhood around a pixel.
In this document we outline the following basic morphological operations:
1. Erosion
2. Dilation
3. Opening
4. Closing
5. White Tophat
6. Black Tophat
7. Skeletonize
8. Convex Hull
To get started, let's load an image using ``io.imread``. Note that morphology
functions only work on gray-scale or binary images, so we set ``as_grey=True``.
"""
import matplotlib.pyplot as plt
from skimage.data import data_dir
from skimage.util import img_as_ubyte
from skimage import io
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
fig, ax = plt.subplots()
ax.imshow(phantom, cmap=plt.cm.gray)
"""
.. image:: PLOT2RST.current_figure
Let's also define a convenience function for plotting comparisons:
"""
def plot_comparison(original, filtered, filter_name):
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True)
ax1.imshow(original, cmap=plt.cm.gray)
ax1.set_title('original')
ax1.axis('off')
ax1.set_adjustable('box-forced')
ax2.imshow(filtered, cmap=plt.cm.gray)
ax2.set_title(filter_name)
ax2.axis('off')
ax2.set_adjustable('box-forced')
"""
Erosion
=======
Morphological ``erosion`` sets a pixel at (i, j) to the *minimum over all
pixels in the neighborhood centered at (i, j)*. The structuring element,
``selem``, passed to ``erosion`` is a boolean array that describes this
neighborhood. Below, we use ``disk`` to create a circular structuring element,
which we use for most of the following examples.
"""
from skimage.morphology import erosion, dilation, opening, closing, white_tophat
from skimage.morphology import black_tophat, skeletonize, convex_hull_image
from skimage.morphology import disk
selem = disk(6)
eroded = erosion(phantom, selem)
plot_comparison(phantom, eroded, 'erosion')
"""
.. image:: PLOT2RST.current_figure
Notice how the white boundary of the image disappears or gets eroded as we
increase the size of the disk. Also notice the increase in size of the two
black ellipses in the center and the disappearance of the 3 light grey
patches in the lower part of the image.
Dilation
========
Morphological ``dilation`` sets a pixel at (i, j) to the *maximum over all
pixels in the neighborhood centered at (i, j)*. Dilation enlarges bright
regions and shrinks dark regions.
"""
dilated = dilation(phantom, selem)
plot_comparison(phantom, dilated, 'dilation')
"""
.. image:: PLOT2RST.current_figure
Notice how the white boundary of the image thickens, or gets dilated, as we
increase the size of the disk. Also notice the decrease in size of the two
black ellipses in the center, and the thickening of the light grey circle in
the center and the 3 patches in the lower part of the image.
Opening
=======
Morphological ``opening`` on an image is defined as an *erosion followed by a
dilation*. Opening can remove small bright spots (i.e. "salt") and connect
small dark cracks.
"""
opened = opening(phantom, selem)
plot_comparison(phantom, opened, 'opening')
"""
.. image:: PLOT2RST.current_figure
Since ``opening`` an image starts with an erosion operation, light regions that
are *smaller* than the structuring element are removed. The dilation operation
that follows ensures that light regions that are *larger* than the structuring
element retain their original size. Notice how the light and dark shapes in the
center retain their original thickness, but the 3 lighter patches at the bottom get
completely eroded. The size dependence is highlighted by the outer white ring:
The parts of the ring thinner than the structuring element were completely
erased, while the thicker region at the top retains its original thickness.
Closing
=======
Morphological ``closing`` on an image is defined as a *dilation followed by an
erosion*. Closing can remove small dark spots (i.e. "pepper") and connect
small bright cracks.
To illustrate this more clearly, let's add a small crack to the white border:
"""
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
phantom[10:30, 200:210] = 0
closed = closing(phantom, selem)
plot_comparison(phantom, closed, 'closing')
"""
.. image:: PLOT2RST.current_figure
Since ``closing`` an image starts with a dilation operation, dark regions
that are *smaller* than the structuring element are removed. The dilation
operation that follows ensures that dark regions that are *larger* than the
structuring element retain their original size. Notice how the white ellipses
at the bottom get connected because of dilation, but other dark regions retain
their original sizes. Also notice how the crack we added is mostly removed.
White tophat
============
The ``white_tophat`` of an image is defined as the *image minus its
morphological opening*. This operation returns the bright spots of the image
that are smaller than the structuring element.
To make things interesting, we'll add bright and dark spots to the image:
"""
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
phantom[340:350, 200:210] = 255
phantom[100:110, 200:210] = 0
w_tophat = white_tophat(phantom, selem)
plot_comparison(phantom, w_tophat, 'white tophat')
"""
.. image:: PLOT2RST.current_figure
As you can see, the 10-pixel wide white square is highlighted since it is
smaller than the structuring element. Also, the thin, white edges around most
of the ellipse are retained because they're smaller than the structuring
element, but the thicker region at the top disappears.
Black tophat
============
The ``black_tophat`` of an image is defined as its morphological **closing
minus the original image**. This operation returns the *dark spots of the
image that are smaller than the structuring element*.
"""
b_tophat = black_tophat(phantom, selem)
plot_comparison(phantom, b_tophat, 'black tophat')
"""
.. image:: PLOT2RST.current_figure
As you can see, the 10-pixel wide black square is highlighted since it is
smaller than the structuring element.
Duality
-------
As you should have noticed, many of these operations are simply the reverse
of another operation. This duality can be summarized as follows:
1. Erosion <-> Dilation
2. Opening <-> Closing
3. White tophat <-> Black tophat
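A quick numerical check of the first pair (a sketch reusing the ``phantom``
and ``selem`` defined above): for a flat, symmetric structuring element,
eroding an image is the same as complementing it, dilating, and
complementing the result back.
"""
duality_check = 255 - dilation(255 - phantom, selem)
plot_comparison(erosion(phantom, selem), duality_check, 'dilation of complement')
"""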
Skeletonize
===========
Thinning is used to reduce each connected component in a binary image to a
*single-pixel wide skeleton*. It is important to note that this is performed
on binary images only.
"""
from skimage import img_as_bool
horse = ~img_as_bool(io.imread(data_dir+'/horse.png', as_grey=True))
sk = skeletonize(horse)
plot_comparison(horse, sk, 'skeletonize')
"""
.. image:: PLOT2RST.current_figure
As the name suggests, this technique is used to thin the image to 1-pixel wide
skeleton by applying thinning successively.
Convex hull
===========
The ``convex_hull_image`` is the *set of pixels included in the smallest
convex polygon that surround all white pixels in the input image*. Again note
that this is also performed on binary images.
"""
hull1 = convex_hull_image(horse)
plot_comparison(horse, hull1, 'convex hull')
"""
.. image:: PLOT2RST.current_figure
As the figure illustrates, ``convex_hull_image`` gives the smallest polygon
that covers all of the white (``True``) pixels in the image.
If we add a small grain to the image, we can see how the convex hull adapts to
enclose that grain:
"""
import numpy as np
horse2 = np.copy(horse)
horse2[45:50, 75:80] = 1
hull2 = convex_hull_image(horse2)
plot_comparison(horse2, hull2, 'convex hull')
"""
.. image:: PLOT2RST.current_figure
Additional Resources
====================
1. `MathWorks tutorial on morphological processing
<http://www.mathworks.com/help/images/morphology-fundamentals-dilation-and-erosion.html>`_
2. `Auckland university's tutorial on Morphological Image Processing
<http://www.cs.auckland.ac.nz/courses/compsci773s1c/lectures/ImageProcessing-html/topic4.htm>`_
3. http://en.wikipedia.org/wiki/Mathematical_morphology
"""
plt.show()
| bsd-3-clause |
yausern/stlab | devices/Keithley_6430.py | 1 | 1068 | import visa
import numpy as np
from stlab.devices.instrument import instrument
import time
class keithley6430(instrument):
    def __init__(self, addr="TCPIP::192.168.1.212::1470::SOCKET", reset=True, verb=True, read_termination='\r\n'):
#Needs \r\n line termination
super().__init__(addr,reset,verb,read_termination = read_termination)
self.id()
def SetOutputOn(self):
self.write('OUTPUT ON')
return
def SetOutputOff(self):
self.write('OUTPUT OFF')
return
def SetModeVoltage(self):
self.write(":SOUR:FUNC VOLT")
return
def SetVoltage(self,volt):
self.write(":SOUR:VOLT:LEV:IMM:AMPL {}".format(volt))
return
def SetComplianceCurrent(self,curr):
self.write(":SENS:CURR:DC:PROT:LEV {}".format(curr))
return
def GetVoltage(self):
        result = float(self.query(":SOUR:VOLT:LEV:IMM:AMPL?"))
return result
def GetMetadataString(self): #Should return a string of metadata adequate to write to a file
pass
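# A minimal usage sketch (hypothetical; the VISA address must match your
# instrument): source 0.5 V with a 1 uA compliance limit.
#     k = keithley6430()
#     k.SetModeVoltage()
#     k.SetComplianceCurrent(1e-6)
#     k.SetVoltage(0.5)
#     k.SetOutputOn()
#     print(k.GetVoltage())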
| gpl-3.0 |
deedwar/pure | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
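# For example, a file holding struct.pack("<I", 1) + struct.pack("<I", 57)
# decodes to the line "0=1 1=57" (indices printed in hex, values in decimal).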
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
mjtamlyn/django | tests/raw_query/models.py | 19 | 1246 | from django.db import models
class Author(models.Model):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
dob = models.DateField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Protect against annotations being passed to __init__ --
# this'll make the test suite get angry if annotations aren't
# treated differently than fields.
for k in kwargs:
assert k in [f.attname for f in self._meta.fields], \
"Author.__init__ got an unexpected parameter: %s" % k
class Book(models.Model):
title = models.CharField(max_length=255)
author = models.ForeignKey(Author, models.CASCADE)
paperback = models.BooleanField(default=False)
opening_line = models.TextField()
class BookFkAsPk(models.Model):
book = models.ForeignKey(Book, models.CASCADE, primary_key=True, db_column="not_the_default")
class Coffee(models.Model):
brand = models.CharField(max_length=255, db_column="name")
price = models.DecimalField(max_digits=10, decimal_places=2, default=0)
class Reviewer(models.Model):
reviewed = models.ManyToManyField(Book)
class FriendlyAuthor(Author):
pass
| bsd-3-clause |
codenote/chromium-test | ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py | 6 | 8213 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
def ReadDumpTxtFile(filename):
dump_info = {}
fh = open(filename, 'r')
for line in fh:
if ':' in line:
key, value = line.rstrip().split(':', 1)
dump_info[key] = value
fh.close()
return dump_info
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, crash_service_exe):
# Find crash_service.exe relative to chrome.exe. This is a bit icky.
browser_dir = os.path.dirname(browser_path)
proc = subprocess.Popen([os.path.join(browser_dir, crash_service_exe),
'--v=1', # Verbose output for debugging failures
'--dumps-dir=%s' % dumps_dir,
'--pipe-name=%s' % windows_pipe_name])
def Cleanup():
# Note that if the process has already exited, this will raise
# an 'Access is denied' WindowsError exception, but
# crash_service.exe is not supposed to do this and such
# behaviour should make the test fail.
proc.terminate()
status = proc.wait()
sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
% (crash_service_exe, status))
cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
if os.path.exists(dir_path):
return [os.path.join(dir_path, name)
for name in os.listdir(dir_path)]
else:
return []
def GetDumpFiles(dumps_dirs):
all_files = [filename
for dumps_dir in dumps_dirs
for filename in ListPathsInDir(dumps_dir)]
sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
for dump_file in all_files:
sys.stdout.write(' %s (size %i)\n'
% (dump_file, os.stat(dump_file).st_size))
return [dump_file for dump_file in all_files
if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
parser = browser_tester.BuildArgParser()
parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
type=int, default=0,
help='The number of crash dumps that we should expect')
parser.add_option('--expected_process_type_for_crash',
dest='expected_process_type_for_crash',
type=str, default='nacl-loader',
help='The type of Chromium process that we expect the '
'crash dump to be for')
# Ideally we would just query the OS here to find out whether we are
# running x86-32 or x86-64 Windows, but Python's win32api module
# does not contain a wrapper for GetNativeSystemInfo(), which is
# what NaCl uses to check this, or for IsWow64Process(), which is
# what Chromium uses. Instead, we just rely on the build system to
# tell us.
parser.add_option('--win64', dest='win64', action='store_true',
help='Pass this if we are running tests for x86-64 Windows')
options, args = parser.parse_args()
temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
def CleanUpTempDir():
browsertester.browserlauncher.RemoveDirectory(temp_dir)
cleanup_funcs.append(CleanUpTempDir)
# To get a guaranteed unique pipe name, use the base name of the
# directory we just created.
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)
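  # e.g. r'\\.\pipe\nacl_crash_dump_tester_AbC123_crash_service'
  # (the suffix is whatever tempfile.mkdtemp() generated; this example
  # name is hypothetical).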
# This environment variable enables Breakpad crash dumping in
# non-official builds of Chromium.
os.environ['CHROME_HEADLESS'] = '1'
if sys.platform == 'win32':
dumps_dir = temp_dir
# Override the default (global) Windows pipe name that Chromium will
# use for out-of-process crash reporting.
os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
# Launch the x86-32 crash service so that we can handle crashes in
# the browser process.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service.exe')
if options.win64:
# Launch the x86-64 crash service so that we can handle crashes
# in the NaCl loader process (nacl64.exe).
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service64.exe')
# We add a delay because there is probably a race condition:
# crash_service.exe might not have finished doing
# CreateNamedPipe() before NaCl does a crash dump and tries to
# connect to that pipe.
# TODO(mseaborn): We could change crash_service.exe to report when
# it has successfully created the named pipe.
time.sleep(1)
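    # A hypothetical, more robust alternative (if pywin32 were available):
    # poll with win32pipe.WaitNamedPipe(windows_pipe_name, timeout_ms)
    # instead of sleeping for a fixed amount of time.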
elif sys.platform == 'darwin':
dumps_dir = temp_dir
os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
elif sys.platform.startswith('linux'):
# The "--user-data-dir" option is not effective for the Breakpad
# setup in Linux Chromium, because Breakpad is initialized before
# "--user-data-dir" is read. So we set HOME to redirect the crash
# dumps to a temporary directory.
home_dir = temp_dir
os.environ['HOME'] = home_dir
options.enable_crash_reporter = True
result = browser_tester.Run(options.url, options)
# Find crash dump results.
if sys.platform.startswith('linux'):
# Look in "~/.config/*/Crash Reports". This will find crash
# reports under ~/.config/chromium or ~/.config/google-chrome, or
# under other subdirectories in case the branding is changed.
dumps_dirs = [os.path.join(path, 'Crash Reports')
for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
else:
dumps_dirs = [dumps_dir]
dmp_files = GetDumpFiles(dumps_dirs)
failed = False
msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
(len(dmp_files), options.expected_crash_dumps))
if len(dmp_files) != options.expected_crash_dumps:
sys.stdout.write(msg)
failed = True
for dump_file in dmp_files:
# Sanity check: Make sure dumping did not fail after opening the file.
msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
if os.stat(dump_file).st_size == 0:
sys.stdout.write(msg)
failed = True
# On Windows, the crash dumps should come in pairs of a .dmp and
# .txt file.
if sys.platform == 'win32':
second_file = dump_file[:-4] + '.txt'
msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
'%r file\n' % (dump_file, second_file))
if not os.path.exists(second_file):
sys.stdout.write(msg)
failed = True
continue
# Check that the crash dump comes from the NaCl process.
dump_info = ReadDumpTxtFile(second_file)
if 'ptype' in dump_info:
msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
% (dump_info['ptype'], options.expected_process_type_for_crash))
if dump_info['ptype'] != options.expected_process_type_for_crash:
sys.stdout.write(msg)
failed = True
else:
sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
failed = True
# TODO(mseaborn): Ideally we would also check that a backtrace
# containing an expected function name can be extracted from the
# crash dump.
if failed:
sys.stdout.write('crash_dump_tester: FAILED\n')
result = 1
else:
sys.stdout.write('crash_dump_tester: PASSED\n')
return result
def MainWrapper():
cleanup_funcs = []
try:
return Main(cleanup_funcs)
finally:
for func in cleanup_funcs:
func()
if __name__ == '__main__':
sys.exit(MainWrapper())
| bsd-3-clause |
jawilson/Flexget | flexget/tests/test_assume_quality.py | 9 | 4925 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
from jinja2 import Template
import flexget.utils.qualities as qualities
from flexget.task import TaskAbort
class TestAssumeQuality(object):
_config = """
templates:
global:
parsing:
series: {{parser}}
movie: {{parser}}
mock:
- {title: 'Testfile[h264-720p]'}
- {title: 'Testfile.1280x720'}
- {title: 'Testfile.HDTV'}
- {title: 'Testfile.cam'}
- {title: 'Testfile.noquality'}
- {title: 'Testfile.xvid.mp3'}
accept_all: yes
tasks:
test_default:
assume_quality:
720p: flac
h264: 10bit
HDTV: truehd
any: 720p h264
test_simple:
assume_quality: 720p h264
test_priority:
assume_quality:
720p: mp3
720p h264: flac
h264: mp3
test_matching:
assume_quality:
hdtv: 720p
test_negative_matching:
assume_quality:
'!xvid !divx !mp3': 1080p
test_no_clobber:
assume_quality:
720p: xvid
test_invalid_target:
assume_quality:
potato: 720p
test_with_series:
template: no_global
mock:
- title: my show S01E01 hdtv
assume_quality: 720p
series:
- my show:
quality: 720p hdtv
test_with_series_target:
template: no_global
mock:
- title: my show S01E01 hdtv
assume_quality: 720p
series:
- my show:
target: 720p hdtv
test_with_series_qualities:
template: no_global
mock:
- title: my show S01E01 hdtv
assume_quality: 720p
series:
- my show:
qualities: [720p hdtv]
"""
@pytest.fixture(scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'])
def config(self, request):
"""Override and parametrize default config fixture."""
return Template(self._config).render({'parser': request.param})
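    # Illustrative note (not part of the original tests): parsed quality
    # strings are order-insensitive, so, assuming the qualities API imported
    # above, qualities.Quality('720p h264') == qualities.Quality('h264 720p').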
def test_matching(self, execute_task):
task = execute_task('test_matching')
entry = task.find_entry('entries', title='Testfile.HDTV')
assert entry.get('quality') == qualities.Quality('720p HDTV')
def test_negative_matching(self, execute_task):
task = execute_task('test_negative_matching')
entry = task.find_entry('entries', title='Testfile.HDTV')
assert entry.get('quality') == qualities.Quality('1080p HDTV')
entry = task.find_entry('entries', title='Testfile.xvid.mp3')
assert entry.get('quality') == qualities.Quality('xvid mp3')
def test_no_clobber(self, execute_task):
task = execute_task('test_no_clobber')
entry = task.find_entry('entries', title='Testfile[h264-720p]')
assert entry.get('quality') != qualities.Quality('720p xvid')
assert entry.get('quality') == qualities.Quality('720p h264')
def test_default(self, execute_task):
task = execute_task('test_default')
entry = task.find_entry('entries', title='Testfile.noquality')
assert entry.get('quality') == qualities.Quality('720p h264'), 'Testfile.noquality quality not \'720p h264\''
def test_simple(self, execute_task):
task = execute_task('test_simple')
entry = task.find_entry('entries', title='Testfile.noquality')
assert entry.get('quality') == qualities.Quality('720p h264'), 'Testfile.noquality quality not \'720p h264\''
def test_priority(self, execute_task):
task = execute_task('test_priority')
entry = task.find_entry('entries', title='Testfile[h264-720p]')
assert entry.get('quality') != qualities.Quality('720p h264 mp3')
assert entry.get('quality') == qualities.Quality('720p h264 flac')
def test_invalid_target(self, execute_task):
with pytest.raises(TaskAbort):
execute_task('test_invalid_target')
def test_with_series(self, execute_task):
task = execute_task('test_with_series')
assert task.accepted, 'series plugin should have used assumed quality'
def test_with_series_target(self, execute_task):
task = execute_task('test_with_series_target')
assert task.accepted, 'series plugin should have used assumed quality'
def test_with_series_qualities(self, execute_task):
task = execute_task('test_with_series_qualities')
assert task.accepted, 'series plugin should have used assumed quality'
| mit |
bkolli/swift | test/unit/container/test_sync.py | 5 | 45683 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from textwrap import dedent
import mock
from test.unit import debug_logger
from swift.container import sync
from swift.common import utils
from swift.common.wsgi import ConfigString
from swift.common.exceptions import ClientException
from swift.common.storage_policy import StoragePolicy
import test
from test.unit import patch_policies, with_tempdir
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'endcap'
class FakeRing(object):
def __init__(self):
self.devs = [{'ip': '10.0.0.%s' % x, 'port': 1000 + x, 'device': 'sda'}
for x in range(3)]
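    # get_nodes() below mimics swift.common.ring.Ring.get_nodes(), which
    # returns a (partition, nodes) pair; a fixed partition of 1 is enough
    # for these tests.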
def get_nodes(self, account, container=None, obj=None):
return 1, list(self.devs)
class FakeContainerBroker(object):
def __init__(self, path, metadata=None, info=None, deleted=False,
items_since=None):
self.db_file = path
self.metadata = metadata if metadata else {}
self.info = info if info else {}
self.deleted = deleted
self.items_since = items_since if items_since else []
self.sync_point1 = -1
self.sync_point2 = -1
def get_info(self):
return self.info
def is_deleted(self):
return self.deleted
def get_items_since(self, sync_point, limit):
if sync_point < 0:
sync_point = 0
return self.items_since[sync_point:sync_point + limit]
def set_x_container_sync_points(self, sync_point1, sync_point2):
self.sync_point1 = sync_point1
self.sync_point2 = sync_point2
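# Hypothetical sketch (not part of the original suite) showing how the fake
# broker above emulates sync-point slicing: a negative sync point is clamped
# to 0, so a fresh broker yields every row.
def _example_get_items_since():
    broker = FakeContainerBroker('db', items_since=[{'ROWID': 1},
                                                    {'ROWID': 2}])
    assert broker.get_items_since(-1, 10) == [{'ROWID': 1}, {'ROWID': 2}]
    assert broker.get_items_since(1, 10) == [{'ROWID': 2}]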
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestContainerSync(unittest.TestCase):
def setUp(self):
self.logger = debug_logger('test-container-sync')
def test_FileLikeIter(self):
# Retained test to show new FileLikeIter acts just like the removed
# _Iter2FileLikeObject did.
flo = sync.FileLikeIter(iter(['123', '4567', '89', '0']))
expect = '1234567890'
got = flo.read(2)
self.assertTrue(len(got) <= 2)
self.assertEqual(got, expect[:len(got)])
expect = expect[len(got):]
got = flo.read(5)
self.assertTrue(len(got) <= 5)
self.assertEqual(got, expect[:len(got)])
expect = expect[len(got):]
self.assertEqual(flo.read(), expect)
self.assertEqual(flo.read(), '')
self.assertEqual(flo.read(2), '')
flo = sync.FileLikeIter(iter(['123', '4567', '89', '0']))
self.assertEqual(flo.read(), '1234567890')
self.assertEqual(flo.read(), '')
self.assertEqual(flo.read(2), '')
def assertLogMessage(self, msg_level, expected, skip=0):
for line in self.logger.get_lines_for_level(msg_level)[skip:]:
msg = 'expected %r not in %r' % (expected, line)
self.assertTrue(expected in line, msg)
@with_tempdir
def test_init(self, tempdir):
ic_conf_path = os.path.join(tempdir, 'internal-client.conf')
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
self.assertTrue(cs.container_ring is cring)
# specified but not exists will not start
conf = {'internal_client_conf_path': ic_conf_path}
self.assertRaises(SystemExit, sync.ContainerSync, conf,
container_ring=cring, logger=self.logger)
# not specified will use default conf
with mock.patch('swift.container.sync.InternalClient') as mock_ic:
cs = sync.ContainerSync({}, container_ring=cring,
logger=self.logger)
self.assertTrue(cs.container_ring is cring)
self.assertTrue(mock_ic.called)
conf_path, name, retry = mock_ic.call_args[0]
self.assertTrue(isinstance(conf_path, ConfigString))
self.assertEqual(conf_path.contents.getvalue(),
dedent(sync.ic_conf_body))
self.assertLogMessage('warning', 'internal_client_conf_path')
self.assertLogMessage('warning', 'internal-client.conf-sample')
# correct
contents = dedent(sync.ic_conf_body)
with open(ic_conf_path, 'w') as f:
f.write(contents)
with mock.patch('swift.container.sync.InternalClient') as mock_ic:
cs = sync.ContainerSync(conf, container_ring=cring)
self.assertTrue(cs.container_ring is cring)
self.assertTrue(mock_ic.called)
conf_path, name, retry = mock_ic.call_args[0]
self.assertEqual(conf_path, ic_conf_path)
sample_conf_filename = os.path.join(
os.path.dirname(test.__file__),
'../etc/internal-client.conf-sample')
with open(sample_conf_filename) as sample_conf_file:
sample_conf = sample_conf_file.read()
self.assertEqual(contents, sample_conf)
def test_run_forever(self):
        # This runs run_forever with fakes so that it succeeds for two loops:
        # the first causes a report but no interval sleep, the second no
        # report but an interval sleep.
time_calls = [0]
sleep_calls = []
audit_location_generator_calls = [0]
def fake_time():
time_calls[0] += 1
returns = [1, # Initialized reported time
1, # Start time
3602, # Is it report time (yes)
3602, # Report time
3602, # Elapsed time for "under interval" (no)
3602, # Start time
3603, # Is it report time (no)
3603] # Elapsed time for "under interval" (yes)
if time_calls[0] == len(returns) + 1:
raise Exception('we are now done')
return returns[time_calls[0] - 1]
def fake_sleep(amount):
sleep_calls.append(amount)
def fake_audit_location_generator(*args, **kwargs):
audit_location_generator_calls[0] += 1
# Makes .container_sync() short-circuit
yield 'container.db', 'device', 'partition'
return
orig_time = sync.time
orig_sleep = sync.sleep
orig_ContainerBroker = sync.ContainerBroker
orig_audit_location_generator = sync.audit_location_generator
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0})
sync.time = fake_time
sync.sleep = fake_sleep
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=FakeRing())
sync.audit_location_generator = fake_audit_location_generator
cs.run_forever(1, 2, a=3, b=4, verbose=True)
except Exception as err:
if str(err) != 'we are now done':
raise
finally:
sync.time = orig_time
sync.sleep = orig_sleep
sync.audit_location_generator = orig_audit_location_generator
sync.ContainerBroker = orig_ContainerBroker
self.assertEqual(time_calls, [9])
self.assertEqual(len(sleep_calls), 2)
self.assertTrue(sleep_calls[0] <= cs.interval)
self.assertTrue(sleep_calls[1] == cs.interval - 1)
self.assertEqual(audit_location_generator_calls, [2])
self.assertEqual(cs.reported, 3602)
def test_run_once(self):
        # This runs run_once with fakes twice: the first run causes an
        # interim report, the second produces no interim report.
time_calls = [0]
audit_location_generator_calls = [0]
def fake_time():
time_calls[0] += 1
returns = [1, # Initialized reported time
1, # Start time
3602, # Is it report time (yes)
3602, # Report time
3602, # End report time
3602, # For elapsed
3602, # Start time
3603, # Is it report time (no)
3604, # End report time
3605] # For elapsed
if time_calls[0] == len(returns) + 1:
raise Exception('we are now done')
return returns[time_calls[0] - 1]
def fake_audit_location_generator(*args, **kwargs):
audit_location_generator_calls[0] += 1
# Makes .container_sync() short-circuit
yield 'container.db', 'device', 'partition'
return
orig_time = sync.time
orig_audit_location_generator = sync.audit_location_generator
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0})
sync.time = fake_time
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=FakeRing())
sync.audit_location_generator = fake_audit_location_generator
cs.run_once(1, 2, a=3, b=4, verbose=True)
self.assertEqual(time_calls, [6])
self.assertEqual(audit_location_generator_calls, [1])
self.assertEqual(cs.reported, 3602)
cs.run_once()
except Exception as err:
if str(err) != 'we are now done':
raise
finally:
sync.time = orig_time
sync.audit_location_generator = orig_audit_location_generator
sync.ContainerBroker = orig_ContainerBroker
self.assertEqual(time_calls, [10])
self.assertEqual(audit_location_generator_calls, [2])
self.assertEqual(cs.reported, 3604)
def test_container_sync_not_db(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
self.assertEqual(cs.container_failures, 0)
def test_container_sync_missing_db(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
def test_container_sync_not_my_db(self):
        # The db could be present due to handoff replication, so test that we
        # ignore those.
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({
'bind_ip': '10.0.0.0',
}, container_ring=cring)
# Plumbing test for bind_ip and whataremyips()
self.assertEqual(['10.0.0.0'], cs._myips)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0})
cs._myips = ['127.0.0.1'] # No match
cs._myport = 1 # No match
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 0)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1 # No match
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 0)
cs._myips = ['127.0.0.1'] # No match
cs._myport = 1000 # Match
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 0)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will cause the 1 container failure since the
# broker's info doesn't contain sync point keys
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_sync_deleted(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0}, deleted=False)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will cause the 1 container failure since the
# broker's info doesn't contain sync point keys
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0}, deleted=True)
# This complete match will not cause any more container failures
# since the broker indicates deletion
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_sync_no_to_or_key(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-to or x-container-sync-key
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 0)
self.assertEqual(cs.container_skips, 1)
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-key
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 0)
self.assertEqual(cs.container_skips, 2)
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-to
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 0)
self.assertEqual(cs.container_skips, 3)
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = []
# This complete match will cause a container failure since the
# sync-to won't validate as allowed.
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 3)
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
            # This complete match will succeed since the broker's
            # get_items_since will return no new rows.
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 3)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_stop_at(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
orig_ContainerBroker = sync.ContainerBroker
orig_time = sync.time
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(
p, info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=['erroneous data'])
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
# This sync will fail since the items_since data is bad.
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 0)
# Set up fake times to make the sync short-circuit as having taken
# too long
fake_times = [
1.0, # Compute the time to move on
100000.0, # Compute if it's time to move on from first loop
100000.0] # Compute if it's time to move on from second loop
def fake_time():
return fake_times.pop(0)
sync.time = fake_time
            # This same sync won't fail, since the fake times make it look
            # like the sync has taken long enough that it is time to move on
            # (before it ever actually tries to do anything).
cs.container_sync('isa.db')
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 0)
finally:
sync.ContainerBroker = orig_ContainerBroker
sync.time = orig_time
def test_container_first_loop(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring)
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that no rows match for full syncing, ordinal is 0 and
# all hashes are 0
return '\x00' * 16
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
with mock.patch('swift.container.sync.ContainerBroker',
lambda p: fcb), \
mock.patch('swift.container.sync.hash_path', fake_hash_path):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because no rows match
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(fcb.sync_point1, None)
self.assertEqual(fcb.sync_point2, -1)
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that all rows match for full syncing, ordinal is 0
# and all hashes are 1
return '\x01' * 16
fcb = FakeContainerBroker('path', info={'account': 'a',
'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': 1,
'x_container_sync_point2': 1},
metadata={'x-container-sync-to':
('http://127.0.0.1/a/c', 1),
'x-container-sync-key':
('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
with mock.patch('swift.container.sync.ContainerBroker',
lambda p: fcb), \
mock.patch('swift.container.sync.hash_path', fake_hash_path):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because the two sync points haven't deviated yet
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(fcb.sync_point1, -1)
self.assertEqual(fcb.sync_point2, -1)
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
with mock.patch('swift.container.sync.ContainerBroker', lambda p: fcb):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Fails because container_sync_row will fail since the row has no
# 'deleted' key
self.assertEqual(cs.container_failures, 2)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(fcb.sync_point1, None)
self.assertEqual(fcb.sync_point2, -1)
def fake_delete_object(*args, **kwargs):
raise ClientException
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
with mock.patch('swift.container.sync.ContainerBroker',
lambda p: fcb), \
mock.patch('swift.container.sync.delete_object',
fake_delete_object):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Fails because delete_object fails
self.assertEqual(cs.container_failures, 3)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(fcb.sync_point1, None)
self.assertEqual(fcb.sync_point2, -1)
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
with mock.patch('swift.container.sync.ContainerBroker',
lambda p: fcb), \
mock.patch('swift.container.sync.delete_object',
lambda *x, **y: None):
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because delete_object succeeds
self.assertEqual(cs.container_failures, 3)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(fcb.sync_point1, None)
self.assertEqual(fcb.sync_point2, 1)
def test_container_second_loop(self):
cring = FakeRing()
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=cring,
logger=self.logger)
orig_ContainerBroker = sync.ContainerBroker
orig_hash_path = sync.hash_path
orig_delete_object = sync.delete_object
try:
# We'll ensure the first loop is always skipped by keeping the two
# sync points equal
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that no rows match for second loop, ordinal is 0 and
# all hashes are 1
return '\x01' * 16
sync.hash_path = fake_hash_path
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because no rows match
self.assertEqual(cs.container_failures, 0)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(fcb.sync_point1, 1)
self.assertEqual(fcb.sync_point2, None)
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that all rows match for second loop, ordinal is 0 and
# all hashes are 0
return '\x00' * 16
def fake_delete_object(*args, **kwargs):
pass
sync.hash_path = fake_hash_path
sync.delete_object = fake_delete_object
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
            # Fails because the row is missing the 'deleted' key,
            # but the faulty row is nevertheless skipped
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(fcb.sync_point1, 1)
self.assertEqual(fcb.sync_point2, None)
fcb = FakeContainerBroker(
'path',
info={'account': 'a', 'container': 'c',
'storage_policy_index': 0,
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because row now has 'deleted' key and delete_object
# succeeds
self.assertEqual(cs.container_failures, 1)
self.assertEqual(cs.container_skips, 0)
self.assertEqual(fcb.sync_point1, 1)
self.assertEqual(fcb.sync_point2, None)
finally:
sync.ContainerBroker = orig_ContainerBroker
sync.hash_path = orig_hash_path
sync.delete_object = orig_delete_object
def test_container_sync_row_delete(self):
self._test_container_sync_row_delete(None, None)
def test_container_sync_row_delete_using_realms(self):
self._test_container_sync_row_delete('US', 'realm_key')
def _test_container_sync_row_delete(self, realm, realm_key):
orig_uuid = sync.uuid
orig_delete_object = sync.delete_object
try:
class FakeUUID(object):
class uuid4(object):
hex = 'abcdef'
sync.uuid = FakeUUID
def fake_delete_object(path, name=None, headers=None, proxy=None,
logger=None, timeout=None):
self.assertEqual(path, 'http://sync/to/path')
self.assertEqual(name, 'object')
if realm:
self.assertEqual(headers, {
'x-container-sync-auth':
'US abcdef 90e95aabb45a6cdc0892a3db5535e7f918428c90',
'x-timestamp': '1.2'})
else:
self.assertEqual(
headers,
{'x-container-sync-key': 'key', 'x-timestamp': '1.2'})
self.assertEqual(proxy, 'http://proxy')
self.assertEqual(timeout, 5.0)
self.assertEqual(logger, self.logger)
sync.delete_object = fake_delete_object
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=FakeRing(),
logger=self.logger)
cs.http_proxies = ['http://proxy']
# Success
self.assertTrue(cs.container_sync_row(
{'deleted': True,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_deletes, 1)
exc = []
def fake_delete_object(*args, **kwargs):
exc.append(Exception('test exception'))
raise exc[-1]
sync.delete_object = fake_delete_object
# Failure because of delete_object exception
self.assertFalse(cs.container_sync_row(
{'deleted': True,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_deletes, 1)
self.assertEqual(len(exc), 1)
self.assertEqual(str(exc[-1]), 'test exception')
def fake_delete_object(*args, **kwargs):
exc.append(ClientException('test client exception'))
raise exc[-1]
sync.delete_object = fake_delete_object
# Failure because of delete_object exception
self.assertFalse(cs.container_sync_row(
{'deleted': True,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_deletes, 1)
self.assertEqual(len(exc), 2)
self.assertEqual(str(exc[-1]), 'test client exception')
def fake_delete_object(*args, **kwargs):
exc.append(ClientException('test client exception',
http_status=404))
raise exc[-1]
sync.delete_object = fake_delete_object
# Success because the object wasn't even found
self.assertTrue(cs.container_sync_row(
{'deleted': True,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_deletes, 2)
self.assertEqual(len(exc), 3)
self.assertEqual(str(exc[-1]), 'test client exception: 404')
finally:
sync.uuid = orig_uuid
sync.delete_object = orig_delete_object
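    # Hypothetical sketch of how the 'x-container-sync-auth' header checked
    # above is built (assuming swift's HMAC-SHA1 realm scheme; all names
    # here are illustrative):
    #   nonce = uuid.uuid4().hex  # 'abcdef' via FakeUUID in these tests
    #   sig = hmac.new(realm_key,
    #                  '%s\n%s\n%s\n%s\n%s' % (request_method, path,
    #                                          x_timestamp, nonce, user_key),
    #                  hashlib.sha1).hexdigest()
    #   headers['x-container-sync-auth'] = '%s %s %s' % (realm, nonce, sig)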
def test_container_sync_row_put(self):
self._test_container_sync_row_put(None, None)
def test_container_sync_row_put_using_realms(self):
self._test_container_sync_row_put('US', 'realm_key')
def _test_container_sync_row_put(self, realm, realm_key):
orig_uuid = sync.uuid
orig_shuffle = sync.shuffle
orig_put_object = sync.put_object
try:
class FakeUUID(object):
class uuid4(object):
hex = 'abcdef'
sync.uuid = FakeUUID
sync.shuffle = lambda x: x
def fake_put_object(sync_to, name=None, headers=None,
contents=None, proxy=None, logger=None,
timeout=None):
self.assertEqual(sync_to, 'http://sync/to/path')
self.assertEqual(name, 'object')
if realm:
self.assertEqual(headers, {
'x-container-sync-auth':
'US abcdef ef62c64bb88a33fa00722daa23d5d43253164962',
'x-timestamp': '1.2',
'etag': 'etagvalue',
'other-header': 'other header value',
'content-type': 'text/plain'})
else:
self.assertEqual(headers, {
'x-container-sync-key': 'key',
'x-timestamp': '1.2',
'other-header': 'other header value',
'etag': 'etagvalue',
'content-type': 'text/plain'})
self.assertEqual(contents.read(), 'contents')
self.assertEqual(proxy, 'http://proxy')
self.assertEqual(timeout, 5.0)
self.assertEqual(logger, self.logger)
sync.put_object = fake_put_object
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync({}, container_ring=FakeRing(),
logger=self.logger)
cs.http_proxies = ['http://proxy']
def fake_get_object(acct, con, obj, headers, acceptable_statuses):
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
'0')
return (200,
{'other-header': 'other header value',
'etag': '"etagvalue"', 'x-timestamp': '1.2',
'content-type': 'text/plain; swift_bytes=123'},
iter('contents'))
cs.swift.get_object = fake_get_object
# Success as everything says it worked
self.assertTrue(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_puts, 1)
def fake_get_object(acct, con, obj, headers, acceptable_statuses):
self.assertEqual(headers['X-Newest'], True)
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
'0')
return (200,
{'date': 'date value',
'last-modified': 'last modified value',
'x-timestamp': '1.2',
'other-header': 'other header value',
'etag': '"etagvalue"',
'content-type': 'text/plain; swift_bytes=123'},
iter('contents'))
cs.swift.get_object = fake_get_object
# Success as everything says it worked, also checks 'date' and
# 'last-modified' headers are removed and that 'etag' header is
# stripped of double quotes.
self.assertTrue(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_puts, 2)
exc = []
def fake_get_object(acct, con, obj, headers, acceptable_statuses):
self.assertEqual(headers['X-Newest'], True)
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
'0')
exc.append(Exception('test exception'))
raise exc[-1]
cs.swift.get_object = fake_get_object
# Fail due to completely unexpected exception
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_puts, 2)
self.assertEqual(len(exc), 1)
self.assertEqual(str(exc[-1]), 'test exception')
exc = []
def fake_get_object(acct, con, obj, headers, acceptable_statuses):
self.assertEqual(headers['X-Newest'], True)
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
'0')
exc.append(ClientException('test client exception'))
raise exc[-1]
cs.swift.get_object = fake_get_object
# Fail due to all direct_get_object calls failing
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_puts, 2)
self.assertEqual(len(exc), 1)
self.assertEqual(str(exc[-1]), 'test client exception')
def fake_get_object(acct, con, obj, headers, acceptable_statuses):
self.assertEqual(headers['X-Newest'], True)
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
'0')
return (200, {'other-header': 'other header value',
'x-timestamp': '1.2', 'etag': '"etagvalue"'},
iter('contents'))
def fake_put_object(*args, **kwargs):
raise ClientException('test client exception', http_status=401)
cs.swift.get_object = fake_get_object
sync.put_object = fake_put_object
# Fail due to 401
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_puts, 2)
self.assertLogMessage('info', 'Unauth')
def fake_put_object(*args, **kwargs):
raise ClientException('test client exception', http_status=404)
sync.put_object = fake_put_object
# Fail due to 404
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_puts, 2)
self.assertLogMessage('info', 'Not found', 1)
def fake_put_object(*args, **kwargs):
raise ClientException('test client exception', http_status=503)
sync.put_object = fake_put_object
# Fail due to 503
self.assertFalse(cs.container_sync_row(
{'deleted': False,
'name': 'object',
'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'),
{'account': 'a', 'container': 'c', 'storage_policy_index': 0},
realm, realm_key))
self.assertEqual(cs.container_puts, 2)
self.assertLogMessage('error', 'ERROR Syncing')
finally:
sync.uuid = orig_uuid
sync.shuffle = orig_shuffle
sync.put_object = orig_put_object
def test_select_http_proxy_None(self):
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync(
{'sync_proxy': ''}, container_ring=FakeRing())
self.assertEqual(cs.select_http_proxy(), None)
def test_select_http_proxy_one(self):
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync(
{'sync_proxy': 'http://one'}, container_ring=FakeRing())
self.assertEqual(cs.select_http_proxy(), 'http://one')
def test_select_http_proxy_multiple(self):
with mock.patch('swift.container.sync.InternalClient'):
cs = sync.ContainerSync(
{'sync_proxy': 'http://one,http://two,http://three'},
container_ring=FakeRing())
self.assertEqual(
set(cs.http_proxies),
set(['http://one', 'http://two', 'http://three']))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
freedomtan/tensorflow | tensorflow/python/keras/optimizer_v2/adam.py | 4 | 20375 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adam optimizer implementation."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import gen_training_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.optimizers.Adam')
class Adam(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the Adam algorithm.
Adam optimization is a stochastic gradient descent method that is based on
adaptive estimation of first-order and second-order moments.
According to
[Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
the method is "*computationally
efficient, has little memory requirement, invariant to diagonal rescaling of
gradients, and is well suited for problems that are large in terms of
data/parameters*".
Args:
learning_rate: A `Tensor`, floating point value, or a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
that takes no arguments and returns the actual value to use, The
learning rate. Defaults to 0.001.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates. Defaults to 0.9.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use, The
exponential decay rate for the 2nd moment estimates. Defaults to 0.999.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to
1e-7.
amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm from
the paper "On the Convergence of Adam and beyond". Defaults to `False`.
name: Optional name for the operations created when applying gradients.
Defaults to `"Adam"`.
**kwargs: Keyword arguments. Allowed to be one of
`"clipnorm"` or `"clipvalue"`.
`"clipnorm"` (float) clips gradients by norm; `"clipvalue"` (float) clips
gradients by value.
Usage:
>>> opt = tf.keras.optimizers.Adam(learning_rate=0.1)
>>> var1 = tf.Variable(10.0)
>>> loss = lambda: (var1 ** 2)/2.0 # d(loss)/d(var1) == var1
>>> step_count = opt.minimize(loss, [var1]).numpy()
>>> # The first step is `-learning_rate*sign(grad)`
>>> var1.numpy()
9.9
Reference:
- [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
- [Reddi et al., 2018](
https://openreview.net/pdf?id=ryQu7f-RZ) for `amsgrad`.
Notes:
The default value of 1e-7 for epsilon might not be a good default in
general. For example, when training an Inception network on ImageNet a
current good choice is 1.0 or 0.1. Note that since Adam uses the
formulation just before Section 2.1 of the Kingma and Ba paper rather than
the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
hat" in the paper.
The sparse implementation of this algorithm (used when the gradient is an
IndexedSlices object, typically because of `tf.gather` or an embedding
lookup in the forward pass) does apply momentum to variable slices even if
they were not used in the forward pass (meaning they have a gradient equal
to zero). Momentum decay (beta1) is also applied to the entire momentum
accumulator. This means that the sparse behavior is equivalent to the dense
behavior (in contrast to some momentum implementations which ignore momentum
unless a variable slice was actually used).
"""
_HAS_AGGREGATE_GRAD = True
def __init__(self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
name='Adam',
**kwargs):
super(Adam, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._set_hyper('beta_1', beta_1)
self._set_hyper('beta_2', beta_2)
self.epsilon = epsilon or backend_config.epsilon()
self.amsgrad = amsgrad
def _create_slots(self, var_list):
# Create slots for the first and second moments.
# Separate for-loops to respect the ordering of slot variables from v1.
for var in var_list:
self.add_slot(var, 'm')
for var in var_list:
self.add_slot(var, 'v')
if self.amsgrad:
for var in var_list:
self.add_slot(var, 'vhat')
def _prepare_local(self, var_device, var_dtype, apply_state):
super(Adam, self)._prepare_local(var_device, var_dtype, apply_state)
local_step = math_ops.cast(self.iterations + 1, var_dtype)
beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
beta_1_power = math_ops.pow(beta_1_t, local_step)
beta_2_power = math_ops.pow(beta_2_t, local_step)
lr = (apply_state[(var_device, var_dtype)]['lr_t'] *
(math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)))
apply_state[(var_device, var_dtype)].update(
dict(
lr=lr,
epsilon=ops.convert_to_tensor_v2_with_dispatch(
self.epsilon, var_dtype),
beta_1_t=beta_1_t,
beta_1_power=beta_1_power,
one_minus_beta_1_t=1 - beta_1_t,
beta_2_t=beta_2_t,
beta_2_power=beta_2_power,
one_minus_beta_2_t=1 - beta_2_t))
def set_weights(self, weights):
params = self.weights
# If the weights are generated by Keras V1 optimizer, it includes vhats
    # even without amsgrad, i.e., V1 optimizer has 3x + 1 variables, while V2
# optimizer has 2x + 1 variables. Filter vhats out for compatibility.
num_vars = int((len(params) - 1) / 2)
if len(weights) == 3 * num_vars + 1:
weights = weights[:len(params)]
super(Adam, self).set_weights(weights)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
if not self.amsgrad:
return gen_training_ops.ResourceApplyAdam(
var=var.handle,
m=m.handle,
v=v.handle,
beta1_power=coefficients['beta_1_power'],
beta2_power=coefficients['beta_2_power'],
lr=coefficients['lr_t'],
beta1=coefficients['beta_1_t'],
beta2=coefficients['beta_2_t'],
epsilon=coefficients['epsilon'],
grad=grad,
use_locking=self._use_locking)
else:
vhat = self.get_slot(var, 'vhat')
return gen_training_ops.ResourceApplyAdamWithAmsgrad(
var=var.handle,
m=m.handle,
v=v.handle,
vhat=vhat.handle,
beta1_power=coefficients['beta_1_power'],
beta2_power=coefficients['beta_2_power'],
lr=coefficients['lr_t'],
beta1=coefficients['beta_1_t'],
beta2=coefficients['beta_2_t'],
epsilon=coefficients['epsilon'],
grad=grad,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, 'm')
m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
m_t = state_ops.assign(m, m * coefficients['beta_1_t'],
use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, 'v')
v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t']
v_t = state_ops.assign(v, v * coefficients['beta_2_t'],
use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
if not self.amsgrad:
v_sqrt = math_ops.sqrt(v_t)
var_update = state_ops.assign_sub(
var, coefficients['lr'] * m_t / (v_sqrt + coefficients['epsilon']),
use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_t, v_t])
else:
v_hat = self.get_slot(var, 'vhat')
v_hat_t = math_ops.maximum(v_hat, v_t)
with ops.control_dependencies([v_hat_t]):
v_hat_t = state_ops.assign(
v_hat, v_hat_t, use_locking=self._use_locking)
v_hat_sqrt = math_ops.sqrt(v_hat_t)
var_update = state_ops.assign_sub(
var,
coefficients['lr'] * m_t / (v_hat_sqrt + coefficients['epsilon']),
use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_t, v_t, v_hat_t])
def get_config(self):
config = super(Adam, self).get_config()
config.update({
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'decay': self._initial_decay,
'beta_1': self._serialize_hyperparameter('beta_1'),
'beta_2': self._serialize_hyperparameter('beta_2'),
'epsilon': self.epsilon,
'amsgrad': self.amsgrad,
})
return config
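# A hypothetical pure-Python sketch (not part of the public API) of the dense
# update implemented above, using the efficient formulation from the end of
# section 2 of Kingma & Ba; all names here are illustrative.
def _example_adam_step(theta, g, m, v, t, lr=0.001, beta_1=0.9, beta_2=0.999,
                       epsilon=1e-7):
  m = beta_1 * m + (1 - beta_1) * g  # first-moment (mean) estimate
  v = beta_2 * v + (1 - beta_2) * g * g  # second-moment estimate
  lr_t = lr * (1 - beta_2 ** t) ** 0.5 / (1 - beta_1 ** t)  # bias correction
  theta = theta - lr_t * m / (v ** 0.5 + epsilon)
  return theta, m, v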
class NonFusedAdam(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the Adam algorithm without fused kernels.
Adam optimization is a stochastic gradient descent method that is based on
adaptive estimation of first-order and second-order moments.
According to the paper
[Adam: A Method for Stochastic Optimization. Kingma et al.,
2014](http://arxiv.org/abs/1412.6980), the method is "*computationally
efficient, has little memory requirement, invariant to diagonal rescaling of
gradients, and is well suited for problems that are large in terms of
data/parameters*".
For AMSGrad see [On The Convergence Of Adam And Beyond.
Reddi et al., 5-8](https://openreview.net/pdf?id=ryQu7f-RZ).
**If amsgrad = False**:
initialize $m_0$ as 1st moment vector
initialize $v_0$ as 2nd moment vector
The update rule for $\theta$ with gradient $g$ uses an optimization
described at the end of section 2 of the paper:
$$lr_t = \mathrm{learning\_rate} *
\sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
$$m_t = \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
$$v_t = \beta_2 * v_{t-1} + (1 - \beta_2) * g^2$$
$$\theta_t = \theta_{t-1} - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$
**If amsgrad = True**:
initialize $m_0$ as 1st moment vector
initialize $v_0$ as 2nd moment vector
initialize $\hat{v}_0$ as 2nd moment vector
The update rule for $\theta$ with gradient $g$ uses an optimization
described at the end of section 2 of the paper:
$$lr_t = \mathrm{learning\_rate} *
\sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
$$m_t = \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
$$v_t = \beta_2 * v_{t-1} + (1 - \beta_2) * g^2$$
$$\hat{v}_t = \max(\hat{v}_{t-1}, v_t)$$
$$\theta_t = \theta_{t-1} - lr_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$
The default value of 1e-7 for epsilon might not be a good default in
general. For example, when training an Inception network on ImageNet a
current good choice is 1.0 or 0.1. Note that since Adam uses the
formulation just before Section 2.1 of the Kingma and Ba paper rather than
the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
hat" in the paper.
The sparse implementation of this algorithm (used when the gradient is an
IndexedSlices object, typically because of `tf.gather` or an embedding
lookup in the forward pass) does apply momentum to variable slices even if
they were not used in the forward pass (meaning they have a gradient equal
to zero). Momentum decay (beta1) is also applied to the entire momentum
accumulator. This means that the sparse behavior is equivalent to the dense
behavior (in contrast to some momentum implementations which ignore momentum
unless a variable slice was actually used).
Usage:
>>> opt = tf.keras.optimizers.Adam(learning_rate=0.1)
>>> var1 = tf.Variable(10.0)
>>> loss = lambda: (var1 ** 2)/2.0 # d(loss)/d(var1) == var1
>>> step_count = opt.minimize(loss, [var1]).numpy()
>>> # The first step is `-learning_rate*sign(grad)`
>>> var1.numpy()
9.9
"""
_HAS_AGGREGATE_GRAD = True
def __init__(self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
name='Adam',
**kwargs):
"""Construct a new Adam optimizer.
Args:
learning_rate: A `Tensor`, floating point value, or a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable that
takes no arguments and returns the actual value to use, The learning
rate. Defaults to 0.001.
beta_1: A float value or a constant float tensor, or a callable that takes
no arguments and returns the actual value to use. The exponential decay
rate for the 1st moment estimates. Defaults to 0.9.
beta_2: A float value or a constant float tensor, or a callable that takes
no arguments and returns the actual value to use, The exponential decay
rate for the 2nd moment estimates. Defaults to 0.999.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to
1e-7.
amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm from
the paper "On the Convergence of Adam and beyond". Defaults to `False`.
name: Optional name for the operations created when applying gradients.
Defaults to "Adam".
      **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
        `decay`}. `clipnorm` clips gradients by norm; `clipvalue` clips
        gradients by value; `decay` is included for backward compatibility to
        allow time-inverse decay of the learning rate. `lr` is included for
        backward compatibility; `learning_rate` is recommended instead.
"""
super(NonFusedAdam, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._set_hyper('beta_1', beta_1)
self._set_hyper('beta_2', beta_2)
self.epsilon = epsilon or backend_config.epsilon()
self.amsgrad = amsgrad
def _create_slots(self, var_list):
# Create slots for the first and second moments.
# Separate for-loops to respect the ordering of slot variables from v1.
for var in var_list:
self.add_slot(var, 'm')
for var in var_list:
self.add_slot(var, 'v')
if self.amsgrad:
for var in var_list:
self.add_slot(var, 'vhat')
def _prepare_local(self, var_device, var_dtype, apply_state):
super(NonFusedAdam, self)._prepare_local(var_device, var_dtype, apply_state)
local_step = math_ops.cast(self.iterations + 1, var_dtype)
beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
beta_1_power = math_ops.pow(beta_1_t, local_step)
beta_2_power = math_ops.pow(beta_2_t, local_step)
lr = (
apply_state[(var_device, var_dtype)]['lr_t'] *
(math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)))
apply_state[(var_device, var_dtype)].update(
dict(
lr=lr,
epsilon=ops.convert_to_tensor_v2_with_dispatch(
self.epsilon, var_dtype),
beta_1_t=beta_1_t,
beta_1_power=beta_1_power,
one_minus_beta_1_t=1 - beta_1_t,
beta_2_t=beta_2_t,
beta_2_power=beta_2_power,
one_minus_beta_2_t=1 - beta_2_t))
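  # The scaled `lr` computed above folds Adam's bias correction into a single
  # factor: lr_t * sqrt(1 - beta_2**t) / (1 - beta_1**t), so the apply steps
  # below can consume the raw first/second-moment accumulators directly.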
def set_weights(self, weights):
params = self.weights
# If the weights are generated by Keras V1 optimizer, it includes vhats
# even without amsgrad, i.e, V1 optimizer has 3x + 1 variables, while V2
# optimizer has 2x + 1 variables. Filter vhats out for compatibility.
num_vars = int((len(params) - 1) / 2)
if len(weights) == 3 * num_vars + 1:
weights = weights[:len(params)]
super(NonFusedAdam, self).set_weights(weights)
@def_function.function(jit_compile=True)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
self._fallback_apply_state(var_device, var_dtype))
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
alpha = (
coefficients['lr_t'] * math_ops.sqrt(1 - coefficients['beta_2_power']) /
(1 - coefficients['beta_1_power']))
m.assign_add((grad - m) * (1 - coefficients['beta_1_t']))
v.assign_add((math_ops.square(grad) - v) * (1 - coefficients['beta_2_t']))
if self.amsgrad:
vhat = self.get_slot(var, 'vhat')
vhat.assign(math_ops.maximum(vhat, v))
v = vhat
    # Parameter update: var -= alpha * m / (sqrt(v) + epsilon).
    var.assign_sub(
        (m * alpha) / (math_ops.sqrt(v) + coefficients['epsilon']))
@def_function.function(jit_compile=True)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
self._fallback_apply_state(var_device, var_dtype))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, 'm')
m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
m.assign(m * coefficients['beta_1_t'])
m.scatter_add(ops.IndexedSlices(m_scaled_g_values, indices))
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, 'v')
v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t']
v.assign(v * coefficients['beta_2_t'])
v.scatter_add(ops.IndexedSlices(v_scaled_g_values, indices))
if not self.amsgrad:
var.assign_sub(coefficients['lr'] * m /
(math_ops.sqrt(v) + coefficients['epsilon']))
else:
v_hat = self.get_slot(var, 'vhat')
v_hat.assign(math_ops.maximum(v_hat, v))
var.assign_sub(coefficients['lr'] * m /
(math_ops.sqrt(v_hat) + coefficients['epsilon']))
def get_config(self):
config = super(NonFusedAdam, self).get_config()
config.update({
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'decay': self._initial_decay,
'beta_1': self._serialize_hyperparameter('beta_1'),
'beta_2': self._serialize_hyperparameter('beta_2'),
'epsilon': self.epsilon,
'amsgrad': self.amsgrad,
})
return config
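# A minimal sketch (assuming a TensorFlow 2.x install importable as `tf`) of
# the sparse behavior documented above. It uses the public
# `tf.keras.optimizers.Adam` rather than this internal NonFusedAdam class:
# `tf.gather` in the forward pass yields an IndexedSlices gradient, and the
# optimizer still decays the full momentum accumulator on every step.
if __name__ == "__main__":
  import tensorflow as tf

  emb = tf.Variable(tf.ones([4, 2]))
  opt = tf.keras.optimizers.Adam(learning_rate=0.1)
  with tf.GradientTape() as tape:
    loss = tf.reduce_sum(tf.gather(emb, [0, 2]) ** 2)  # rows 1 and 3 unused
  grads = tape.gradient(loss, [emb])  # an IndexedSlices gradient
  opt.apply_gradients(zip(grads, [emb]))
  # On the first step only the gathered rows move, since m and v start at 0.
  print(emb.numpy())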
| apache-2.0 |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-1.4/django/core/servers/basehttp.py | 76 | 8894 | """
HTTP server that implements the Python WSGI protocol (PEP 333, rev 1.21).
Based on wsgiref.simple_server which is part of the standard library since 2.5.
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. DON'T USE IT FOR PRODUCTION USE!
"""
import os
import socket
import sys
import traceback
import urllib
import urlparse
from SocketServer import ThreadingMixIn
from wsgiref import simple_server
from wsgiref.util import FileWrapper # for backwards compatibility
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from django.core.wsgi import get_wsgi_application
from django.utils.importlib import import_module
from django.utils._os import safe_join
from django.views import static
from django.contrib.staticfiles import handlers
__all__ = ['WSGIServer', 'WSGIRequestHandler']
def get_internal_wsgi_application():
"""
Loads and returns the WSGI application as configured by the user in
``settings.WSGI_APPLICATION``. With the default ``startproject`` layout,
this will be the ``application`` object in ``projectname/wsgi.py``.
This function, and the ``WSGI_APPLICATION`` setting itself, are only useful
for Django's internal servers (runserver, runfcgi); external WSGI servers
should just be configured to point to the correct application object
directly.
If settings.WSGI_APPLICATION is not set (is ``None``), we just return
whatever ``django.core.wsgi.get_wsgi_application`` returns.
"""
from django.conf import settings
app_path = getattr(settings, 'WSGI_APPLICATION')
if app_path is None:
return get_wsgi_application()
module_name, attr = app_path.rsplit('.', 1)
try:
mod = import_module(module_name)
except ImportError, e:
raise ImproperlyConfigured(
"WSGI application '%s' could not be loaded; "
"could not import module '%s': %s" % (app_path, module_name, e))
try:
app = getattr(mod, attr)
except AttributeError, e:
raise ImproperlyConfigured(
"WSGI application '%s' could not be loaded; "
"can't find '%s' in module '%s': %s"
% (app_path, attr, module_name, e))
return app
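# Example (hypothetical project layout): with settings.WSGI_APPLICATION set to
# 'mysite.wsgi.application', the code above imports the 'mysite.wsgi' module
# and returns its module-level 'application' object.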
class WSGIServerException(Exception):
pass
class ServerHandler(simple_server.ServerHandler, object):
error_status = "500 INTERNAL SERVER ERROR"
def write(self, data):
"""'write()' callable as specified by PEP 333"""
assert isinstance(data, str), "write() argument must be string"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
# If data is too large, socket will choke, so write chunks no larger
# than 32MB at a time.
length = len(data)
if length > 33554432:
offset = 0
            while offset < length:
                chunk_size = min(33554432, length - offset)
                self._write(data[offset:offset+chunk_size])
self._flush()
offset += chunk_size
else:
self._write(data)
self._flush()
def error_output(self, environ, start_response):
super(ServerHandler, self).error_output(environ, start_response)
return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
class WSGIServer(simple_server.WSGIServer, object):
"""BaseHTTPServer that implements the Python WSGI protocol"""
def __init__(self, *args, **kwargs):
if kwargs.pop('ipv6', False):
self.address_family = socket.AF_INET6
super(WSGIServer, self).__init__(*args, **kwargs)
def server_bind(self):
"""Override server_bind to store the server name."""
try:
super(WSGIServer, self).server_bind()
except Exception, e:
raise WSGIServerException(e)
self.setup_environ()
class WSGIRequestHandler(simple_server.WSGIRequestHandler, object):
def __init__(self, *args, **kwargs):
from django.conf import settings
self.admin_media_prefix = urlparse.urljoin(settings.STATIC_URL, 'admin/')
# We set self.path to avoid crashes in log_message() on unsupported
# requests (like "OPTIONS").
self.path = ''
self.style = color_style()
super(WSGIRequestHandler, self).__init__(*args, **kwargs)
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.unquote(path)
env['QUERY_STRING'] = query
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
for h in self.headers.headers:
k,v = h.split(':',1)
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
                continue  # skip content length, type, etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
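    # For example, a request header "X-Forwarded-For: 10.0.0.1" becomes
    # env['HTTP_X_FORWARDED_FOR'] = '10.0.0.1', and repeated headers are
    # joined with commas as the loop above shows.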
def log_message(self, format, *args):
# Don't bother logging requests for admin images or the favicon.
if (self.path.startswith(self.admin_media_prefix)
or self.path == '/favicon.ico'):
return
msg = "[%s] %s\n" % (self.log_date_time_string(), format % args)
# Utilize terminal colors, if available
if args[1][0] == '2':
# Put 2XX first, since it should be the common case
msg = self.style.HTTP_SUCCESS(msg)
elif args[1][0] == '1':
msg = self.style.HTTP_INFO(msg)
elif args[1] == '304':
msg = self.style.HTTP_NOT_MODIFIED(msg)
elif args[1][0] == '3':
msg = self.style.HTTP_REDIRECT(msg)
elif args[1] == '404':
msg = self.style.HTTP_NOT_FOUND(msg)
elif args[1][0] == '4':
msg = self.style.HTTP_BAD_REQUEST(msg)
else:
# Any 5XX, or any other response
msg = self.style.HTTP_SERVER_ERROR(msg)
sys.stderr.write(msg)
class AdminMediaHandler(handlers.StaticFilesHandler):
"""
WSGI middleware that intercepts calls to the admin media directory, as
defined by the STATIC_URL setting, and serves those images.
Use this ONLY LOCALLY, for development! This hasn't been tested for
security and is not super efficient.
    This has been pending deprecation since 1.3.
"""
def get_base_dir(self):
return os.path.join(django.__path__[0], 'contrib', 'admin', 'static', 'admin')
def get_base_url(self):
from django.conf import settings
return urlparse.urljoin(settings.STATIC_URL, 'admin/')
def file_path(self, url):
"""
Returns the path to the media file on disk for the given URL.
The passed URL is assumed to begin with ``self.base_url``. If the
resulting file path is outside the media directory, then a ValueError
is raised.
"""
relative_url = url[len(self.base_url[2]):]
relative_path = urllib.url2pathname(relative_url)
return safe_join(self.base_dir, relative_path)
def serve(self, request):
document_root, path = os.path.split(self.file_path(request.path))
return static.serve(request, path, document_root=document_root)
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the base path
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def run(addr, port, wsgi_handler, ipv6=False, threading=False):
server_address = (addr, port)
if threading:
httpd_cls = type('WSGIServer', (ThreadingMixIn, WSGIServer), {})
else:
httpd_cls = WSGIServer
httpd = httpd_cls(server_address, WSGIRequestHandler, ipv6=ipv6)
httpd.set_app(wsgi_handler)
httpd.serve_forever()
| mit |
tliron/ronin | examples/installing/build.py | 1 | 1465 | #!/usr/bin/env python
#
# Installing Example
#
# build.py
#
# Requirements:
#
# Ubuntu: sudo apt install gcc ccache
#
# To install your build, use the Copy executor and set "inputs_from=" to the generating phase,
# as well as "output_strip_prefix_from=" if you want to strip the original output path.
#
# Run with the "--install" CLI argument to enable "build.install" in the context. In this example
# we're using "paths.install" in the context to override a safe default install path, so a complete
# install command would look something like this:
#
# ./build.py --install --set paths.install=/usr/bin
#
from ronin.cli import cli
from ronin.contexts import new_context
from ronin.files import Copy
from ronin.gcc import GccBuild
from ronin.phases import Phase
from ronin.projects import Project
from ronin.utils.paths import glob, join_path
with new_context() as ctx:
project = Project('Installing Example')
install_path = ctx.get('paths.install', join_path(ctx.paths.root, 'install'))
Phase(project=project,
name='build',
executor=GccBuild(),
inputs=glob('src/**/*.c'),
output='size',
run_output=1 if ctx.build.run else 0)
if ctx.build.install:
Phase(project=project,
name='install',
executor=Copy(),
inputs_from=['build'],
output_strip_prefix_from='build',
output_path=install_path)
cli(project)
| apache-2.0 |
mandoku/mdweb | app/main/errors.py | 1 | 1045 | # -*- coding: utf-8 -*-
from flask import render_template, request, jsonify
from . import main
@main.app_errorhandler(403)
def forbidden(e):
if request.accept_mimetypes.accept_json and \
not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'forbidden'})
response.status_code = 403
return response
return render_template('403.html'), 403
@main.app_errorhandler(404)
def page_not_found(e):
if request.accept_mimetypes.accept_json and \
not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'not found'})
response.status_code = 404
return response
return render_template('404.html'), 404
@main.app_errorhandler(500)
def internal_server_error(e):
if request.accept_mimetypes.accept_json and \
not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'internal server error'})
response.status_code = 500
return response
return render_template('500.html'), 500
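# Each handler above performs simple content negotiation: clients that accept
# only JSON (typical API consumers) get a JSON error body with the matching
# status code, while browsers get the rendered HTML error page.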
| mit |
hainm/dask | dask/dataframe/shuffle.py | 4 | 2967 | from itertools import count
from collections import Iterator
from math import ceil
from toolz import merge, accumulate, merge_sorted
import toolz
from operator import getitem, setitem
import pandas as pd
import numpy as np
from pframe import pframe
from .. import threaded
from .core import DataFrame, Series, get, names
from ..compatibility import unicode
from ..utils import ignoring
tokens = ('-%d' % i for i in count(1))
def set_index(f, index, npartitions=None, **kwargs):
""" Set DataFrame index to new column
Sorts index and realigns Dataframe to new sorted order. This shuffles and
repartitions your data.
"""
npartitions = npartitions or f.npartitions
if not isinstance(index, Series):
index2 = f[index]
else:
index2 = index
divisions = (index2
.quantiles(np.linspace(0, 100, npartitions+1)[1:-1])
.compute())
return f.set_partition(index, divisions, **kwargs)
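# Example (hypothetical column name): for a dask DataFrame `df` with a
# 'timestamp' column, set_index(df, 'timestamp', npartitions=8) samples
# quantiles of that column to pick division boundaries, then shuffles rows
# into the new sorted partitions via set_partition below.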
partition_names = ('set_partition-%d' % i for i in count(1))
def set_partition(f, index, divisions, get=threaded.get, **kwargs):
""" Set new partitioning along index given divisions """
divisions = unique(divisions)
name = next(names)
if isinstance(index, Series):
assert index.divisions == f.divisions
dsk = dict(((name, i), (f._partition_type.set_index, block, ind))
for i, (block, ind) in enumerate(zip(f._keys(), index._keys())))
f2 = type(f)(merge(f.dask, index.dask, dsk), name,
f.column_info, f.divisions)
else:
dsk = dict(((name, i), (f._partition_type.set_index, block, index))
for i, block in enumerate(f._keys()))
f2 = type(f)(merge(f.dask, dsk), name, f.column_info, f.divisions)
head = f2.head()
pf = pframe(like=head, divisions=divisions, **kwargs)
def append(block):
pf.append(block)
return 0
f2.map_blocks(append).compute(get=get)
pf.flush()
return from_pframe(pf)
def from_pframe(pf):
""" Load dask.array from pframe """
name = next(names)
dsk = dict(((name, i), (pframe.get_partition, pf, i))
for i in range(pf.npartitions))
return DataFrame(dsk, name, pf.columns, pf.divisions)
def unique(divisions):
""" Polymorphic unique function
>>> list(unique([1, 2, 3, 1, 2, 3]))
[1, 2, 3]
>>> unique(np.array([1, 2, 3, 1, 2, 3]))
array([1, 2, 3])
>>> unique(pd.Categorical(['Alice', 'Bob', 'Alice'], ordered=False))
[Alice, Bob]
Categories (2, object): [Alice, Bob]
"""
if isinstance(divisions, np.ndarray):
return np.unique(divisions)
if isinstance(divisions, pd.Categorical):
return pd.Categorical.from_codes(np.unique(divisions.codes),
divisions.categories, divisions.ordered)
if isinstance(divisions, (tuple, list, Iterator)):
return tuple(toolz.unique(divisions))
raise NotImplementedError()
| bsd-3-clause |
auto-mat/klub | apps/aklub/autocom.py | 1 | 6964 | # -*- coding: utf-8 -*-
# Author: Hynek Hanke <[email protected]>
#
# Copyright (C) 2010 o.s. Auto*Mat
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Automatic communications for club management"""
import datetime
import logging
import string
from django.core.exceptions import ValidationError
logger = logging.getLogger(__name__)
def _localize_enum(descr, val, lang):
for t in descr:
if t[0] == val:
# Hack! Here we should use the Gettext localization
# based on the value of 'lang' -- this is however
# not easy due to lazy translations and t[1] already
# being wrapped by a foreign translator
if lang == 'cs':
return t[1].lower()
else:
# less wrong would be to retrieve the original untranslated version of t[1]...
return t[0]
# Translation not found
return val
KNOWN_VARIABLES = [
"addressment", "name", "firstname", "surname", "street", "city", "zipcode", "email",
"telephone", "regular_amount", "regular_frequency", "var_symbol", "last_payment_amount",
"auth_token",
]
def min_non_negative(i, j):
if i < 0:
return j
if j < 0:
return i
return min(i, j)
def gendrify_text(text, sex=''):
# Modify text according to gender
# Example: Vazen{y|a} {pane|pani} -> [male] -> Vazeny pane
gender_text = ""
o = 0
i = 0
while i < len(text):
if text[i] == '{':
gender_text += text[o:i]
sep_pos = min_non_negative(text.find('|', i), text.find('/', i))
end_pos = text.find('}', i)
if sep_pos <= i or end_pos <= sep_pos:
raise ValidationError("Gender strings must look like {male_variant|female_variant} or {male_variant/female_variant}")
male_variant = text[i + 1:sep_pos]
female_variant = text[sep_pos + 1:end_pos]
if sex == 'male':
gender_text += male_variant
elif sex == 'female':
gender_text += female_variant
else:
gender_text += male_variant + "/" + female_variant
o = end_pos + 1
i = end_pos
i += 1
gender_text += text[o:]
return gender_text
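# Example: gendrify_text("Vazen{y|a} {pane|pani}", "male") -> "Vazeny pane";
# with sex left empty, both variants are kept: "Vazeny/a pane/pani".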
def process_template(template_string, user, payment_channel):
from .models import DonorPaymentChannel
from sesame import utils as sesame_utils
template = string.Template(template_string)
if payment_channel:
payment_substitutes = {
'regular_amount': payment_channel.regular_amount,
'regular_frequency': _localize_enum(
DonorPaymentChannel.REGULAR_PAYMENT_FREQUENCIES,
payment_channel.regular_frequency,
user.language,
),
'var_symbol': payment_channel.VS,
'last_payment_amount': payment_channel.last_payment.amount if payment_channel.last_payment else None,
}
else:
payment_substitutes = {}
# Make variable substitutions
text = template.substitute(
addressment=user.get_addressment(),
last_name_vokativ=user.get_last_name_vokativ(),
name=user.first_name if hasattr(user, 'first_name') else user.name,
firstname=user.first_name if hasattr(user, 'first_name') else user.name,
surname=user.last_name if hasattr(user, 'first_name') else user.name,
street=user.street,
city=user.city,
zipcode=user.zip_code,
email=user.email,
telephone=user.get_telephone(),
auth_token=sesame_utils.get_query_string(user),
**payment_substitutes,
)
return gendrify_text(text, user.sex if hasattr(user, 'sex') else '')
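# Example (hypothetical template): for the template string
# "Dear $addressment, your variable symbol is $var_symbol." the substitution
# above fills in user.get_addressment() and the payment channel's VS, and the
# result is then passed through gendrify_text.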
def check(user_profiles=None, action=None): # noqa
from .models import AutomaticCommunication, DonorPaymentChannel, UserProfile
from interactions.models import Interaction
if not user_profiles:
user_profiles = UserProfile.objects.all()
# limit autocoms only for autocoms where action is used
if action:
# TODO: handle nested conditions?
from flexible_filter_conditions.models import TerminalCondition
conditions = TerminalCondition.objects.filter(variable='action', value=action).values_list('condition')
auto_coms = AutomaticCommunication.objects.filter(condition__conditions__in=conditions)
else:
auto_coms = AutomaticCommunication.objects.all()
for auto_comm in auto_coms:
logger.info(
u"Processin condition \"%s\" for autocom \"%s\", method: \"%s\", action: \"%s\"" % (
auto_comm.condition,
auto_comm,
auto_comm.method_type,
action,
),
)
filtered_user_profiles = auto_comm.condition.filter_queryset(user_profiles, action)
for user in filtered_user_profiles:
try:
if auto_comm.event:
payment_channel = user.userchannels.get(event=auto_comm.event)
else:
payment_channel = None
except DonorPaymentChannel.DoesNotExist:
payment_channel = None
if auto_comm.only_once and auto_comm.sent_to_users.filter(pk=user.pk).exists():
continue
if user.language == 'cs':
template = auto_comm.template
subject = auto_comm.subject
else:
template = auto_comm.template_en
subject = auto_comm.subject_en
if template and template != '':
logger.info(u"Added new automatic communication \"%s\" for user \"%s\", action \"%s\"" % (auto_comm, user, action))
c = Interaction(
user=user,
type=auto_comm.method_type,
date_from=datetime.datetime.now(),
subject=subject,
summary=process_template(template, user, payment_channel),
note="Prepared by automated mailer at %s" % datetime.datetime.now(),
settlement='a',
administrative_unit=auto_comm.administrative_unit,
)
auto_comm.sent_to_users.add(user)
c.save()
| gpl-3.0 |
duducosmos/pgs4a | python-install/lib/python2.7/test/test_platform.py | 17 | 9707 | import sys
import os
import unittest
import platform
import subprocess
from test import test_support
class PlatformTest(unittest.TestCase):
def test_architecture(self):
res = platform.architecture()
if hasattr(os, "symlink"):
def test_architecture_via_symlink(self): # issue3762
def get(python):
cmd = [python, '-c',
'import platform; print platform.architecture()']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
return p.communicate()
real = os.path.realpath(sys.executable)
link = os.path.abspath(test_support.TESTFN)
os.symlink(real, link)
try:
self.assertEqual(get(real), get(link))
finally:
os.remove(link)
def test_platform(self):
for aliased in (False, True):
for terse in (False, True):
res = platform.platform(aliased, terse)
def test_system(self):
res = platform.system()
def test_node(self):
res = platform.node()
def test_release(self):
res = platform.release()
def test_version(self):
res = platform.version()
def test_machine(self):
res = platform.machine()
def test_processor(self):
res = platform.processor()
def setUp(self):
self.save_version = sys.version
self.save_subversion = sys.subversion
self.save_platform = sys.platform
def tearDown(self):
sys.version = self.save_version
sys.subversion = self.save_subversion
sys.platform = self.save_platform
def test_sys_version(self):
# Old test.
for input, output in (
('2.4.3 (#1, Jun 21 2006, 13:54:21) \n[GCC 3.3.4 (pre 3.3.5 20040809)]',
('CPython', '2.4.3', '', '', '1', 'Jun 21 2006 13:54:21', 'GCC 3.3.4 (pre 3.3.5 20040809)')),
('IronPython 1.0.60816 on .NET 2.0.50727.42',
('IronPython', '1.0.60816', '', '', '', '', '.NET 2.0.50727.42')),
('IronPython 1.0 (1.0.61005.1977) on .NET 2.0.50727.42',
('IronPython', '1.0.0', '', '', '', '', '.NET 2.0.50727.42')),
):
# branch and revision are not "parsed", but fetched
# from sys.subversion. Ignore them
(name, version, branch, revision, buildno, builddate, compiler) \
= platform._sys_version(input)
self.assertEqual(
(name, version, '', '', buildno, builddate, compiler), output)
# Tests for python_implementation(), python_version(), python_branch(),
# python_revision(), python_build(), and python_compiler().
sys_versions = {
("2.6.1 (r261:67515, Dec 6 2008, 15:26:00) \n[GCC 4.0.1 (Apple Computer, Inc. build 5370)]",
('CPython', 'tags/r261', '67515'), self.save_platform)
:
("CPython", "2.6.1", "tags/r261", "67515",
('r261:67515', 'Dec 6 2008 15:26:00'),
'GCC 4.0.1 (Apple Computer, Inc. build 5370)'),
("IronPython 2.0 (2.0.0.0) on .NET 2.0.50727.3053", None, "cli")
:
("IronPython", "2.0.0", "", "", ("", ""),
".NET 2.0.50727.3053"),
("2.5 (trunk:6107, Mar 26 2009, 13:02:18) \n[Java HotSpot(TM) Client VM (\"Apple Computer, Inc.\")]",
('Jython', 'trunk', '6107'), "java1.5.0_16")
:
("Jython", "2.5.0", "trunk", "6107",
('trunk:6107', 'Mar 26 2009'), "java1.5.0_16"),
("2.5.2 (63378, Mar 26 2009, 18:03:29)\n[PyPy 1.0.0]",
('PyPy', 'trunk', '63378'), self.save_platform)
:
("PyPy", "2.5.2", "trunk", "63378", ('63378', 'Mar 26 2009'),
"")
}
for (version_tag, subversion, sys_platform), info in \
sys_versions.iteritems():
sys.version = version_tag
if subversion is None:
if hasattr(sys, "subversion"):
del sys.subversion
else:
sys.subversion = subversion
if sys_platform is not None:
sys.platform = sys_platform
self.assertEqual(platform.python_implementation(), info[0])
self.assertEqual(platform.python_version(), info[1])
self.assertEqual(platform.python_branch(), info[2])
self.assertEqual(platform.python_revision(), info[3])
self.assertEqual(platform.python_build(), info[4])
self.assertEqual(platform.python_compiler(), info[5])
def test_system_alias(self):
res = platform.system_alias(
platform.system(),
platform.release(),
platform.version(),
)
def test_uname(self):
res = platform.uname()
self.assertTrue(any(res))
@unittest.skipUnless(sys.platform.startswith('win'), "windows only test")
def test_uname_win32_ARCHITEW6432(self):
# Issue 7860: make sure we get architecture from the correct variable
# on 64 bit Windows: if PROCESSOR_ARCHITEW6432 exists we should be
# using it, per
# http://blogs.msdn.com/david.wang/archive/2006/03/26/HOWTO-Detect-Process-Bitness.aspx
try:
with test_support.EnvironmentVarGuard() as environ:
if 'PROCESSOR_ARCHITEW6432' in environ:
del environ['PROCESSOR_ARCHITEW6432']
environ['PROCESSOR_ARCHITECTURE'] = 'foo'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'foo')
environ['PROCESSOR_ARCHITEW6432'] = 'bar'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'bar')
finally:
platform._uname_cache = None
def test_java_ver(self):
res = platform.java_ver()
if sys.platform == 'java':
self.assertTrue(all(res))
def test_win32_ver(self):
res = platform.win32_ver()
def test_mac_ver(self):
res = platform.mac_ver()
try:
import gestalt
except ImportError:
have_toolbox_glue = False
else:
have_toolbox_glue = True
if have_toolbox_glue and platform.uname()[0] == 'Darwin':
# We're on a MacOSX system, check that
# the right version information is returned
fd = os.popen('sw_vers', 'r')
real_ver = None
for ln in fd:
if ln.startswith('ProductVersion:'):
real_ver = ln.strip().split()[-1]
break
fd.close()
self.assertFalse(real_ver is None)
result_list = res[0].split('.')
expect_list = real_ver.split('.')
len_diff = len(result_list) - len(expect_list)
# On Snow Leopard, sw_vers reports 10.6.0 as 10.6
if len_diff > 0:
expect_list.extend(['0'] * len_diff)
self.assertEqual(result_list, expect_list)
# res[1] claims to contain
# (version, dev_stage, non_release_version)
# That information is no longer available
self.assertEqual(res[1], ('', '', ''))
if sys.byteorder == 'little':
self.assertEqual(res[2], 'i386')
else:
self.assertEqual(res[2], 'PowerPC')
@unittest.skipUnless(sys.platform == 'darwin', "OSX only test")
def test_mac_ver_with_fork(self):
# Issue7895: platform.mac_ver() crashes when using fork without exec
#
# This test checks that the fix for that issue works.
#
pid = os.fork()
if pid == 0:
# child
info = platform.mac_ver()
os._exit(0)
else:
# parent
cpid, sts = os.waitpid(pid, 0)
self.assertEqual(cpid, pid)
self.assertEqual(sts, 0)
def test_dist(self):
res = platform.dist()
def test_libc_ver(self):
import os
if os.path.isdir(sys.executable) and \
os.path.exists(sys.executable+'.exe'):
# Cygwin horror
executable = sys.executable + '.exe'
else:
executable = sys.executable
res = platform.libc_ver(executable)
def test_parse_release_file(self):
for input, output in (
# Examples of release file contents:
('SuSE Linux 9.3 (x86-64)', ('SuSE Linux ', '9.3', 'x86-64')),
('SUSE LINUX 10.1 (X86-64)', ('SUSE LINUX ', '10.1', 'X86-64')),
('SUSE LINUX 10.1 (i586)', ('SUSE LINUX ', '10.1', 'i586')),
('Fedora Core release 5 (Bordeaux)', ('Fedora Core', '5', 'Bordeaux')),
('Red Hat Linux release 8.0 (Psyche)', ('Red Hat Linux', '8.0', 'Psyche')),
('Red Hat Linux release 9 (Shrike)', ('Red Hat Linux', '9', 'Shrike')),
('Red Hat Enterprise Linux release 4 (Nahant)', ('Red Hat Enterprise Linux', '4', 'Nahant')),
('CentOS release 4', ('CentOS', '4', None)),
('Rocks release 4.2.1 (Cydonia)', ('Rocks', '4.2.1', 'Cydonia')),
('', ('', '', '')), # If there's nothing there.
):
self.assertEqual(platform._parse_release_file(input), output)
def test_main():
test_support.run_unittest(
PlatformTest
)
if __name__ == '__main__':
test_main()
| lgpl-2.1 |
PaulPetring/zulip | zerver/management/commands/enqueue_file.py | 115 | 1715 | from __future__ import absolute_import
from django.core.management.base import BaseCommand
from zerver.lib.queue import queue_json_publish
import sys
import ujson
def error(*args):
raise Exception('We cannot enqueue because settings.USING_RABBITMQ is False.')
class Command(BaseCommand):
help = """Read JSON lines from a file and enqueue them to a worker queue.
Each line in the file should either be a JSON payload or two tab-separated
    fields, the second of which is a JSON payload. (The latter is to accommodate
the format of error files written by queue workers that catch exceptions--their
first field is a timestamp that we ignore.)
You can use "-" to represent stdin.
"""
def add_arguments(self, parser):
parser.add_argument('queue_name', metavar='<queue>', type=str,
help="name of worker queue to enqueue to")
parser.add_argument('file_name', metavar='<file>', type=str,
help="name of file containing JSON lines")
def handle(self, *args, **options):
queue_name = options['queue_name']
file_name = options['file_name']
if file_name == '-':
f = sys.stdin
else:
f = open(file_name)
while True:
line = f.readline()
if not line:
break
line = line.strip()
try:
payload = line.split('\t')[1]
except IndexError:
payload = line
print 'Queueing to queue %s: %s' % (queue_name, payload)
# Verify that payload is valid json.
data = ujson.loads(payload)
queue_json_publish(queue_name, data, error)
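            # Example input line in the two-field format (the timestamp field
            # is ignored): 2015-08-01T12:00:00<TAB>{"foo": "bar"}
            # Either format enqueues the parsed JSON payload onto <queue>.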
| apache-2.0 |
opennode/waldur-mastermind | src/waldur_core/structure/tests/unittests/test_handlers.py | 1 | 5755 | from unittest import mock
from django.test import TestCase
from waldur_core.structure import models as structure_models
from waldur_core.structure.tests import factories, fixtures
class LogProjectSaveTest(TestCase):
@mock.patch('waldur_core.structure.handlers.event_logger')
def test_logger_called_once_on_project_create(self, logger_mock):
new_project = factories.ProjectFactory()
logger_mock.project.info.assert_called_once_with(
'Project {project_name} has been created.',
event_type='project_creation_succeeded',
event_context={'project': new_project,},
)
def test_logger_called_once_on_project_name_update(self):
new_project = factories.ProjectFactory()
old_name = new_project.name
with mock.patch('waldur_core.structure.handlers.event_logger') as logger_mock:
new_project.name = 'new name'
new_project.save()
logger_mock.project.info.assert_called_once_with(
"Project {project_name} has been updated. Name has been changed from '%s' to '%s'."
% (old_name, new_project.name,),
event_type='project_update_succeeded',
event_context={'project': new_project,},
)
def test_logger_logs_project_name_and_description_when_updated(self):
new_project = factories.ProjectFactory(description='description', name='name')
with mock.patch('waldur_core.structure.handlers.event_logger') as logger_mock:
new_project.name = 'new name'
new_project.description = 'new description'
new_project.save()
expected_message = (
'Project {project_name} has been updated.'
" Description has been changed from 'description' to 'new description'."
" Name has been changed from 'name' to 'new name'."
)
logger_mock.project.info.assert_called_once_with(
expected_message,
event_type='project_update_succeeded',
event_context={'project': new_project,},
)
class LogRoleEventTest(TestCase):
def test_logger_called_when_customer_role_is_granted(self):
fixture = fixtures.CustomerFixture()
owner = fixture.owner
with mock.patch(
'waldur_core.structure.handlers.event_logger.customer_role.info'
) as logger_mock:
fixture.customer.add_user(
fixture.user, structure_models.CustomerRole.OWNER, owner
)
logger_mock.assert_called_once_with(
'User {affected_user_username} has gained role of {role_name} in customer {customer_name}.',
event_type='role_granted',
event_context={
'customer': fixture.customer,
'user': fixture.owner,
'affected_user': fixture.user,
'structure_type': 'customer',
'role_name': 'Owner',
},
)
def test_logger_called_when_customer_role_is_revoked(self):
fixture = fixtures.CustomerFixture()
owner = fixture.owner
with mock.patch(
'waldur_core.structure.handlers.event_logger.customer_role.info'
) as logger_mock:
fixture.customer.remove_user(
owner, structure_models.CustomerRole.OWNER, fixture.staff
)
logger_mock.assert_called_once_with(
'User {affected_user_username} has lost role of {role_name} in customer {customer_name}.',
event_type='role_revoked',
event_context={
'customer': fixture.customer,
'user': fixture.staff,
'affected_user': fixture.owner,
'structure_type': 'customer',
'role_name': 'Owner',
},
)
def test_logger_called_when_project_role_is_granted(self):
fixture = fixtures.ProjectFixture()
with mock.patch(
'waldur_core.structure.handlers.event_logger.project_role.info'
) as logger_mock:
fixture.project.add_user(
fixture.user, structure_models.ProjectRole.MANAGER, fixture.owner
)
logger_mock.assert_called_once_with(
'User {affected_user_username} has gained role of {role_name} in project {project_name}.',
event_type='role_granted',
event_context={
'project': fixture.project,
'user': fixture.owner,
'affected_user': fixture.user,
'structure_type': 'project',
'role_name': 'Manager',
},
)
def test_logger_called_when_project_role_is_revoked(self):
fixture = fixtures.ProjectFixture()
manager = fixture.manager
with mock.patch(
'waldur_core.structure.handlers.event_logger.project_role.info'
) as logger_mock:
fixture.project.remove_user(
manager, structure_models.ProjectRole.MANAGER, fixture.owner
)
logger_mock.assert_called_once_with(
'User {affected_user_username} has revoked role of {role_name} in project {project_name}.',
event_type='role_revoked',
event_context={
'project': fixture.project,
'user': fixture.owner,
'affected_user': fixture.manager,
'structure_type': 'project',
'role_name': 'Manager',
},
)
| mit |
bonus-tm/euforia | events/distribute.py | 1 | 2523 | # Распределение зерна на еду и на посев
import re
import data, ask, act
import say  # console-output helper used below
from event import Event
class Distribute(Event):
"""Распределение зерна"""
#
def start(self):
"""docstring for start"""
self.min_for_food = data.resources['peasant'] + data.resources['soldier'] + 1
self.min_for_seed = min(data.resources['peasant'], data.resources['land'])
if ask.yesno("Желаете сами распорядиться запасами зерна?") \
or self.min_for_food + self.min_for_seed > data.resources['corn']:
self.manually()
msg = "Остаток зерна в амбарах: {:>10n} тонн"
else:
self.automatically()
msg = "Излишки зерна в амбарах: {:>10n} тонн"
data.resources['corn'] -= data.corn_for_food
data.resources['corn'] -= data.corn_for_seed
say.line(msg.format(data.resources['corn']))
#
    def automatically(self):
        """Default allocation."""
        data.corn_for_food = self.min_for_food
        data.corn_for_seed = self.min_for_seed
        say.erase_line()
        say.line("--> Allotted ration: {:>10n} tons of grain".format(self.min_for_food + self.min_for_seed))
#
def manually(self):
""" Сколько зерна на еду, сколько на посев """
[data.corn_for_food, data.corn_for_seed] = ask.corn(data.resources['corn'])
#
def corn(max, input_func=input, try_once=False):
    """Ask how many tons of grain go to food and how many to sowing."""
    # The built-in input() is assumed as the default reader here; food and
    # seed start at -1 so a failed parse still returns a defined result.
    food = seed = -1
    done = False
    while not done:
        say.erase_line()
        say.word("How many tons of grain for food, and how many for sowing?")
        answer = re.findall('[0-9]+', input_func())
        try:
            food = int(answer[0])
            seed = int(answer[1])
            if food > max:
                done = try_once
                food = -1
                seed = -1
            elif food + seed > max:
                done = True
                seed = max - food
            else:
                done = True
        except (ValueError, IndexError):
            done = try_once
    return [food, seed]
| mit |
davidovich/pip | pip/commands/uninstall.py | 798 | 2884 | from __future__ import absolute_import
import pip
from pip.wheel import WheelCache
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.basecommand import Command
from pip.exceptions import InstallationError
class UninstallCommand(Command):
"""
Uninstall packages.
pip is able to uninstall most installed packages. Known exceptions are:
- Pure distutils packages installed with ``python setup.py install``, which
leave behind no metadata to determine what files were installed.
- Script wrappers installed by ``python setup.py develop``.
"""
name = 'uninstall'
usage = """
%prog [options] <package> ...
%prog [options] -r <requirements file> ..."""
summary = 'Uninstall packages.'
def __init__(self, *args, **kw):
super(UninstallCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Uninstall all the packages listed in the given requirements '
'file. This option can be used multiple times.',
)
self.cmd_opts.add_option(
'-y', '--yes',
dest='yes',
action='store_true',
help="Don't ask for confirmation of uninstall deletions.")
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
with self._build_session(options) as session:
format_control = pip.index.FormatControl(set(), set())
wheel_cache = WheelCache(options.cache_dir, format_control)
requirement_set = RequirementSet(
build_dir=None,
src_dir=None,
download_dir=None,
isolated=options.isolated_mode,
session=session,
wheel_cache=wheel_cache,
)
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(
name, isolated=options.isolated_mode,
wheel_cache=wheel_cache
)
)
for filename in options.requirements:
for req in parse_requirements(
filename,
options=options,
session=session,
wheel_cache=wheel_cache):
requirement_set.add_requirement(req)
if not requirement_set.has_requirements:
raise InstallationError(
'You must give at least one requirement to %(name)s (see '
'"pip help %(name)s")' % dict(name=self.name)
)
requirement_set.uninstall(auto_confirm=options.yes)
| mit |
Yelp/dumb-init | tests/exit_status_test.py | 1 | 1237 | import signal
import sys
from subprocess import Popen
import pytest
@pytest.mark.parametrize('exit_status', [0, 1, 2, 32, 64, 127, 254, 255])
@pytest.mark.usefixtures('both_debug_modes', 'both_setsid_modes')
def test_exit_status_regular_exit(exit_status):
"""dumb-init should exit with the same exit status as the process that it
supervises when that process exits normally.
"""
proc = Popen(('dumb-init', 'sh', '-c', 'exit {}'.format(exit_status)))
proc.wait()
assert proc.returncode == exit_status
@pytest.mark.parametrize(
'signal', [
signal.SIGTERM,
signal.SIGHUP,
signal.SIGQUIT,
signal.SIGKILL,
],
)
@pytest.mark.usefixtures('both_debug_modes', 'both_setsid_modes')
def test_exit_status_terminated_by_signal(signal):
"""dumb-init should exit with status 128 + signal when the child process is
terminated by a signal.
"""
# We use Python because sh is "dash" on Debian and "bash" on others.
# https://github.com/Yelp/dumb-init/issues/115
proc = Popen((
'dumb-init', sys.executable, '-c', 'import os; os.kill(os.getpid(), {})'.format(
signal,
),
))
proc.wait()
assert proc.returncode == 128 + signal
| mit |
sdphome/UHF_Reader | rfs/rootfs/usr/lib/python2.7/_pyio.py | 28 | 69294 | """
Python implementation of the io module.
"""
from __future__ import (print_function, unicode_literals)
import os
import abc
import codecs
import warnings
import errno
# Import thread instead of threading to reduce startup cost
try:
from thread import allocate_lock as Lock
except ImportError:
from dummy_thread import allocate_lock as Lock
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
from errno import EINTR
__metaclass__ = type
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't
# want to inherit the C implementations.
class BlockingIOError(IOError):
"""Exception raised when I/O would block on a non-blocking I/O stream."""
def __init__(self, errno, strerror, characters_written=0):
super(IOError, self).__init__(errno, strerror)
if not isinstance(characters_written, (int, long)):
raise TypeError("characters_written must be a integer")
self.characters_written = characters_written
def open(file, mode="r", buffering=-1,
encoding=None, errors=None,
newline=None, closefd=True):
r"""Open file and return a stream. Raise IOError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file
is opened. It defaults to 'r' which means open for reading in text
mode. Other common values are 'w' for writing (truncating the file if
it already exists), and 'a' for appending (which on some Unix systems,
means that all writes append to the end of the file regardless of the
current seek position). In text mode, if encoding is not specified the
encoding used is platform dependent. (For reading and writing raw
bytes use binary mode and leave encoding unspecified.) The available
modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (for backwards compatibility; unneeded
for new code)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline controls how universal newlines works (it only applies to text
mode). It can be None, '', '\n', '\r', and '\r\n'. It works as
follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
If closefd is False, the underlying file descriptor will be kept open
when the file is closed. This does not work when a file name is given
and must be True in that case.
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (basestring, int, long)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, basestring):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, (int, long)):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, basestring):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, basestring):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("arwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if writing or appending:
raise ValueError("can't use U and writing mode at once")
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd)
result = raw
try:
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (os.error, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return result
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
result = buffer
if binary:
return result
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
result = text
text.mode = mode
return result
except:
result.close()
raise
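# For example, open('data.bin', 'rb', buffering=0) returns the FileIO object
# itself, open('data.bin', 'rb') wraps it in a BufferedReader, and
# open('notes.txt', 'r') adds a TextIOWrapper on top of the buffer.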
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pythonrun.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
class UnsupportedOperation(ValueError, IOError):
pass
class IOBase:
__metaclass__ = abc.ABCMeta
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise a IOError when operations they do not support are called.
The basic type used for binary data read from or written to a file is
bytes. bytearrays are accepted too, and in some cases (such as
readinto) needed. Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise IOError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
        with open('spam.txt', 'w') as fp:
            fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset pos. Argument pos is
interpreted relative to the position indicated by whence. Values
for whence are:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Return the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
try:
self.flush()
finally:
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return whether object supports random access.
If False, seek(), tell() and truncate() will raise IOError.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise an IOError if file is not seekable
"""
if not self.seekable():
raise IOError("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return whether object was opened for reading.
If False, read() will raise IOError.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise an IOError if file is not readable
"""
if not self.readable():
raise IOError("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return whether object was opened for writing.
If False, write() and truncate() will raise IOError.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise an IOError if file is not writable
"""
if not self.writable():
raise IOError("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise an ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self):
"""Context management protocol. Returns self."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor if one exists.
An IOError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, limit=-1):
r"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if limit >= 0:
n = min(n, limit)
return n
else:
def nreadahead():
return 1
if limit is None:
limit = -1
elif not isinstance(limit, (int, long)):
raise TypeError("limit must be an integer")
res = bytearray()
while limit < 0 or len(res) < limit:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is not None and not isinstance(hint, (int, long)):
raise TypeError("integer or None expected")
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
io.IOBase.register(IOBase)
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, n=-1):
"""Read and return up to n bytes.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if n is None:
n = -1
if n < 0:
return self.readall()
b = bytearray(n.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read up to len(b) bytes into b.
Returns number of bytes read (0 for EOF), or None if the object
is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than len(b).
"""
self._unsupported("write")
io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
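# Example: a raw stream only needs readinto() as a primitive; the
# read() and readall() defaults above are derived from it.  A minimal
# sketch (the OneByte class is illustrative, not part of this module):
#
#     >>> class OneByte(RawIOBase):
#     ...     def readable(self):
#     ...         return True
#     ...     def readinto(self, b):
#     ...         b[:1] = b"x"     # always supply a single byte
#     ...         return 1
#     >>> OneByte().read(3)        # read() delegates to readinto()
#     b'x'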
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, n=None):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, n=None):
"""Read up to n bytes with at most one read() system call."""
self._unsupported("read1")
def readinto(self, b):
"""Read up to len(b) bytes into b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
# XXX This ought to work with anything that supports the buffer API
data = self.read(len(b))
n = len(data)
try:
b[:n] = data
except TypeError as err:
import array
if not isinstance(b, array.array):
raise err
b[:n] = array.array(b'b', data)
return n
def write(self, b):
"""Write the given buffer to the IO stream.
Return the number of bytes written, which is never less than
len(b).
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)
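# Example: unlike RawIOBase, the buffered default above derives
# readinto() from read().  A sketch with BytesIO (a concrete
# BufferedIOBase defined below):
#
#     >>> buf = bytearray(4)
#     >>> BytesIO(b"abcdef").readinto(buf)   # fills buf via read(len(buf))
#     4
#     >>> bytes(buf)
#     b'abcd'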
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise IOError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise IOError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush of closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
try:
# may raise BlockingIOError or BrokenPipeError etc
self.flush()
finally:
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
def readable(self):
return self.raw.readable()
def writable(self):
return self.raw.writable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __repr__(self):
clsname = self.__class__.__name__
try:
name = self.name
except AttributeError:
return "<_pyio.{0}>".format(clsname)
else:
return "<_pyio.{0} name={1!r}>".format(clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf.extend(initial_bytes)
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def read(self, n=None):
if self.closed:
raise ValueError("read from closed file")
if n is None:
n = -1
if not isinstance(n, (int, long)):
raise TypeError("integer argument expected, got {0!r}".format(
type(n)))
if n < 0:
n = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + n)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, n):
"""This is the same as read.
"""
return self.read(n)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, unicode):
raise TypeError("can't write unicode to binary stream")
n = len(b)
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos.__index__
except AttributeError:
raise TypeError("an integer is required")
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("invalid whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos.__index__
except AttributeError:
raise TypeError("an integer is required")
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def writable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
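# Example: seeking past the end and then writing pads the gap with
# null bytes, as implemented in BytesIO.write() above:
#
#     >>> b = BytesIO(b"ab")
#     >>> b.seek(4)
#     4
#     >>> b.write(b"cd")
#     2
#     >>> b.getvalue()
#     b'ab\x00\x00cd'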
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
    A buffer for a readable, sequential RawIOBase object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise IOError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, n=None):
"""Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If n is negative, read until EOF or until read() would
block.
"""
if n is not None and n < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(n)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
try:
chunk = self.raw.read()
except IOError as e:
if e.errno != EINTR:
raise
continue
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
try:
chunk = self.raw.read(wanted)
except IOError as e:
if e.errno != EINTR:
raise
continue
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
        # n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, n=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(n)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
while True:
try:
current = self.raw.read(to_read)
except IOError as e:
if e.errno != EINTR:
raise
continue
break
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, n):
"""Reads up to n bytes, with at most one read() system call."""
# Returns up to n bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if n < 0:
raise ValueError("number of bytes to read must be positive")
if n == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(n, len(self._read_buf) - self._read_pos))
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
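# Example: peek() returns buffered bytes without consuming them, while
# read1() consumes at most one raw read's worth.  A sketch wrapping a
# BytesIO (not a true raw stream, but readable, which is all
# BufferedReader checks here):
#
#     >>> r = BufferedReader(BytesIO(b"abcdef"), buffer_size=4)
#     >>> r.peek(1)[:1]   # at least one byte, position unchanged
#     b'a'
#     >>> r.read1(100)    # only what one raw read buffered
#     b'abcd'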
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
_warning_stack_offset = 2
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
if not raw.writable():
raise IOError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
if max_buffer_size is not None:
warnings.warn("max_buffer_size is deprecated", DeprecationWarning,
self._warning_stack_offset)
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, unicode):
raise TypeError("can't write unicode to binary stream")
with self._write_lock:
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer. (This may
# raise BlockingIOError with characters_written == 0.)
self._flush_unlocked()
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush of closed file")
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except BlockingIOError:
raise RuntimeError("self.raw should implement RawIOBase: it "
"should not raise BlockingIOError")
except IOError as e:
if e.errno != EINTR:
raise
continue
if n is None:
raise BlockingIOError(
errno.EAGAIN,
"write could not complete without blocking", 0)
if n > len(self._write_buf) or n < 0:
raise IOError("write() returned incorrect number of bytes")
del self._write_buf[:n]
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
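# Example: writes accumulate in the internal buffer and reach the raw
# stream only on flush() or overflow.  A sketch with a BytesIO as the
# underlying writable stream:
#
#     >>> raw = BytesIO()
#     >>> w = BufferedWriter(raw, buffer_size=8)
#     >>> w.write(b"abc")
#     3
#     >>> raw.getvalue()   # nothing written through yet
#     b''
#     >>> w.flush()
#     >>> raw.getvalue()
#     b'abc'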
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
"""Constructor.
The arguments are two RawIO instances.
"""
if max_buffer_size is not None:
warnings.warn("max_buffer_size is deprecated", DeprecationWarning, 2)
if not reader.readable():
raise IOError('"reader" argument must be readable.')
if not writer.writable():
raise IOError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, n=None):
if n is None:
n = -1
return self.reader.read(n)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, n=0):
return self.reader.peek(n)
def read1(self, n):
return self.reader.read1(n)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
self.writer.close()
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
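# Example: the pair simply delegates to its two halves; note that the
# closed property above only reflects the writer side.  A sketch with
# two BytesIO objects standing in for the raw endpoints:
#
#     >>> pair = BufferedRWPair(BytesIO(b"in"), BytesIO())
#     >>> pair.read(2)
#     b'in'
#     >>> pair.write(b"out")
#     3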
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
_warning_stack_offset = 3
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size)
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise IOError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, n=None):
if n is None:
n = -1
self.flush()
return BufferedReader.read(self, n)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, n=0):
self.flush()
return BufferedReader.peek(self, n)
def read1(self, n):
self.flush()
return BufferedReader.read1(self, n)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
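# Example: BufferedRandom coordinates the two buffers -- a write after
# a read first undoes the readahead, as write() above does:
#
#     >>> rw = BufferedRandom(BytesIO(b"abcdef"))
#     >>> rw.read(2)
#     b'ab'
#     >>> rw.write(b"XY")   # overwrite at the logical position 2
#     2
#     >>> rw.seek(0)
#     0
#     >>> rw.read()
#     b'abXYef'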
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, n=-1):
"""Read at most n characters from stream.
Read from underlying buffer until we have n characters or we hit EOF.
If n is negative or omitted, read until EOF.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
        # decode input (prepending any \r retained from a previous pass)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
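# Example: the pendingcr logic above holds a trailing '\r' back, so a
# '\r\n' split across two chunks is still returned in one piece:
#
#     >>> d = IncrementalNewlineDecoder(None, translate=False)
#     >>> d.decode("abc\r")            # the final '\r' is held back
#     'abc'
#     >>> d.decode("\nxyz", final=True)
#     '\r\nxyz'
#     >>> d.newlines
#     '\r\n'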
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding.
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
    enabled. With this enabled, on input, the line endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False):
if newline is not None and not isinstance(newline, basestring):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding()
if not isinstance(encoding, basestring):
raise ValueError("invalid encoding: %r" % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, basestring):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._line_buffering = line_buffering
self._encoding = encoding
self._errors = errors
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._encoder = None
self._decoder = None
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
try:
name = self.name
except AttributeError:
return "<_pyio.TextIOWrapper encoding='{0}'>".format(self.encoding)
else:
return "<_pyio.TextIOWrapper name={0!r} encoding='{1}'>".format(
name, self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def buffer(self):
return self._buffer
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
try:
self.flush()
finally:
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, unicode):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
eof = not input_chunk
self._set_decoded_chars(self._decoder.decode(input_chunk, eof))
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
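    # Worked example of the packing above (illustrative):
    # _pack_cookie(10, dec_flags=1, bytes_to_feed=3) produces the
    # integer 10 + (1 << 64) + (3 << 128), and _unpack_cookie()
    # recovers (10, 1, 3, 0, 0) by repeated divmod with 1 << 64,
    # taking need_eof from the remaining top bits.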
def tell(self):
if not self._seekable:
raise IOError("underlying stream is not seekable")
if not self._telling:
raise IOError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Note our initial start point.
decoder.setstate((b'', dec_flags))
start_pos = position
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
need_eof = 0
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
for next_byte in next_input:
bytes_fed += 1
chars_decoded += len(decoder.decode(next_byte))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise IOError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise IOError("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise IOError("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise IOError("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return position
if whence != 0:
raise ValueError("invalid whence (%r, should be 0, 1 or 2)" %
(whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise IOError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
# Finally, reset the encoder (merely useful for proper BOM handling)
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if cookie != 0:
encoder.setstate(0)
else:
encoder.reset()
return cookie
def read(self, n=None):
self._checkReadable()
if n is None:
n = -1
decoder = self._decoder or self._get_decoder()
try:
n.__index__
except AttributeError:
raise TypeError("an integer is required")
if n < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have n characters to return.
eof = False
result = self._get_decoded_chars(n)
while len(result) < n and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(n - len(result))
return result
def next(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, limit=None):
if self.closed:
raise ValueError("read from closed file")
if limit is None:
limit = -1
elif not isinstance(limit, (int, long)):
raise TypeError("limit must be an integer")
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if limit >= 0 and len(line) >= limit:
endpos = limit # reached length limit
break
            # No line ending seen yet - get more data
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if limit >= 0 and endpos > limit:
endpos = limit # don't exceed limit
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
    The initial_value argument sets the value of the object. The newline
argument is like the one of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="strict",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value:
if not isinstance(initial_value, unicode):
initial_value = unicode(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
decoder = self._decoder or self._get_decoder()
old_state = decoder.getstate()
decoder.reset()
try:
return decoder.decode(self.buffer.getvalue(), final=True)
finally:
decoder.setstate(old_state)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
        # that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
| gpl-3.0 |
honnix/oh-my-zsh | plugins/git-prompt/gitstatus.py | 49 | 3299 | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import re
from subprocess import Popen, PIPE, check_output
def get_tagname_or_hash():
"""return tagname if exists else hash"""
# get hash
hash_cmd = ['git', 'rev-parse', '--short', 'HEAD']
hash_ = check_output(hash_cmd).decode('utf-8').strip()
# get tagname
tags_cmd = ['git', 'for-each-ref', '--points-at=HEAD', '--count=2', '--sort=-version:refname', '--format=%(refname:short)', 'refs/tags']
tags = check_output(tags_cmd).decode('utf-8').split()
if tags:
return tags[0] + ('+' if len(tags) > 1 else '')
elif hash_:
return hash_
return None
# Re-use method from https://github.com/magicmonty/bash-git-prompt to get the stash count
def get_stash():
cmd = Popen(['git', 'rev-parse', '--git-dir'], stdout=PIPE, stderr=PIPE)
so, se = cmd.communicate()
stash_file = '%s%s' % (so.decode('utf-8').rstrip(), '/logs/refs/stash')
try:
with open(stash_file) as f:
return sum(1 for _ in f)
except IOError:
return 0
# `git status --porcelain --branch` can collect all information
# branch, remote_branch, untracked, staged, changed, conflicts, ahead, behind
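# Sample of the porcelain output parsed below (illustrative; the exact
# divergence text varies with the git version):
#
#     ## master...origin/master [ahead 1, behind 2]
#     M  staged.py         <- st[0] == 'M': staged
#      M changed.py        <- st[1] == 'M': changed
#     ?? untracked.py      <- untracked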
po = Popen(['git', 'status', '--porcelain', '--branch'], env=dict(os.environ, LANG="C"), stdout=PIPE, stderr=PIPE)
stdout, stderr = po.communicate()
if po.returncode != 0:
sys.exit(0) # Not a git repository
# collect git status information
untracked, staged, changed, conflicts = [], [], [], []
ahead, behind = 0, 0
status = [(line[0], line[1], line[2:]) for line in stdout.decode('utf-8').splitlines()]
for st in status:
if st[0] == '#' and st[1] == '#':
if re.search('Initial commit on', st[2]) or re.search('No commits yet on', st[2]):
branch = st[2].split(' ')[-1]
elif re.search('no branch', st[2]): # detached status
branch = get_tagname_or_hash()
elif len(st[2].strip().split('...')) == 1:
branch = st[2].strip()
else:
# current and remote branch info
branch, rest = st[2].strip().split('...')
if len(rest.split(' ')) == 1:
# remote_branch = rest.split(' ')[0]
pass
else:
# ahead or behind
divergence = ' '.join(rest.split(' ')[1:])
divergence = divergence.lstrip('[').rstrip(']')
for div in divergence.split(', '):
if 'ahead' in div:
ahead = int(div[len('ahead '):].strip())
elif 'behind' in div:
behind = int(div[len('behind '):].strip())
elif st[0] == '?' and st[1] == '?':
untracked.append(st)
else:
if st[1] == 'M':
changed.append(st)
if st[0] == 'U':
conflicts.append(st)
elif st[0] != ' ':
staged.append(st)
stashed = get_stash()
if not changed and not staged and not conflicts and not untracked and not stashed:
clean = 1
else:
clean = 0
out = ' '.join([
branch,
str(ahead),
str(behind),
str(len(staged)),
str(len(conflicts)),
str(len(changed)),
str(len(untracked)),
str(stashed),
str(clean)
])
print(out, end='')
| mit |
MER-GROUP/intellij-community | python/lib/Lib/site-packages/django/core/files/locks.py | 420 | 1791 | """
Portable file locking utilities.
Based partially on an example by Jonathan Feinberg <[email protected]> in the Python
Cookbook, licensed under the Python Software License.
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
Example Usage::
>>> from django.core.files import locks
>>> f = open('./file', 'wb')
>>> locks.lock(f, locks.LOCK_EX)
>>> f.write('Django')
>>> f.close()
"""
__all__ = ('LOCK_EX','LOCK_SH','LOCK_NB','lock','unlock')
system_type = None
try:
import win32con
import win32file
import pywintypes
LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
LOCK_SH = 0
LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
__overlapped = pywintypes.OVERLAPPED()
system_type = 'nt'
except (ImportError, AttributeError):
pass
try:
import fcntl
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
LOCK_NB = fcntl.LOCK_NB
system_type = 'posix'
except (ImportError, AttributeError):
pass
def fd(f):
"""Get a filedescriptor from something which could be a file or an fd."""
return hasattr(f, 'fileno') and f.fileno() or f
if system_type == 'nt':
def lock(file, flags):
hfile = win32file._get_osfhandle(fd(file))
win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped)
def unlock(file):
hfile = win32file._get_osfhandle(fd(file))
win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped)
elif system_type == 'posix':
def lock(file, flags):
fcntl.lockf(fd(file), flags)
def unlock(file):
fcntl.lockf(fd(file), fcntl.LOCK_UN)
else:
# File locking is not supported.
LOCK_EX = LOCK_SH = LOCK_NB = None
# Dummy functions that don't do anything.
def lock(file, flags):
pass
def unlock(file):
pass
| apache-2.0 |
Antiun/bank-statement-reconcile | account_advanced_reconcile/base_advanced_reconciliation.py | 12 | 12243 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Contributor: Leonardo Pistone
# Copyright 2012-2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from itertools import product
from openerp.osv import orm
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class easy_reconcile_advanced(orm.AbstractModel):
_name = 'easy.reconcile.advanced'
_inherit = 'easy.reconcile.base'
def _query_debit(self, cr, uid, rec, context=None):
"""Select all move (debit>0) as candidate. """
select = self._select(rec)
sql_from = self._from(rec)
where, params = self._where(rec)
where += " AND account_move_line.debit > 0 "
where2, params2 = self._get_filter(cr, uid, rec, context=context)
query = ' '.join((select, sql_from, where, where2))
cr.execute(query, params + params2)
return cr.dictfetchall()
def _query_credit(self, cr, uid, rec, context=None):
"""Select all move (credit>0) as candidate. """
select = self._select(rec)
sql_from = self._from(rec)
where, params = self._where(rec)
where += " AND account_move_line.credit > 0 "
where2, params2 = self._get_filter(cr, uid, rec, context=context)
query = ' '.join((select, sql_from, where, where2))
cr.execute(query, params + params2)
return cr.dictfetchall()
def _matchers(self, cr, uid, rec, move_line, context=None):
"""
Return the values used as matchers to find the opposite lines
All the matcher keys in the dict must have their equivalent in
the `_opposite_matchers`.
The values of each matcher key will be searched in the
one returned by the `_opposite_matchers`
Must be inherited to implement the matchers for one method
        For instance, it can return:
        return ('ref', move_line['ref'])
        or
        return (('partner_id', move_line['partner_id']),
                ('ref', "prefix_%s" % move_line['ref']))
All the matchers have to be found in the opposite lines
to consider them as "opposite"
The matchers will be evaluated in the same order as declared
        vs the opposite matchers, so you can gain performance by
        declaring first the matchers which need the least computation.
All matchers should match with their opposite to be considered
as "matching".
So with the previous example, partner_id and ref have to be
equals on the opposite line matchers.
:return: tuple of tuples (key, value) where the keys are
the matchers keys
                 (must be the same as `_opposite_matchers` returns),
                 and their values to match in the opposite lines.
A matching key can have multiples values.
"""
raise NotImplementedError
def _opposite_matchers(self, cr, uid, rec, move_line, context=None):
"""
Return the values of the opposite line used as matchers
so the line is matched
Must be inherited to implement the matchers for one method
It can be inherited to apply some formatting of fields
(strip(), lower() and so on)
This method is the counterpart of the `_matchers()` method.
Each matcher has to yield its value respecting the order
of the `_matchers()`.
When a matcher does not correspond, the next matchers won't
        be evaluated, so the ones which need the least computation
have to be executed first.
If the `_matchers()` returns:
(('partner_id', move_line['partner_id']),
('ref', move_line['ref']))
        Here, you should yield:
yield ('partner_id', move_line['partner_id'])
yield ('ref', move_line['ref'])
        Note that a matcher can contain multiple values; for instance,
if for a move line, you want to search from its `ref` in the
`ref` or `name` fields of the opposite move lines, you have to
yield ('partner_id', move_line['partner_id'])
        yield ('ref', (move_line['ref'], move_line['name']))
An OR is used between the values for the same key.
        An AND is used between the different keys.
:param dict move_line: values of the move_line
:yield: matchers as tuple ('matcher key', value(s))
"""
raise NotImplementedError
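    # A minimal sketch of a concrete pair of matchers (illustrative
    # only, not part of this module): match lines of the same partner
    # sharing the same normalized reference.
    #
    #     def _matchers(self, cr, uid, rec, move_line, context=None):
    #         return (('partner_id', move_line['partner_id']),
    #                 ('ref', (move_line['ref'] or '').lower().strip()))
    #
    #     def _opposite_matchers(self, cr, uid, rec, move_line,
    #                            context=None):
    #         yield ('partner_id', move_line['partner_id'])
    #         yield ('ref', (move_line['ref'] or '').lower().strip())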
@staticmethod
def _compare_values(key, value, opposite_value):
"""Can be inherited to modify the equality condition
specifically according to the matcher key (maybe using
        a like operator instead of equality on 'ref', for instance)
"""
# consider that empty vals are not valid matchers
# it can still be inherited for some special cases
# where it would be allowed
if not (value and opposite_value):
return False
if value == opposite_value:
return True
return False
@staticmethod
def _compare_matcher_values(key, values, opposite_values):
""" Compare every values from a matcher vs an opposite matcher
and return True if it matches
"""
for value, ovalue in product(values, opposite_values):
# we do not need to compare all values, if one matches
# we are done
if easy_reconcile_advanced._compare_values(key, value, ovalue):
return True
return False
@staticmethod
def _compare_matchers(matcher, opposite_matcher):
"""
Prepare and check the matchers to compare
"""
mkey, mvalue = matcher
omkey, omvalue = opposite_matcher
assert mkey == omkey, \
(_("A matcher %s is compared with a matcher %s, the _matchers and "
"_opposite_matchers are probably wrong") % (mkey, omkey))
if not isinstance(mvalue, (list, tuple)):
mvalue = mvalue,
if not isinstance(omvalue, (list, tuple)):
omvalue = omvalue,
return easy_reconcile_advanced._compare_matcher_values(mkey, mvalue,
omvalue)
def _compare_opposite(self, cr, uid, rec, move_line, opposite_move_line,
matchers, context=None):
""" Iterate over the matchers of the move lines vs opposite move lines
and if they all match, return True.
If all the matchers match for a move line and an opposite move line,
they are candidate for a reconciliation.
"""
opp_matchers = self._opposite_matchers(cr, uid, rec,
opposite_move_line,
context=context)
for matcher in matchers:
try:
opp_matcher = opp_matchers.next()
except StopIteration:
                # if you get here, you probably forgot to put a `yield`
# in `_opposite_matchers()`
raise ValueError("Missing _opposite_matcher: %s" % matcher[0])
if not self._compare_matchers(matcher, opp_matcher):
# if any of the matcher fails, the opposite line
# is not a valid counterpart
# directly returns so the next yield of _opposite_matchers
# are not evaluated
return False
return True
def _search_opposites(self, cr, uid, rec, move_line, opposite_move_lines,
context=None):
"""Search the opposite move lines for a move line
:param dict move_line: the move line for which we search opposites
:param list opposite_move_lines: list of dict of move lines values,
the move lines we want to search for
:return: list of matching lines
"""
matchers = self._matchers(cr, uid, rec, move_line, context=context)
return [op for op in opposite_move_lines if
self._compare_opposite(
cr, uid, rec, move_line, op, matchers, context=context)]
def _action_rec(self, cr, uid, rec, context=None):
credit_lines = self._query_credit(cr, uid, rec, context=context)
debit_lines = self._query_debit(cr, uid, rec, context=context)
result = self._rec_auto_lines_advanced(
cr, uid, rec, credit_lines, debit_lines, context=context)
return result
def _skip_line(self, cr, uid, rec, move_line, context=None):
"""
        When True is returned, the credit move line will be skipped
        for reconciliation. Can be inherited to skip on some
        conditions, e.g. when ref or partner_id is empty.
"""
return False
def _rec_auto_lines_advanced(self, cr, uid, rec, credit_lines, debit_lines,
context=None):
""" Advanced reconciliation main loop """
reconciled_ids = []
partial_reconciled_ids = []
reconcile_groups = []
_logger.info("%d credit lines to reconcile", len(credit_lines))
for idx, credit_line in enumerate(credit_lines, start=1):
if idx % 50 == 0:
_logger.info("... %d/%d credit lines inspected ...", idx,
len(credit_lines))
if self._skip_line(cr, uid, rec, credit_line, context=context):
continue
opposite_lines = self._search_opposites(
cr, uid, rec, credit_line, debit_lines, context=context)
if not opposite_lines:
continue
opposite_ids = [l['id'] for l in opposite_lines]
line_ids = opposite_ids + [credit_line['id']]
for group in reconcile_groups:
if any([lid in group for lid in opposite_ids]):
_logger.debug("New lines %s matched with an existing "
"group %s", line_ids, group)
group.update(line_ids)
break
else:
_logger.debug("New group of lines matched %s", line_ids)
reconcile_groups.append(set(line_ids))
lines_by_id = dict([(l['id'], l) for l in credit_lines + debit_lines])
_logger.info("Found %d groups to reconcile", len(reconcile_groups))
for group_count, reconcile_group_ids in enumerate(reconcile_groups,
start=1):
_logger.debug("Reconciling group %d/%d with ids %s",
group_count, len(reconcile_groups),
reconcile_group_ids)
group_lines = [lines_by_id[lid] for lid in reconcile_group_ids]
reconciled, full = self._reconcile_lines(
cr, uid, rec, group_lines, allow_partial=True, context=context)
if reconciled and full:
reconciled_ids += reconcile_group_ids
elif reconciled:
partial_reconciled_ids += reconcile_group_ids
            if (context and context.get('commit_every') and
group_count % context['commit_every'] == 0):
cr.commit()
_logger.info("Commit the reconciliations after %d groups",
group_count)
_logger.info("Reconciliation is over")
return reconciled_ids, partial_reconciled_ids
| agpl-3.0 |
mdaniel/intellij-community | python/helpers/py3only/docutils/languages/da.py | 50 | 1872 | # -*- coding: utf-8 -*-
# $Id: da.py 7678 2013-07-03 09:57:36Z milde $
# Author: E D
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Danish-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
'author': 'Forfatter',
'authors': 'Forfattere',
'organization': 'Organisation',
'address': 'Adresse',
'contact': 'Kontakt',
'version': 'Version',
'revision': 'Revision',
'status': 'Status',
'date': 'Dato',
'copyright': 'Copyright',
'dedication': 'Dedikation',
'abstract': 'Resumé',
'attention': 'Giv agt!',
'caution': 'Pas på!',
'danger': '!FARE!',
'error': 'Fejl',
'hint': 'Vink',
'important': 'Vigtigt',
'note': 'Bemærk',
'tip': 'Tips',
'warning': 'Advarsel',
'contents': 'Indhold'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
'forfatter': 'author',
'forfattere': 'authors',
'organisation': 'organization',
'adresse': 'address',
'kontakt': 'contact',
'version': 'version',
'revision': 'revision',
'status': 'status',
'dato': 'date',
'copyright': 'copyright',
'dedikation': 'dedication',
'resume': 'abstract',
'resumé': 'abstract'}
"""Danish (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| apache-2.0 |
a4fr/my_nerd_bot | GoogleAppEngine/lib/werkzeug/script.py | 318 | 11249 | # -*- coding: utf-8 -*-
r'''
werkzeug.script
~~~~~~~~~~~~~~~
.. admonition:: Deprecated Functionality
``werkzeug.script`` is deprecated without replacement functionality.
Python's command line support improved greatly with :mod:`argparse`
and a bunch of alternative modules.
Most of the time you have recurring tasks while writing an application
such as starting up an interactive python interpreter with some prefilled
imports, starting the development server, initializing the database or
something similar.
For that purpose werkzeug provides the `werkzeug.script` module which
helps you write such scripts.
Basic Usage
-----------
The following snippet is roughly the same in every werkzeug script::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from werkzeug import script
# actions go here
if __name__ == '__main__':
script.run()
Starting this script now does nothing because no actions are defined.
An action is a function in the same module starting with ``"action_"``
which takes a number of arguments where every argument has a default. The
type of the default value specifies the type of the argument.
Arguments can then be passed by position or using ``--name=value`` from
the shell.
Because a runserver and shell command is pretty common there are two
factory functions that create such commands::
def make_app():
from yourapplication import YourApplication
return YourApplication(...)
action_runserver = script.make_runserver(make_app, use_reloader=True)
action_shell = script.make_shell(lambda: {'app': make_app()})
Using The Scripts
-----------------
The script from above can be used like this from the shell now:
.. sourcecode:: text
$ ./manage.py --help
$ ./manage.py runserver localhost 8080 --debugger --no-reloader
$ ./manage.py runserver -p 4000
$ ./manage.py shell
As you can see it's possible to pass parameters as positional arguments
or as named parameters, pretty much like Python function calls.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
'''
from __future__ import print_function
import sys
import inspect
import getopt
from os.path import basename
from werkzeug._compat import iteritems
argument_types = {
bool: 'boolean',
str: 'string',
int: 'integer',
float: 'float'
}
converters = {
'boolean': lambda x: x.lower() in ('1', 'true', 'yes', 'on'),
'string': str,
'integer': int,
'float': float
}
def run(namespace=None, action_prefix='action_', args=None):
"""Run the script. Participating actions are looked up in the caller's
namespace if no namespace is given, otherwise in the dict provided.
Only items that start with action_prefix are processed as actions. If
you want to use all items in the namespace provided as actions set
action_prefix to an empty string.
:param namespace: An optional dict where the functions are looked up in.
By default the local namespace of the caller is used.
:param action_prefix: The prefix for the functions. Everything else
is ignored.
:param args: the arguments for the function. If not specified
:data:`sys.argv` without the first argument is used.
"""
if namespace is None:
namespace = sys._getframe(1).f_locals
actions = find_actions(namespace, action_prefix)
if args is None:
args = sys.argv[1:]
if not args or args[0] in ('-h', '--help'):
return print_usage(actions)
elif args[0] not in actions:
fail('Unknown action \'%s\'' % args[0])
arguments = {}
types = {}
key_to_arg = {}
long_options = []
formatstring = ''
func, doc, arg_def = actions[args.pop(0)]
for idx, (arg, shortcut, default, option_type) in enumerate(arg_def):
real_arg = arg.replace('-', '_')
if shortcut:
formatstring += shortcut
if not isinstance(default, bool):
formatstring += ':'
key_to_arg['-' + shortcut] = real_arg
long_options.append(isinstance(default, bool) and arg or arg + '=')
key_to_arg['--' + arg] = real_arg
key_to_arg[idx] = real_arg
types[real_arg] = option_type
arguments[real_arg] = default
try:
optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options)
except getopt.GetoptError as e:
fail(str(e))
specified_arguments = set()
for key, value in enumerate(posargs):
try:
arg = key_to_arg[key]
except KeyError:  # key_to_arg is a dict, so a surplus positional argument raises KeyError
fail('Too many parameters')
specified_arguments.add(arg)
try:
arguments[arg] = converters[types[arg]](value)
except ValueError:
fail('Invalid value for argument %s (%s): %s' % (key, arg, value))
for key, value in optlist:
arg = key_to_arg[key]
if arg in specified_arguments:
fail('Argument \'%s\' is specified twice' % arg)
if types[arg] == 'boolean':
if arg.startswith('no_'):
value = 'no'
else:
value = 'yes'
try:
arguments[arg] = converters[types[arg]](value)
except ValueError:
fail('Invalid value for \'%s\': %s' % (key, value))
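# analyse_action() renames boolean arguments defaulting to True to
# 'no-<arg>'; strip that prefix again so the action function receives
# its original parameter names.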
newargs = {}
for k, v in iteritems(arguments):
newargs[k.startswith('no_') and k[3:] or k] = v
arguments = newargs
return func(**arguments)
def fail(message, code=-1):
"""Fail with an error."""
print('Error: %s' % message, file=sys.stderr)
sys.exit(code)
def find_actions(namespace, action_prefix):
"""Find all the actions in the namespace."""
actions = {}
for key, value in iteritems(namespace):
if key.startswith(action_prefix):
actions[key[len(action_prefix):]] = analyse_action(value)
return actions
def print_usage(actions):
"""Print the usage information. (Help screen)"""
actions = sorted(actions.items())  # dict views have no .sort(); sorted() works on Python 2 and 3
print('usage: %s <action> [<options>]' % basename(sys.argv[0]))
print(' %s --help' % basename(sys.argv[0]))
print()
print('actions:')
for name, (func, doc, arguments) in actions:
print(' %s:' % name)
for line in doc.splitlines():
print(' %s' % line)
if arguments:
print()
for arg, shortcut, default, argtype in arguments:
if isinstance(default, bool):
print(' %s' % (
(shortcut and '-%s, ' % shortcut or '') + '--' + arg
))
else:
print(' %-30s%-10s%s' % (
(shortcut and '-%s, ' % shortcut or '') + '--' + arg,
argtype, default
))
print()
def analyse_action(func):
"""Analyse a function."""
description = inspect.getdoc(func) or 'undocumented action'
arguments = []
args, varargs, kwargs, defaults = inspect.getargspec(func)
if varargs or kwargs:
raise TypeError('variable length arguments for action not allowed.')
if len(args) != len(defaults or ()):
raise TypeError('not all arguments have proper definitions')
for idx, (arg, definition) in enumerate(zip(args, defaults or ())):
if arg.startswith('_'):
raise TypeError('arguments may not start with an underscore')
if not isinstance(definition, tuple):
shortcut = None
default = definition
else:
shortcut, default = definition
argument_type = argument_types[type(default)]
if isinstance(default, bool) and default is True:
arg = 'no-' + arg
arguments.append((arg.replace('_', '-'), shortcut,
default, argument_type))
return func, description, arguments
def make_shell(init_func=None, banner=None, use_ipython=True):
"""Returns an action callback that spawns a new interactive
python shell.
:param init_func: an optional initialization function that is
called before the shell is started. The return
value of this function is the initial namespace.
:param banner: the banner that is displayed before the shell. If
not specified a generic banner is used instead.
:param use_ipython: if set to `True` ipython is used if available.
"""
if banner is None:
banner = 'Interactive Werkzeug Shell'
if init_func is None:
init_func = dict
def action(ipython=use_ipython):
"""Start a new interactive python session."""
namespace = init_func()
if ipython:
try:
try:
from IPython.frontend.terminal.embed import InteractiveShellEmbed
sh = InteractiveShellEmbed(banner1=banner)
except ImportError:
from IPython.Shell import IPShellEmbed
sh = IPShellEmbed(banner=banner)
except ImportError:
pass
else:
sh(global_ns={}, local_ns=namespace)
return
from code import interact
interact(banner, local=namespace)
return action
def make_runserver(app_factory, hostname='localhost', port=5000,
use_reloader=False, use_debugger=False, use_evalex=True,
threaded=False, processes=1, static_files=None,
extra_files=None, ssl_context=None):
"""Returns an action callback that spawns a new development server.
.. versionadded:: 0.5
   `static_files` and `extra_files` were added.

.. versionadded:: 0.6.1
   `ssl_context` was added.
:param app_factory: a function that returns a new WSGI application.
:param hostname: the default hostname the server should listen on.
:param port: the default port of the server.
:param use_reloader: the default setting for the reloader.
:param use_evalex: the default setting for the evalex flag of the debugger.
:param threaded: the default threading setting.
:param processes: the default number of processes to start.
:param static_files: optional dict of static files.
:param extra_files: optional list of extra files to track for reloading.
:param ssl_context: optional SSL context for running server in HTTPS mode.
"""
def action(hostname=('h', hostname), port=('p', port),
reloader=use_reloader, debugger=use_debugger,
evalex=use_evalex, threaded=threaded, processes=processes):
"""Start a new development server."""
from werkzeug.serving import run_simple
app = app_factory()
run_simple(hostname, port, app, reloader, debugger, evalex,
extra_files, 1, threaded, processes,
static_files=static_files, ssl_context=ssl_context)
return action
| apache-2.0 |
nimasmi/wagtail | wagtail/admin/widgets.py | 3 | 11517 | import itertools
import json
from functools import total_ordering
from django import forms
from django.conf import settings
from django.forms import widgets
from django.forms.utils import flatatt
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.formats import get_format
from django.utils.functional import cached_property
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from taggit.forms import TagWidget
from wagtail.admin.datetimepicker import to_datetimepicker_format
from wagtail.admin.staticfiles import versioned_static
from wagtail.core import hooks
from wagtail.core.models import Page
from wagtail.utils.widgets import WidgetWithScript
DEFAULT_DATE_FORMAT = '%Y-%m-%d'
DEFAULT_DATETIME_FORMAT = '%Y-%m-%d %H:%M'
class AdminAutoHeightTextInput(widgets.Textarea):
template_name = 'wagtailadmin/widgets/auto_height_text_input.html'
def __init__(self, attrs=None):
# Use more appropriate rows default, given autoheight will alter this anyway
default_attrs = {'rows': '1'}
if attrs:
default_attrs.update(attrs)
super().__init__(default_attrs)
class AdminDateInput(widgets.DateInput):
template_name = 'wagtailadmin/widgets/date_input.html'
def __init__(self, attrs=None, format=None):
default_attrs = {'autocomplete': 'new-date'}
fmt = format
if attrs:
default_attrs.update(attrs)
if fmt is None:
fmt = getattr(settings, 'WAGTAIL_DATE_FORMAT', DEFAULT_DATE_FORMAT)
self.js_format = to_datetimepicker_format(fmt)
super().__init__(attrs=default_attrs, format=fmt)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
config = {
'dayOfWeekStart': get_format('FIRST_DAY_OF_WEEK'),
'format': self.js_format,
}
context['widget']['config_json'] = json.dumps(config)
return context
@property
def media(self):
return forms.Media(js=[
versioned_static('wagtailadmin/js/date-time-chooser.js'),
])
class AdminTimeInput(widgets.TimeInput):
template_name = 'wagtailadmin/widgets/time_input.html'
def __init__(self, attrs=None, format='%H:%M'):
default_attrs = {'autocomplete': 'new-time'}
if attrs:
default_attrs.update(attrs)
super().__init__(attrs=default_attrs, format=format)
@property
def media(self):
return forms.Media(js=[
versioned_static('wagtailadmin/js/date-time-chooser.js'),
])
class AdminDateTimeInput(widgets.DateTimeInput):
template_name = 'wagtailadmin/widgets/datetime_input.html'
def __init__(self, attrs=None, format=None):
default_attrs = {'autocomplete': 'new-date-time'}
fmt = format
if attrs:
default_attrs.update(attrs)
if fmt is None:
fmt = getattr(settings, 'WAGTAIL_DATETIME_FORMAT', DEFAULT_DATETIME_FORMAT)
self.js_format = to_datetimepicker_format(fmt)
super().__init__(attrs=default_attrs, format=fmt)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
config = {
'dayOfWeekStart': get_format('FIRST_DAY_OF_WEEK'),
'format': self.js_format,
}
context['widget']['config_json'] = json.dumps(config)
return context
@property
def media(self):
return forms.Media(js=[
versioned_static('wagtailadmin/js/date-time-chooser.js'),
])
class AdminTagWidget(TagWidget):
template_name = 'wagtailadmin/widgets/tag_widget.html'
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['autocomplete_url'] = reverse('wagtailadmin_tag_autocomplete')
context['widget']['tag_spaces_allowed'] = getattr(settings, 'TAG_SPACES_ALLOWED', True)
context['widget']['tag_limit'] = getattr(settings, 'TAG_LIMIT', None)
return context
class AdminChooser(WidgetWithScript, widgets.Input):
input_type = 'hidden'
choose_one_text = _("Choose an item")
choose_another_text = _("Choose another item")
clear_choice_text = _("Clear choice")
link_to_chosen_text = _("Edit this item")
show_edit_link = True
# when looping over form fields, this one should appear in visible_fields, not hidden_fields
# despite the underlying input being type="hidden"
is_hidden = False
def get_instance(self, model_class, value):
# helper method for cleanly turning 'value' into an instance object
if value is None:
return None
try:
return model_class.objects.get(pk=value)
except model_class.DoesNotExist:
return None
def get_instance_and_id(self, model_class, value):
if value is None:
return (None, None)
elif isinstance(value, model_class):
return (value, value.pk)
else:
try:
return (model_class.objects.get(pk=value), value)
except model_class.DoesNotExist:
return (None, None)
def value_from_datadict(self, data, files, name):
# treat the empty string as None
result = super().value_from_datadict(data, files, name)
if result == '':
return None
else:
return result
def __init__(self, **kwargs):
# allow choose_one_text / choose_another_text to be overridden per-instance
if 'choose_one_text' in kwargs:
self.choose_one_text = kwargs.pop('choose_one_text')
if 'choose_another_text' in kwargs:
self.choose_another_text = kwargs.pop('choose_another_text')
if 'clear_choice_text' in kwargs:
self.clear_choice_text = kwargs.pop('clear_choice_text')
if 'link_to_chosen_text' in kwargs:
self.link_to_chosen_text = kwargs.pop('link_to_chosen_text')
if 'show_edit_link' in kwargs:
self.show_edit_link = kwargs.pop('show_edit_link')
super().__init__(**kwargs)
class AdminPageChooser(AdminChooser):
choose_one_text = _('Choose a page')
choose_another_text = _('Choose another page')
link_to_chosen_text = _('Edit this page')
def __init__(self, target_models=None, can_choose_root=False, user_perms=None, **kwargs):
super().__init__(**kwargs)
if target_models:
model_names = [model._meta.verbose_name.title() for model in target_models if model is not Page]
if len(model_names) == 1:
self.choose_one_text += ' (' + model_names[0] + ')'
self.user_perms = user_perms
self.target_models = list(target_models or [Page])
self.can_choose_root = can_choose_root
def _get_lowest_common_page_class(self):
"""
Return a Page class that is an ancestor for all Page classes in
``target_models``, and is also a concrete Page class itself.
"""
if len(self.target_models) == 1:
# Shortcut for a single page type
return self.target_models[0]
else:
return Page
def render_html(self, name, value, attrs):
model_class = self._get_lowest_common_page_class()
instance, value = self.get_instance_and_id(model_class, value)
original_field_html = super().render_html(name, value, attrs)
return render_to_string("wagtailadmin/widgets/page_chooser.html", {
'widget': self,
'original_field_html': original_field_html,
'attrs': attrs,
'value': value,
'page': instance,
})
def render_js_init(self, id_, name, value):
if isinstance(value, Page):
page = value
else:
# Value is an ID; look up the page object from it
model_class = self._get_lowest_common_page_class()
page = self.get_instance(model_class, value)
parent = page.get_parent() if page else None
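# Emits a JS initialiser call; illustrative output (values are examples):
#   createPageChooser("id_page", ["wagtailcore.page"], null, false, null);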
return "createPageChooser({id}, {model_names}, {parent}, {can_choose_root}, {user_perms});".format(
id=json.dumps(id_),
model_names=json.dumps([
'{app}.{model}'.format(
app=model._meta.app_label,
model=model._meta.model_name)
for model in self.target_models
]),
parent=json.dumps(parent.id if parent else None),
can_choose_root=('true' if self.can_choose_root else 'false'),
user_perms=json.dumps(self.user_perms),
)
@property
def media(self):
return forms.Media(js=[
versioned_static('wagtailadmin/js/page-chooser-modal.js'),
versioned_static('wagtailadmin/js/page-chooser.js'),
])
@total_ordering
class Button:
show = True
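# Note: the mutable default arguments below are safe only because they are
# never mutated in place in this module -- attrs is copied, and classes is
# always combined into a new set by subclasses.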
def __init__(self, label, url, classes=set(), attrs={}, priority=1000):
self.label = label
self.url = url
self.classes = classes
self.attrs = attrs.copy()
self.priority = priority
def render(self):
attrs = {'href': self.url, 'class': ' '.join(sorted(self.classes))}
attrs.update(self.attrs)
return format_html('<a{}>{}</a>', flatatt(attrs), self.label)
def __str__(self):
return self.render()
def __repr__(self):
return '<Button: {}>'.format(self.label)
def __lt__(self, other):
if not isinstance(other, Button):
return NotImplemented
return (self.priority, self.label) < (other.priority, other.label)
def __eq__(self, other):
if not isinstance(other, Button):
return NotImplemented
return (self.label == other.label
and self.url == other.url
and self.classes == other.classes
and self.attrs == other.attrs
and self.priority == other.priority)
class PageListingButton(Button):
def __init__(self, label, url, classes=set(), **kwargs):
classes = {'button', 'button-small', 'button-secondary'} | set(classes)
super().__init__(label, url, classes=classes, **kwargs)
class BaseDropdownMenuButton(Button):
def __init__(self, *args, **kwargs):
super().__init__(*args, url=None, **kwargs)
@cached_property
def dropdown_buttons(self):
raise NotImplementedError
def render(self):
return render_to_string(self.template_name, {
'buttons': self.dropdown_buttons,
'label': self.label,
'title': self.attrs.get('title'),
'is_parent': self.is_parent})
class ButtonWithDropdownFromHook(BaseDropdownMenuButton):
template_name = 'wagtailadmin/pages/listing/_button_with_dropdown.html'
def __init__(self, label, hook_name, page, page_perms, is_parent, **kwargs):
self.hook_name = hook_name
self.page = page
self.page_perms = page_perms
self.is_parent = is_parent
super().__init__(label, **kwargs)
@property
def show(self):
return bool(self.dropdown_buttons)
@cached_property
def dropdown_buttons(self):
button_hooks = hooks.get_hooks(self.hook_name)
return sorted(itertools.chain.from_iterable(
hook(self.page, self.page_perms, self.is_parent)
for hook in button_hooks))
| bsd-3-clause |
reed-jpm-alpert/OpenMAMA.233 | site_scons/site_tools/scan-build.py | 13 | 1266 | """SCons.Tool.clang
Tool-specific initialization for clang.
"""
import re
import subprocess
import os
import SCons.Tool
import SCons.Util
sconsmod = __import__ ('SCons.Tool.g++', globals(), locals(), [])
sconstool = getattr (sconsmod, 'Tool')
gplusplus = getattr (sconstool, 'g++')
compilers = ['clang', 'clang++', 'ccc-analyzer', 'c++-analyzer']
def generate(env):
"""Setup GCC and G++ Env first"""
SCons.Tool.gcc.generate(env)
gplusplus.generate(env)
if env.GetOption('clean'):
return
# Use .get() so an unset CC/CXX falls back to the analyzer default
# instead of raising KeyError.
env['CC'] = os.environ.get('CC') or 'ccc-analyzer'
env['CXX'] = os.environ.get('CXX') or 'c++-analyzer'
env['CLANG'] = os.environ['CLANG']
env['CLANG_CXX'] = os.environ['CLANG_CXX']
env['ENV']['CC'] = env['CC']
env['ENV']['CXX'] = env['CXX']
env['ENV']['CLANG'] = env['CLANG']
env['ENV']['CLANG_CXX'] = env['CLANG_CXX']
for item in os.environ.items():
if item[0].startswith('CCC_'):
env[item[0]] = item[1]
env['ENV'][item[0]] = item[1]
env['CXXFLAGS'].append('-fcolor-diagnostics')
env['CFLAGS'].append('-fcolor-diagnostics')
env['ENV']['CXXFLAGS'] = env['CXXFLAGS']
env['ENV']['CFLAGS'] = env['CFLAGS']
def exists(env):
return env.Detect(compilers)
| lgpl-2.1 |
challenge19/conpy | classify/classify_node.py | 1 | 2070 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import networkx as nx
import numpy as np
import random
import Lib_gen_Graph as lig
import Lib_cal_control as lcc
import classify_by_add_node as can
import classify_by_renorm.classify_by_renorm_undirected as cru
#import StructuralRenorm.package.Lib_struct_renorm as lsr
import os
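# Presumably classifies the nodes of random graphs as critical /
# intermittent / redundant (the nc / ni / nr counts below) with respect to
# network controllability; this reading is an assumption based on the
# module and function names.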
def cal_undirected(t,j,N):
print N,j,t,os.getpid()
#~ G = lig.scale_free_static(N,j,0.5,0.5,directed=False)
#~ G = lig.continuous_BA(N,j)
G = nx.erdos_renyi_graph(N,np.log(N)*j/N)
G = lcc.add_weight(G, 1)
#~ _, rk = lcc.cal_exact_Nd_simple(G)
nc, ni, nr = 0,0,0
#~ nc, ni, nr = can.classify_undirected_graph(G)
#~ nc, ni, nr = len(nc), len(ni), len(nr)
#~ print nc, ni, nr
nc, ni, nr = cru.classify_undirected_graph(G)
nc, ni, nr = len(nc), len(ni), len(nr)
return (j,1.0*nc/N,1.0*nr/N,1.0*ni/N)
def cal_directed(t,j,N):
print N,j,t,os.getpid()
#~ G = lig.scale_free_static(N,j,0.5,0.5,directed=False)
#~ G = lig.continuous_BA(N,j)
G = nx.erdos_renyi_graph(N,np.log(N)*j/N, directed=True)
#~ G = lcc.add_weight(G, 1)
nc, ni, nr = 0,0,0
nc, ni, nr = can.classify_directed_graph(G)
nc, ni, nr = len(nc), len(ni), len(nr)
return (j,1.0*nc/N,1.0*nr/N,1.0*ni/N)
def cal_multiprocess():
from multiprocessing import Pool
pool = Pool(4)
simtime = 100
for N in [100]:
for y in xrange(50):
#~ results = [pool.apply_async(cal_classification,(x,j,N)) for j in np.arange(0.1, 0.5, 0.05)]
results = [pool.apply_async(cal_undirected,(x,j,N)) for j in np.arange(0.1, 1.5, 0.1) for x in xrange(simtime)]
roots = [r.get() for r in results]
#~ with open('degree-redundant-UNDIR.txt-ba'+str(N),'a') as f:
with open('renorm-weighted-degree-nr-UNDIR.txt-er'+str(N),'a') as f:
for ak,nc, nr, ni in roots:
print>>f, ak, nc, nr, ni
if __name__ == '__main__':
while 1:
print cal_undirected(1, 0.5, 100)
#~ cal_multiprocess()
| mit |
IronLanguages/ironpython2 | Tests/test_ipyc.py | 2 | 14655 | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
##
## Testing IronPython Compiler
##
from iptest.assert_util import *
skiptest("win32")
from iptest.file_util import *
from iptest.process_util import *
import sys
import os
import System
from System.Collections.Generic import List
remove_ironpython_dlls(testpath.public_testdir)
load_iron_python_dll()
import IronPython
if False: #Needs to be updated or removed for DLR
from IronPython.Hosting import PythonCompiler
load_iron_python_test()
def CompileAsDll(fileName, assemblyName):
sources = List[str]()
sources.Add(fileName)
pc = PythonCompiler(sources, assemblyName)
pc.TargetKind = System.Reflection.Emit.PEFileKinds.Dll
pc.Compile()
def CompileOneFileAsConsoleApp1(fileName, assemblyName, setMainFile) :
sources = List[str]()
sources.Add(fileName)
pc = PythonCompiler(sources, assemblyName)
if setMainFile:
pc.MainFile = fileName
pc.Compile()
def CompileOneFileAsConsoleApp2(fileName, assemblyName):
sources = List[str]()
sources.Add(fileName)
pc = PythonCompiler(sources, assemblyName)
pc.MainFile = "NotExistFile"
pc.Compile()
def CompileTwoFilesAsConsoleApp(fileName1, fileName2, assemblyName, setMainFile):
sources = List[str]()
sources.Add(fileName1)
sources.Add(fileName2)
pc = PythonCompiler(sources, assemblyName)
if (setMainFile):
pc.MainFile = fileName1
pc.Compile()
def UsingReference(fileName, typeName, assemblyName):
sources = List[str]()
sources.Add(fileName)
pc = PythonCompiler(sources, assemblyName)
pc.MainFile = fileName
refAsms = List[str]()
refAsms.Add(System.Type.GetType(typeName).Assembly.FullName)
pc.ReferencedAssemblies = refAsms
pc.Compile()
def CheckIncludeDebugInformation(fileName, assemblyName, include):
sources = List[str]()
sources.Add(fileName)
pc = PythonCompiler(sources, assemblyName)
pc.IncludeDebugInformation = include
pc.Compile()
def FileExists(file):
return System.IO.File.Exists(file)
def DeleteFile(file):
for i in range(5):
try:
System.IO.File.Delete(file)
break
except:
System.Threading.Thread.Sleep(1000)
def FileRemoval(*files):
for file in files:
DeleteFile(file)
def GetFullPath(file):
return System.IO.Path.GetFullPath(file).ToLower()
def RunPythonExe(file, *args):
fullpath = GetFullPath(file)
temppath = System.IO.Path.Combine(sys.prefix, System.IO.FileInfo(fullpath).Name).ToLower()
if (fullpath != temppath):
System.IO.File.Copy(fullpath, temppath, True)
realargs = [temppath]
realargs.extend(args)
try:
retval = os.spawnv(0, temppath, realargs)
except:
retval = 1
# hack: clean up the temporary copy of the exe after running it
if (fullpath != temppath):
DeleteFile(temppath)
Assert(not retval)
## compile as dll
source, assembly, pdbfile = "tempFile1.tpy", "tempFile1.dll", "tempFile1.pdb"
write_to_file(source, '''
class B:
def M1(self):
return 20
''')
@disabled("Needs to be updated or removed for DLR")
def test_sanity():
FileRemoval(assembly, pdbfile);
CompileAsDll(source, assembly)
Assert(FileExists(assembly))
Assert(FileExists(pdbfile))
@disabled("Needs to be updated or removed for DLR")
def test_one_source_consoleapp():
## compile as exe
## if only one source file, you do not necessarily specify the main file
source, assembly, pdbfile = "tempFile1.tpy", "tempFile1.exe", "tempFile1.pdb"
FileRemoval(assembly, pdbfile);
CompileOneFileAsConsoleApp1(source, assembly, True)
Assert(FileExists(assembly))
Assert(FileExists(pdbfile))
FileRemoval(assembly, pdbfile);
CompileOneFileAsConsoleApp1(source, assembly, False)
Assert(FileExists(assembly))
Assert(FileExists(pdbfile))
## compile as exe, but main file is INVALID
AssertError(Exception, CompileOneFileAsConsoleApp2, source, assembly)
@disabled("Needs to be updated or removed for DLR")
def test_two_source_consoleapp():
## compile 2 files as exe
source1, source2, assembly, pdbfile = "tempFile2.tpy", "tempFile1.tpy", "tempFile2.exe", "tempFile2.pdb"
write_to_file(source1, '''
import tempFile1
class D(tempFile1.B):
def M2(self):
return 100
b = tempFile1.B()
if (b.M1() != 20) :
raise AssertionError("failed 1")
d= D()
if (d.M2() != 100):
raise AssertionError("failed 2")
''')
FileRemoval(assembly, pdbfile);
CompileTwoFilesAsConsoleApp(source1, source2, assembly, True)
Assert(FileExists(assembly))
Assert(FileExists(pdbfile))
RunPythonExe(assembly)
## compile 2 files as exe, but main file is not set
AssertError(Exception, CompileTwoFilesAsConsoleApp, source1, source2, assembly, False)
@disabled("Needs to be updated or removed for DLR")
def test_debug_consoleapp():
## IncludeDebugInformation
source, assembly, pdbfile = "tempFile1.tpy", "tempFile1.dll", "tempFile1.pdb"
FileRemoval(assembly, pdbfile);
CheckIncludeDebugInformation(source, assembly, True)
Assert(FileExists(assembly))
Assert(FileExists(pdbfile))
FileRemoval(assembly, pdbfile);
CheckIncludeDebugInformation(source, assembly, False)
Assert(FileExists(assembly))
Assert(FileExists(pdbfile) == False)
@disabled("Needs to be updated or removed for DLR")
def test_referenced_assemblies_consoleapp():
## Test Using referenced assemblies
source, assembly, pdbfile = "tempFile3.tpy", "tempFile3.exe", "tempFile3.pdb"
import clr
clr.AddReferenceByPartialName("System.Xml")
# sys.LoadAssembly...("System.xml") is emitted because of referenced assemblies specified
write_to_file(source, '''
import System
import System.Xml
tw = System.Xml.XmlTextWriter("tempResult.xml", System.Text.Encoding.ASCII)
tw.WriteStartDocument()
tw.WriteStartElement("PythonCompiler")
tw.WriteEndElement()
tw.WriteEndDocument()
tw.Close()
''')
fullTypeName = System.Type.GetType("System.Int32").AssemblyQualifiedName.split(',', 2)
UsingReference(source, "System.Xml.XmlTextReader, System.Xml," + fullTypeName[2], assembly)
tempXml = "tempResult.xml"
## BE CLEAN
FileRemoval(tempXml)
## RUN
RunPythonExe(assembly)
## CHECK
Assert(FileExists(tempXml), "File was not generated after running the exe")
f = open(tempXml)
Assert(f.read().find("PythonCompiler") <> -1, "The specified word is not found in the file")
f.close()
FileRemoval(tempXml)
for filename in ['tempFile1', 'tempFile2', 'tempFile3']:
for suffix in [ 'tpy', 'dll', 'exe', 'pdb']:
FileRemoval(filename + '.' + suffix)
#
# verify that generated exe will run stand alone.
#
@disabled("Needs to be updated or removed for DLR")
def test_exe_standalone():
tempFile1 = '''
class B:
def M1(self):
return 20
'''
tempFile2 = '''
import tempFile1
class D(tempFile1.B):
def M2(self):
return 100
b = tempFile1.B()
if (b.M1() != 20) :
raise AssertionError("failed 1")
d= D()
if (d.M2() != 100):
raise AssertionError("failed 2")
'''
tempFileName1 = GetFullPath("tempFile1.py")
tempFileName2 = GetFullPath("tempFile2.py")
tempExeName1 = GetFullPath("tempFile1.exe")
tempExeName2 = GetFullPath("tempFile2.exe")
tempPdbName1 = GetFullPath("tempFile1.pdb")
tempPdbName2 = GetFullPath("tempFile2.pdb")
write_to_file(tempFileName1, tempFile1)
write_to_file(tempFileName2, tempFile2)
AreEqual(launch_ironpython_changing_extensions(tempFileName2, ["-X:SaveAssemblies"], ["-X:LightweightScopes", "-X:AssembliesDir"]), 0)
RunPythonExe(tempExeName2)
FileRemoval(tempFileName1, tempFileName2, tempExeName1, tempExeName2, tempPdbName1, tempPdbName2)
#
# Verify that the executable doesn't get generated
#
tempFile1 = """
import System
files = map(lambda extension: System.IO.Path.ChangeExtension(__file__, extension), [".dll", ".exe", ".pdb"])
for file in files:
if System.IO.File.Exists(file):
print file, "exists"
raise AssertionError(file + " exists")
"""
write_to_file(tempFileName1, tempFile1)
AreEqual(launch_ironpython_changing_extensions(tempFileName1, [], ["-X:SaveAssemblies"]), 0)
FileRemoval(tempFileName1, tempExeName1, tempPdbName1)
source1 = "tempFile1.tpy"
source2 = "tempFile2.tpy"
assembly = "tempFile1.exe"
pdbfile = "tempFile1.pdb"
write_to_file(source1, """
import tempFile2
if tempFile2.value != 8.0:
raise AssertionError("failed import built-in")
""")
write_to_file(source2, """
import math
value = math.pow(2, 3)
""")
CompileTwoFilesAsConsoleApp(source1, source2, assembly, True)
Assert(FileExists(assembly))
Assert(FileExists(pdbfile))
RunPythonExe(assembly)
FileRemoval(source1, source2, assembly)
# verify arguments are passed through...
write_to_file(source1, """
import sys
def CustomAssert(c):
if not c: raise AssertionError("Assertion failed")
CustomAssert(sys.argv[0].lower() == sys.argv[4].lower())
sys.exit(int(sys.argv[1]) + int(sys.argv[2]) + int(sys.argv[3]))
""")
CompileOneFileAsConsoleApp1(source1, assembly, False)
RunPythonExe(assembly, 24, -22, -2, System.IO.Path.Combine(sys.prefix, assembly))
RunPythonExe(".\\" + assembly, 24, -22, -2, System.IO.Path.Combine(sys.prefix, assembly))
FileRemoval(source1, assembly)
@disabled("Needs to be updated or removed for DLR")
def test_compilersinktest():
from IronPythonTest import PythonCompilerSinkTest
st = PythonCompilerSinkTest()
for s in ['''
class Class:zxvc
"Description of Class"cxvxcvb
''',
'(1 and 1) = 1',
'(lambda x: x = 1 )',
' print 1',
'(name1 =1) = 1',
'''d = {}
for x in d.keys()
pass
''',
]:
Assert(st.CompileWithTestSink(s) > 0)
for s in [
'name = 1',
'(name) = 1',
'(name1,name2) = (1,2)',
'name, = (1,0)',
]:
Assert(st.CompileWithTestSink(s) == 0)
@disabled("ResourceFile is not available anymore")
def test_ip_hosting_resource_file():
'''
Test to hit IronPython.Hosting.ResourceFile.
'''
rf_list = [ IronPython.Hosting.ResourceFile("name0", "file0"),
IronPython.Hosting.ResourceFile("name1", "file1", False),
]
rf_list[0].PublicResource = False
for i in xrange(len(rf_list)):
AreEqual(rf_list[i].Name, "name" + str(i))
rf_list[i].Name = "name"
AreEqual(rf_list[i].Name, "name")
AreEqual(rf_list[i].File, "file" + str(i))
rf_list[i].File = "file"
AreEqual(rf_list[i].File, "file")
AreEqual(rf_list[i].PublicResource, False)
rf_list[i].PublicResource = True
AreEqual(rf_list[i].PublicResource, True)
@skip("multiple_execute")
@skip("netstandard") # no clr.CompileModules in netstandard
def test_compiled_code():
if System.Environment.GetEnvironmentVariable('DLR_SaveAssemblies'):
# The SaveAssemblies option is not compatible with saving code to disk
print '... skipping test if DLR_SaveAssemblies is set...'
return
import clr
pyil = os.path.join(testpath.temporary_dir, 'test.pyil')
# make sure we can compile
clr.CompileModules(pyil, os.path.join(testpath.public_testdir, 'test_class.py'))
# make sure we can compile multiple files
clr.CompileModules(pyil, os.path.join(testpath.public_testdir, 'test_class.py'), os.path.join(testpath.public_testdir, 'test_slice.py'))
clr.AddReferenceToFileAndPath(pyil)
import nt
# and make sure we can run some reasonable sophisticated code...
System.IO.File.Move(os.path.join(testpath.public_testdir, 'test_class.py'), 'old_test_class.py')
try:
import test_class
Assert(test_class.test_oldstyle_getattr.__doc__ != '')
finally:
System.IO.File.Move('old_test_class.py', os.path.join(testpath.public_testdir, 'test_class.py'))
@skip("multiple_execute")
@skip("netstandard") # no System.ICloneable in netstandard
def test_cached_types():
import clr
from System import IComparable, IFormattable, ICloneable
import IronPythonTest
cwd = os.getcwd()
os.chdir(testpath.temporary_dir)
# basic sanity test that we can compile...
clr.CompileSubclassTypes('test', (object, ))
clr.CompileSubclassTypes('test', object)
clr.CompileSubclassTypes('test', object, str, int, long, float, complex)
clr.CompileSubclassTypes('test', (object, IComparable[()]))
clr.CompileSubclassTypes('test', (object, IComparable[()]), (str, IComparable[()]))
# build an unlikely existing type and make sure construction gives us
# back the correct type.
clr.CompileSubclassTypes('cached_type_dll', (object, IComparable[()], IFormattable, ICloneable))
asm = System.Reflection.Assembly.LoadFrom(os.path.join(testpath.temporary_dir, 'cached_type_dll.dll'))
clr.AddReference(asm)
class x(object, IComparable[()], IFormattable, ICloneable):
pass
a = x()
AreEqual(clr.GetClrType(x).Assembly, asm)
# collect all types that are available in IronPythonTest and
# pre-gen them, then run test_inheritance to make sure it all works.
types = []
queue = [IronPythonTest]
while queue:
cur = queue.pop()
for name in dir(cur):
attr = getattr(cur, name)
if type(attr) is type:
clrType = clr.GetClrType(attr)
if clrType.IsEnum or clrType.IsSealed or clrType.IsValueType or clrType.ContainsGenericParameters:
continue
types.append(attr)
elif type(attr) == type(IronPythonTest):
queue.append(attr)
clr.CompileSubclassTypes('InheritanceTypes', *types)
clr.AddReferenceToFileAndPath(os.path.join(testpath.temporary_dir, 'InheritanceTypes.dll'))
import test_inheritance
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=21892
# verify that GetSubclassedTypes round trips with clr.CompileSubclassTypes
clr.CompileSubclassTypes('finaltest', *clr.GetSubclassedTypes())
clr.AddReference('finaltest')
os.chdir(cwd)
run_test(__name__)
os.remove('tempFile1.tpy')
| apache-2.0 |
urandu/rethinkdb | external/v8_3.30.33.16/buildtools/checkdeps/cpp_checker.py | 50 | 3436 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Checks C++ and Objective-C files for illegal includes."""
import codecs
import os
import re
import results
from rules import Rule, MessageRule
class CppChecker(object):
EXTENSIONS = [
'.h',
'.cc',
'.cpp',
'.m',
'.mm',
]
# The maximum number of non-include lines we can see before giving up.
_MAX_UNINTERESTING_LINES = 50
# The maximum line length, this is to be efficient in the case of very long
# lines (which can't be #includes).
_MAX_LINE_LENGTH = 128
# This regular expression will be used to extract filenames from include
# statements.
_EXTRACT_INCLUDE_PATH = re.compile(
'[ \t]*#[ \t]*(?:include|import)[ \t]+"(.*)"')
def __init__(self, verbose):
self._verbose = verbose
def CheckLine(self, rules, line, dependee_path, fail_on_temp_allow=False):
"""Checks the given line with the given rule set.
Returns a tuple (is_include, dependency_violation) where
is_include is True only if the line is an #include or #import
statement, and dependency_violation is an instance of
results.DependencyViolation if the line violates a rule, or None
if it does not.
"""
found_item = self._EXTRACT_INCLUDE_PATH.match(line)
if not found_item:
return False, None # Not a match
include_path = found_item.group(1)
if '\\' in include_path:
return True, results.DependencyViolation(
include_path,
MessageRule('Include paths may not include backslashes.'),
rules)
if '/' not in include_path:
# Don't fail when no directory is specified. We may want to be more
# strict about this in the future.
if self._verbose:
print ' WARNING: include specified with no directory: ' + include_path
return True, None
rule = rules.RuleApplyingTo(include_path, dependee_path)
if (rule.allow == Rule.DISALLOW or
(fail_on_temp_allow and rule.allow == Rule.TEMP_ALLOW)):
return True, results.DependencyViolation(include_path, rule, rules)
return True, None
def CheckFile(self, rules, filepath):
if self._verbose:
print 'Checking: ' + filepath
dependee_status = results.DependeeStatus(filepath)
ret_val = '' # We'll collect the error messages in here
last_include = 0
with codecs.open(filepath, encoding='utf-8') as f:
in_if0 = 0
for line_num, line in enumerate(f):
if line_num - last_include > self._MAX_UNINTERESTING_LINES:
break
line = line.strip()
# Check to see if we're at / inside an #if 0 block
if line.startswith('#if 0'):
in_if0 += 1
continue
if in_if0 > 0:
if line.startswith('#if'):
in_if0 += 1
elif line.startswith('#endif'):
in_if0 -= 1
continue
is_include, violation = self.CheckLine(rules, line, filepath)
if is_include:
last_include = line_num
if violation:
dependee_status.AddViolation(violation)
return dependee_status
@staticmethod
def IsCppFile(file_path):
"""Returns True iff the given path ends in one of the extensions
handled by this checker.
"""
return os.path.splitext(file_path)[1] in CppChecker.EXTENSIONS
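# Minimal illustrative usage (assumes a `rules` object built elsewhere by
# the checkdeps rule machinery):
#   checker = CppChecker(verbose=False)
#   dependee_status = checker.CheckFile(rules, 'chrome/browser/foo.cc')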
| agpl-3.0 |
lamby/pkg-fabric | fabric/tasks.py | 9 | 15409 | from __future__ import with_statement
from functools import wraps
import inspect
import sys
import textwrap
from fabric import state
from fabric.utils import abort, warn, error
from fabric.network import to_dict, normalize_to_string, disconnect_all
from fabric.context_managers import settings
from fabric.job_queue import JobQueue
from fabric.task_utils import crawl, merge, parse_kwargs
from fabric.exceptions import NetworkError
if sys.version_info[:2] == (2, 5):
# Python 2.5 inspect.getargspec returns a tuple
# instead of ArgSpec namedtuple.
class ArgSpec(object):
def __init__(self, args, varargs, keywords, defaults):
self.args = args
self.varargs = varargs
self.keywords = keywords
self.defaults = defaults
self._tuple = (args, varargs, keywords, defaults)
def __getitem__(self, idx):
return self._tuple[idx]
def patched_get_argspec(func):
return ArgSpec(*inspect._getargspec(func))
inspect._getargspec = inspect.getargspec
inspect.getargspec = patched_get_argspec
def get_task_details(task):
details = [
textwrap.dedent(task.__doc__)
if task.__doc__
else 'No docstring provided']
argspec = inspect.getargspec(task)
default_args = [] if not argspec.defaults else argspec.defaults
num_default_args = len(default_args)
args_without_defaults = argspec.args[:len(argspec.args) - num_default_args]
args_with_defaults = argspec.args[-1 * num_default_args:]
details.append('Arguments: %s' % (
', '.join(
args_without_defaults + [
'%s=%r' % (arg, default)
for arg, default in zip(args_with_defaults, default_args)
])
))
return '\n'.join(details)
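# Illustrative output for a hypothetical task ``def deploy(branch, force=False)``:
#   "<task docstring>\nArguments: branch, force=False"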
def _get_list(env):
def inner(key):
return env.get(key, [])
return inner
class Task(object):
"""
Abstract base class for objects wishing to be picked up as Fabric tasks.
Instances of subclasses will be treated as valid tasks when present in
fabfiles loaded by the :doc:`fab </usage/fab>` tool.
For details on how to implement and use `~fabric.tasks.Task` subclasses,
please see the usage documentation on :ref:`new-style tasks
<new-style-tasks>`.
.. versionadded:: 1.1
"""
name = 'undefined'
use_task_objects = True
aliases = None
is_default = False
# TODO: make it so that this wraps other decorators as expected
def __init__(self, alias=None, aliases=None, default=False, name=None,
*args, **kwargs):
if alias is not None:
self.aliases = [alias, ]
if aliases is not None:
self.aliases = aliases
if name is not None:
self.name = name
self.is_default = default
def __details__(self):
return get_task_details(self.run)
def run(self):
raise NotImplementedError
def get_hosts(self, arg_hosts, arg_roles, arg_exclude_hosts, env=None):
"""
Return the host list the given task should be using.
See :ref:`host-lists` for detailed documentation on how host lists are
set.
"""
env = env or {'hosts': [], 'roles': [], 'exclude_hosts': []}
roledefs = env.get('roledefs', {})
# Command line per-task takes precedence over anything else.
if arg_hosts or arg_roles:
return merge(arg_hosts, arg_roles, arg_exclude_hosts, roledefs)
# Decorator-specific hosts/roles go next
func_hosts = getattr(self, 'hosts', [])
func_roles = getattr(self, 'roles', [])
if func_hosts or func_roles:
return merge(func_hosts, func_roles, arg_exclude_hosts, roledefs)
# Finally, the env is checked (which might contain globally set lists
# from the CLI or from module-level code). This will be the empty list
# if these have not been set -- which is fine, this method should
# return an empty list if no hosts have been set anywhere.
env_vars = map(_get_list(env), "hosts roles exclude_hosts".split())
env_vars.append(roledefs)
return merge(*env_vars)
def get_pool_size(self, hosts, default):
# Default parallel pool size (calculate per-task in case variables
# change)
default_pool_size = default or len(hosts)
# Allow per-task override
# Also cast to int in case somebody gave a string
from_task = getattr(self, 'pool_size', None)
pool_size = int(from_task or default_pool_size)
# But ensure it's never larger than the number of hosts
pool_size = min((pool_size, len(hosts)))
# Inform user of final pool size for this task
if state.output.debug:
print("Parallel tasks now using pool size of %d" % pool_size)
return pool_size
class WrappedCallableTask(Task):
"""
Wraps a given callable transparently, while marking it as a valid Task.
Generally used via `~fabric.decorators.task` and not directly.
.. versionadded:: 1.1
.. seealso:: `~fabric.docs.unwrap_tasks`, `~fabric.decorators.task`
"""
def __init__(self, callable, *args, **kwargs):
super(WrappedCallableTask, self).__init__(*args, **kwargs)
self.wrapped = callable
# Don't use getattr() here -- we want to avoid touching self.name
# entirely so the superclass' value remains default.
if hasattr(callable, '__name__'):
if self.name == 'undefined':
self.__name__ = self.name = callable.__name__
else:
self.__name__ = self.name
if hasattr(callable, '__doc__'):
self.__doc__ = callable.__doc__
if hasattr(callable, '__module__'):
self.__module__ = callable.__module__
def __call__(self, *args, **kwargs):
return self.run(*args, **kwargs)
def run(self, *args, **kwargs):
return self.wrapped(*args, **kwargs)
def __getattr__(self, k):
return getattr(self.wrapped, k)
def __details__(self):
return get_task_details(self.wrapped)
def requires_parallel(task):
"""
Returns True if given ``task`` should be run in parallel mode.
Specifically:
* It's been explicitly marked with ``@parallel``, or:
* It's *not* been explicitly marked with ``@serial`` *and* the global
parallel option (``env.parallel``) is set to ``True``.
"""
return (
(state.env.parallel and not getattr(task, 'serial', False))
or getattr(task, 'parallel', False)
)
def _parallel_tasks(commands_to_run):
return any(map(
lambda x: requires_parallel(crawl(x[0], state.commands)),
commands_to_run
))
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
"""
Primary single-host work body of execute()
"""
# Log to stdout
if state.output.running and not hasattr(task, 'return_value'):
print("[%s] Executing task '%s'" % (host, my_env['command']))
# Create per-run env with connection settings
local_env = to_dict(host)
local_env.update(my_env)
# Set a few more env flags for parallelism
if queue is not None:
local_env.update({'parallel': True, 'linewise': True})
# Handle parallel execution
if queue is not None: # Since queue is only set for parallel
name = local_env['host_string']
# Wrap in another callable that:
# * expands the env it's given to ensure parallel, linewise, etc are
# all set correctly and explicitly. Such changes are naturally
# insulated from the parent process.
# * nukes the connection cache to prevent shared-access problems
# * knows how to send the tasks' return value back over a Queue
# * captures exceptions raised by the task
def inner(args, kwargs, queue, name, env):
state.env.update(env)
def submit(result):
queue.put({'name': name, 'result': result})
try:
key = normalize_to_string(state.env.host_string)
state.connections.pop(key, "")
submit(task.run(*args, **kwargs))
except BaseException, e: # We really do want to capture everything
# SystemExit implies use of abort(), which prints its own
# traceback, host info etc -- so we don't want to double up
# on that. For everything else, though, we need to make
# clear what host encountered the exception that will
# print.
if e.__class__ is not SystemExit:
sys.stderr.write("!!! Parallel execution exception under host %r:\n" % name)
submit(e)
# Here, anything -- unexpected exceptions, or abort()
# driven SystemExits -- will bubble up and terminate the
# child process.
raise
# Stuff into Process wrapper
kwarg_dict = {
'args': args,
'kwargs': kwargs,
'queue': queue,
'name': name,
'env': local_env,
}
p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
# Name/id is host string
p.name = name
# Add to queue
jobs.append(p)
# Handle serial execution
else:
with settings(**local_env):
return task.run(*args, **kwargs)
def _is_task(task):
return isinstance(task, Task)
def execute(task, *args, **kwargs):
"""
Execute ``task`` (callable or name), honoring host/role decorators, etc.
``task`` may be an actual callable object, or it may be a registered task
name, which is used to look up a callable just as if the name had been
given on the command line (including :ref:`namespaced tasks <namespaces>`,
e.g. ``"deploy.migrate"``.
The task will then be executed once per host in its host list, which is
(again) assembled in the same manner as CLI-specified tasks: drawing from
:option:`-H`, :ref:`env.hosts <hosts>`, the `~fabric.decorators.hosts` or
`~fabric.decorators.roles` decorators, and so forth.
``host``, ``hosts``, ``role``, ``roles`` and ``exclude_hosts`` kwargs will
be stripped out of the final call, and used to set the task's host list, as
if they had been specified on the command line like e.g. ``fab
taskname:host=hostname``.
Any other arguments or keyword arguments will be passed verbatim into
``task`` (the function itself -- not the ``@task`` decorator wrapping your
function!) when it is called, so ``execute(mytask, 'arg1',
kwarg1='value')`` will (once per host) invoke ``mytask('arg1',
kwarg1='value')``.
:returns:
a dictionary mapping host strings to the given task's return value for
that host's execution run. For example, ``execute(foo, hosts=['a',
'b'])`` might return ``{'a': None, 'b': 'bar'}`` if ``foo`` returned
nothing on host `a` but returned ``'bar'`` on host `b`.
In situations where a task execution fails for a given host but overall
progress does not abort (such as when :ref:`env.skip_bad_hosts
<skip-bad-hosts>` is True) the return value for that host will be the
error object or message.
.. seealso::
:ref:`The execute usage docs <execute>`, for an expanded explanation
and some examples.
.. versionadded:: 1.3
.. versionchanged:: 1.4
Added the return value mapping; previously this function had no defined
return value.
"""
my_env = {'clean_revert': True}
results = {}
# Obtain task
is_callable = callable(task)
if not (is_callable or _is_task(task)):
# Assume string, set env.command to it
my_env['command'] = task
task = crawl(task, state.commands)
if task is None:
abort("%r is not callable or a valid task name" % (task,))
# Set env.command if we were given a real function or callable task obj
else:
dunder_name = getattr(task, '__name__', None)
my_env['command'] = getattr(task, 'name', dunder_name)
# Normalize to Task instance if we ended up with a regular callable
if not _is_task(task):
task = WrappedCallableTask(task)
# Filter out hosts/roles kwargs
new_kwargs, hosts, roles, exclude_hosts = parse_kwargs(kwargs)
# Set up host list
my_env['all_hosts'] = task.get_hosts(hosts, roles, exclude_hosts, state.env)
parallel = requires_parallel(task)
if parallel:
# Import multiprocessing if needed, erroring out usefully
# if it can't.
try:
import multiprocessing
except ImportError:
import traceback
tb = traceback.format_exc()
abort(tb + """
At least one task needs to be run in parallel, but the
multiprocessing module cannot be imported (see above
traceback). Please make sure the module is installed
or that the above ImportError is fixed.""")
else:
multiprocessing = None
# Get pool size for this task
pool_size = task.get_pool_size(my_env['all_hosts'], state.env.pool_size)
# Set up job queue in case parallel is needed
queue = multiprocessing.Queue() if parallel else None
jobs = JobQueue(pool_size, queue)
if state.output.debug:
jobs._debug = True
# Call on host list
if my_env['all_hosts']:
# Attempt to cycle on hosts, skipping if needed
for host in my_env['all_hosts']:
try:
results[host] = _execute(
task, host, my_env, args, new_kwargs, jobs, queue,
multiprocessing
)
except NetworkError, e:
results[host] = e
# Backwards compat test re: whether to use an exception or
# abort
if not state.env.use_exceptions_for['network']:
func = warn if state.env.skip_bad_hosts else abort
error(e.message, func=func, exception=e.wrapped)
else:
raise
# If requested, clear out connections here and not just at the end.
if state.env.eagerly_disconnect:
disconnect_all()
# If running in parallel, block until job queue is emptied
if jobs:
err = "One or more hosts failed while executing task '%s'" % (
my_env['command']
)
jobs.close()
# Abort if any children did not exit cleanly (fail-fast).
# This prevents Fabric from continuing on to any other tasks.
# Otherwise, pull in results from the child run.
ran_jobs = jobs.run()
for name, d in ran_jobs.iteritems():
if d['exit_code'] != 0:
if isinstance(d['results'], BaseException):
error(err, exception=d['results'])
else:
error(err)
results[name] = d['results']
# Or just run once for local-only
else:
with settings(**my_env):
results['<local-only>'] = task.run(*args, **new_kwargs)
# Return what we can from the inner task executions
return results
| bsd-2-clause |
b-jesch/plugin.program.tvhighlights | starter.py | 2 | 5768 | #!/usr/bin/python
###########################################################################
#
# FILE: plugin.program.tvhighlights/starter.py
#
# AUTHOR: Tobias D. Oestreicher
#
# LICENSE: GPLv3 <http://www.gnu.org/licenses/gpl.txt>
# VERSION: 0.1.5
# CREATED: 05.02.2016
#
###########################################################################
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
#
###########################################################################
# CHANGELOG: (02.09.2015) TDOe - First Publishing
###########################################################################
import os,re,xbmc,xbmcgui,xbmcaddon
__addon__ = xbmcaddon.Addon()
__addonID__ = __addon__.getAddonInfo('id')
__addonname__ = __addon__.getAddonInfo('name')
__version__ = __addon__.getAddonInfo('version')
__path__ = __addon__.getAddonInfo('path')
__LS__ = __addon__.getLocalizedString
__icon__ = xbmc.translatePath(os.path.join(__path__, 'icon.png'))
OSD = xbmcgui.Dialog()
# Helpers #
def notifyOSD(header, message, icon=xbmcgui.NOTIFICATION_INFO, disp=4000, enabled=True):
if enabled:
OSD.notification(header.encode('utf-8'), message.encode('utf-8'), icon, disp)
def writeLog(message, level=xbmc.LOGNOTICE):
xbmc.log('[%s %s]: %s' % (__addonID__, __version__, message.encode('utf-8')), level)
# End Helpers #
_mdelay = int(re.match('\d+', __addon__.getSetting('mdelay')).group())
_screenrefresh = int(re.match('\d+', __addon__.getSetting('screenrefresh')).group())
writeLog('Content refresh: %s mins, screen refresh: %s mins' % (_mdelay, _screenrefresh), level=xbmc.LOGDEBUG)
class MyMonitor(xbmc.Monitor):
def __init__(self, *args, **kwargs ):
xbmc.Monitor.__init__(self)
self.settingsChanged = False
def onSettingsChanged(self):
self.settingsChanged = True
class Starter():
def __init__(self):
self.enableinfo = False
self.showOutdated = False
self.prefer_hd = True
self.mdelay = 0
self.screenrefresh = 0
def getSettings(self):
self.enableinfo = True if __addon__.getSetting('enableinfo').upper() == 'TRUE' else False
self.showOutdated = True if __addon__.getSetting('showOutdated').upper() == 'TRUE' else False
self.prefer_hd = True if __addon__.getSetting('prefer_hd').upper() == 'TRUE' else False
self.mdelay = int(re.match('\d+', __addon__.getSetting('mdelay')).group()) * 60
self.screenrefresh = int(re.match('\d+', __addon__.getSetting('screenrefresh')).group()) * 60
self.delay = int(re.match('\d+', __addon__.getSetting('delay')).group()) * 1000
self.refreshcontent = self.mdelay/self.screenrefresh
self.mincycle = int(re.match('\d+', __LS__(30151)).group()) * 60
self.poll = self.screenrefresh/self.mincycle
writeLog('Settings (re)loaded')
writeLog('Show notifications: %s' % (self.enableinfo), level=xbmc.LOGDEBUG)
writeLog('Show outdated Broadcasts: %s' % (self.showOutdated), level=xbmc.LOGDEBUG)
writeLog('Prefer HD channel: %s' % (self.prefer_hd), level=xbmc.LOGDEBUG)
writeLog('Scraper start delay: %s msecs' % (self.delay), level=xbmc.LOGDEBUG)
writeLog('Refresh interval content: %s secs' % (self.mdelay), level=xbmc.LOGDEBUG)
writeLog('Refresh interval screen: %s secs' % (self.screenrefresh), level=xbmc.LOGDEBUG)
writeLog('Refreshing multiplicator: %s' % (self.refreshcontent), level=xbmc.LOGDEBUG)
writeLog('Poll cycles: %s' % (self.poll), level=xbmc.LOGDEBUG)
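# Whenever settings are (re)loaded, wait the configured delay and trigger
# a scrape so the new settings take effect immediately.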
if self.delay > 0:
xbmc.sleep(self.delay)
xbmc.executebuiltin('XBMC.RunScript(plugin.program.tvhighlights,"?methode=scrape_highlights")')
def start(self):
writeLog('Starting %s V.%s' % (__addonname__, __version__))
notifyOSD(__LS__(30010), __LS__(30106), __icon__, enabled=self.enableinfo)
self.getSettings()
## Example: with a 5 min screen refresh and a 120 min content refresh,
## content is rescraped every mdelay/screenrefresh (= refreshcontent)
## screen cycles; _pc counts poll cycles, _c counts screen refreshes.
_c = 0
_pc = 0
monitor = MyMonitor()
while not monitor.abortRequested():
if monitor.settingsChanged:
self.getSettings()
monitor.settingsChanged = False
if monitor.waitForAbort(self.mincycle):
break
_pc += 1
if _pc < self.poll:
continue
_c += 1
_pc = 0
if _c >= self.refreshcontent:
writeLog('Scrape TV Today Highlights')
notifyOSD(__LS__(30010), __LS__(30018), __icon__, enabled=self.enableinfo)
xbmc.executebuiltin('XBMC.RunScript(plugin.program.tvhighlights,"?methode=scrape_highlights")')
_c = 0
else:
notifyOSD(__LS__(30010), __LS__(30109), __icon__, enabled=self.enableinfo)
if not self.showOutdated:
writeLog('Refresh content on home screen')
xbmc.executebuiltin('XBMC.RunScript(plugin.program.tvhighlights,"?methode=refresh_screen")')
if __name__ == '__main__':
starter = Starter()
starter.start()
del starter
| gpl-3.0 |
jtl999/certbot | certbot-nginx/certbot_nginx/tls_sni_01.py | 2 | 5689 | """A class that performs TLS-SNI-01 challenges for Nginx"""
import itertools
import logging
import os
from certbot import errors
from certbot.plugins import common
from certbot_nginx import obj
from certbot_nginx import nginxparser
logger = logging.getLogger(__name__)
class NginxTlsSni01(common.TLSSNI01):
"""TLS-SNI-01 authenticator for Nginx
:ivar configurator: NginxConfigurator object
:type configurator: :class:`~nginx.configurator.NginxConfigurator`
:ivar list achalls: Annotated
class:`~certbot.achallenges.KeyAuthorizationAnnotatedChallenge`
challenges
:param list indices: Meant to hold indices of challenges in a
larger array. NginxTlsSni01 is capable of solving many challenges
at once which causes an indexing issue within NginxConfigurator
who must return all responses in order. Imagine NginxConfigurator
maintaining state about where all of the http-01 Challenges,
TLS-SNI-01 Challenges belong in the response array. This is an
optional utility.
:param str challenge_conf: location of the challenge config file
"""
def perform(self):
"""Perform a challenge on Nginx.
:returns: list of :class:`certbot.acme.challenges.TLSSNI01Response`
:rtype: list
"""
if not self.achalls:
return []
addresses = []
default_addr = "{0} default_server ssl".format(
self.configurator.config.tls_sni_01_port)
for achall in self.achalls:
vhost = self.configurator.choose_vhost(achall.domain)
if vhost is None:
logger.error(
"No nginx vhost exists with server_name matching: %s. "
"Please specify server_names in the Nginx config.",
achall.domain)
return None
for addr in vhost.addrs:
if addr.default:
addresses.append([obj.Addr.fromstring(default_addr)])
break
else:
addresses.append(list(vhost.addrs))
# Create challenge certs
responses = [self._setup_challenge_cert(x) for x in self.achalls]
# Set up the configuration
self._mod_config(addresses)
# Save reversible changes
self.configurator.save("SNI Challenge", True)
return responses
def _mod_config(self, ll_addrs):
"""Modifies Nginx config to include challenge server blocks.
:param list ll_addrs: list of lists of
:class:`certbot_nginx.obj.Addr` to apply
:raises .MisconfigurationError:
Unable to find a suitable HTTP block in which to include
authenticator hosts.
"""
# Add the 'include' statement for the challenges if it doesn't exist
# already in the main config
included = False
include_directive = ['\n', 'include', ' ', self.challenge_conf]
root = self.configurator.parser.loc["root"]
bucket_directive = ['\n', 'server_names_hash_bucket_size', ' ', '128']
main = self.configurator.parser.parsed[root]
for key, body in main:
if key == ['http']:
found_bucket = False
for k, _ in body:
if k == bucket_directive[1]:
found_bucket = True
if not found_bucket:
body.insert(0, bucket_directive)
if include_directive not in body:
body.insert(0, include_directive)
included = True
break
if not included:
raise errors.MisconfigurationError(
'LetsEncrypt could not find an HTTP block to include '
'TLS-SNI-01 challenges in %s.' % root)
config = [self._make_server_block(pair[0], pair[1])
for pair in itertools.izip(self.achalls, ll_addrs)]
config = nginxparser.UnspacedList(config)
self.configurator.reverter.register_file_creation(
True, self.challenge_conf)
with open(self.challenge_conf, "w") as new_conf:
nginxparser.dump(config, new_conf)
def _make_server_block(self, achall, addrs):
"""Creates a server block for a challenge.
:param achall: Annotated TLS-SNI-01 challenge
:type achall:
:class:`certbot.achallenges.KeyAuthorizationAnnotatedChallenge`
:param list addrs: addresses of challenged domain
:class:`list` of type :class:`~nginx.obj.Addr`
:returns: server block for the challenge host
:rtype: list
"""
document_root = os.path.join(
self.configurator.config.work_dir, "tls_sni_01_page")
block = [['listen', ' ', str(addr)] for addr in addrs]
block.extend([['server_name', ' ',
achall.response(achall.account_key).z_domain],
# access and error logs necessary for
# integration testing (non-root)
['access_log', ' ', os.path.join(
self.configurator.config.work_dir, 'access.log')],
['error_log', ' ', os.path.join(
self.configurator.config.work_dir, 'error.log')],
['ssl_certificate', ' ', self.get_cert_path(achall)],
['ssl_certificate_key', ' ', self.get_key_path(achall)],
[['location', ' ', '/'], [['root', ' ', document_root]]]] +
self.configurator.parser.loc["ssl_options"])
return [['server'], block]
| apache-2.0 |
allanino/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_tkagg.py | 69 | 24593 | # Todd Miller [email protected]
from __future__ import division
import os, sys, math
import Tkinter as Tk, FileDialog
import tkagg # Paint image to Tk photo blitter extension
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import matplotlib.cbook as cbook
rcParams = matplotlib.rcParams
verbose = matplotlib.verbose
backend_version = Tk.TkVersion
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord = {
cursors.MOVE: "fleur",
cursors.HAND: "hand2",
cursors.POINTER: "arrow",
cursors.SELECT_REGION: "tcross",
}
def round(x):
return int(math.floor(x+0.5))
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
def error_msg_tkpaint(msg, parent=None):
import tkMessageBox
tkMessageBox.showerror("matplotlib", msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
def show():
"""
    Show all the figures and enter the Tk mainloop
This should be the last line of your script. This function sets
interactive mode to True, as detailed on
http://matplotlib.sf.net/interactive.html
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
import matplotlib
matplotlib.interactive(True)
if rcParams['tk.pythoninspect']:
os.environ['PYTHONINSPECT'] = '1'
if show._needmain:
Tk.mainloop()
show._needmain = False
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
_focus = windowing.FocusManager()
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Tk.Tk()
canvas = FigureCanvasTkAgg(figure, master=window)
figManager = FigureManagerTkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FigureCanvasTkAgg(FigureCanvasAgg):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
def __init__(self, figure, master=None, resize_callback=None):
FigureCanvasAgg.__init__(self, figure)
self._idle = True
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self._tkcanvas = Tk.Canvas(
master=master, width=w, height=h, borderwidth=4)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=w, height=h)
self._tkcanvas.create_image(w/2, h/2, image=self._tkphoto)
self._resize_callback = resize_callback
self._tkcanvas.bind("<Configure>", self.resize)
self._tkcanvas.bind("<Key>", self.key_press)
self._tkcanvas.bind("<Motion>", self.motion_notify_event)
self._tkcanvas.bind("<KeyRelease>", self.key_release)
for name in "<Button-1>", "<Button-2>", "<Button-3>":
self._tkcanvas.bind(name, self.button_press_event)
for name in "<ButtonRelease-1>", "<ButtonRelease-2>", "<ButtonRelease-3>":
self._tkcanvas.bind(name, self.button_release_event)
# Mouse wheel on Linux generates button 4/5 events
for name in "<Button-4>", "<Button-5>":
self._tkcanvas.bind(name, self.scroll_event)
# Mouse wheel for windows goes to the window with the focus.
# Since the canvas won't usually have the focus, bind the
# event to the window containing the canvas instead.
# See http://wiki.tcl.tk/3893 (mousewheel) for details
root = self._tkcanvas.winfo_toplevel()
root.bind("<MouseWheel>", self.scroll_event_windows)
self._master = master
self._tkcanvas.focus_set()
# a dict from func-> cbook.Scheduler threads
self.sourced = dict()
# call the idle handler
def on_idle(*ignore):
self.idle_event()
return True
# disable until you figure out how to handle threads and interrupts
#t = cbook.Idle(on_idle)
#self._tkcanvas.after_idle(lambda *ignore: t.start())
def resize(self, event):
width, height = event.width, event.height
if self._resize_callback is not None:
self._resize_callback(event)
# compute desired figure size in inches
dpival = self.figure.dpi
winch = width/dpival
hinch = height/dpival
self.figure.set_size_inches(winch, hinch)
self._tkcanvas.delete(self._tkphoto)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=width, height=height)
self._tkcanvas.create_image(width/2,height/2,image=self._tkphoto)
self.resize_event()
self.show()
def draw(self):
FigureCanvasAgg.draw(self)
tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)
self._master.update_idletasks()
def blit(self, bbox=None):
tkagg.blit(self._tkphoto, self.renderer._renderer, bbox=bbox, colormode=2)
self._master.update_idletasks()
show = draw
def draw_idle(self):
'update drawing area only if idle'
d = self._idle
self._idle = False
def idle_draw(*args):
self.draw()
self._idle = True
if d: self._tkcanvas.after_idle(idle_draw)
def get_tk_widget(self):
"""returns the Tk widget used to implement FigureCanvasTkAgg.
Although the initial implementation uses a Tk canvas, this routine
is intended to hide that fact.
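        For example, a minimal embedding sketch (``figure`` and ``root``
        are assumed to already exist)::
            canvas = FigureCanvasTkAgg(figure, master=root)
            canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)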
"""
return self._tkcanvas
def motion_notify_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def button_press_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_press_event(self, x, y, num, guiEvent=event)
def button_release_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)
def scroll_event(self, event):
x = event.x
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if num==4: step = -1
elif num==5: step = +1
else: step = 0
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def scroll_event_windows(self, event):
"""MouseWheel event processor"""
# need to find the window that contains the mouse
w = event.widget.winfo_containing(event.x_root, event.y_root)
if w == self._tkcanvas:
x = event.x_root - w.winfo_rootx()
y = event.y_root - w.winfo_rooty()
y = self.figure.bbox.height - y
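            # Windows reports wheel motion in multiples of 120 (the
            # WHEEL_DELTA constant), so normalize to fractional steps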
step = event.delta/120.
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def _get_key(self, event):
val = event.keysym_num
if val in self.keyvald:
key = self.keyvald[val]
elif val<256:
key = chr(val)
else:
key = None
return key
def key_press(self, event):
key = self._get_key(event)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def key_release(self, event):
key = self._get_key(event)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
def flush_events(self):
self._master.update()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerTkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The tk.Toolbar
window : The tk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
self.window = window
self.window.withdraw()
self.window.wm_title("Figure %d" % num)
self.canvas = canvas
self._num = num
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window.minsize(int(w*3/4),int(h*3/4))
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self.window )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2TkAgg( canvas, self.window )
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self._shown = False
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.show()
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
"""
this function doesn't segfault but causes the
PyEval_RestoreThread: NULL state bug on win32
"""
def destroy(*args):
self.window = None
Gcf.destroy(self._num)
if not self._shown: self.canvas._tkcanvas.bind("<Destroy>", destroy)
_focus = windowing.FocusManager()
if not self._shown:
self.window.deiconify()
# anim.py requires this
if sys.platform=='win32' : self.window.update()
else:
self.canvas.draw()
self._shown = True
def destroy(self, *args):
if Gcf.get_num_fig_managers()==0 and not matplotlib.is_interactive():
if self.window is not None:
self.window.quit()
if self.window is not None:
#self.toolbar.destroy()
self.window.destroy()
pass
self.window = None
def set_window_title(self, title):
self.window.wm_title(title)
class AxisMenu:
def __init__(self, master, naxes):
self._master = master
self._naxes = naxes
self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)
self._mbar.pack(side=Tk.LEFT)
self._mbutton = Tk.Menubutton(
master=self._mbar, text="Axes", underline=0)
self._mbutton.pack(side=Tk.LEFT, padx="2m")
self._mbutton.menu = Tk.Menu(self._mbutton)
self._mbutton.menu.add_command(
label="Select All", command=self.select_all)
self._mbutton.menu.add_command(
label="Invert All", command=self.invert_all)
self._axis_var = []
self._checkbutton = []
for i in range(naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append(self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
self._mbutton.menu.invoke(self._mbutton.menu.index("Select All"))
self._mbutton['menu'] = self._mbutton.menu
self._mbar.tk_menuBar(self._mbutton)
self.set_active()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append( self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
del self._axis_var[i]
self._mbutton.menu.forget(self._checkbutton[i])
del self._checkbutton[i]
self._naxes = naxes
self.set_active()
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].get()]
return a
def set_active(self):
self._master.set_active(self.get_indices())
def invert_all(self):
for a in self._axis_var:
a.set(not a.get())
self.set_active()
def select_all(self):
for a in self._axis_var:
a.set(1)
self.set_active()
class NavigationToolbar(Tk.Frame):
"""
    Public attributes
      canvas - the FigureCanvasTkAgg instance
      window - the Tk window containing the canvas
"""
def _Button(self, text, file, command):
file = os.path.join(rcParams['datapath'], 'images', file)
im = Tk.PhotoImage(master=self, file=file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
xmin, xmax = canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=width, height=height,
borderwidth=2)
self.update() # Make axes menu
self.bLeft = self._Button(
text="Left", file="stock_left.ppm",
command=lambda x=-1: self.panx(x))
self.bRight = self._Button(
text="Right", file="stock_right.ppm",
command=lambda x=1: self.panx(x))
self.bZoomInX = self._Button(
text="ZoomInX",file="stock_zoom-in.ppm",
command=lambda x=1: self.zoomx(x))
self.bZoomOutX = self._Button(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=lambda x=-1: self.zoomx(x))
self.bUp = self._Button(
text="Up", file="stock_up.ppm",
command=lambda y=1: self.pany(y))
self.bDown = self._Button(
text="Down", file="stock_down.ppm",
command=lambda y=-1: self.pany(y))
self.bZoomInY = self._Button(
text="ZoomInY", file="stock_zoom-in.ppm",
command=lambda y=1: self.zoomy(y))
self.bZoomOutY = self._Button(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=lambda y=-1: self.zoomy(y))
self.bSave = self._Button(
text="Save", file="stock_save_as.ppm",
command=self.save_figure)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, direction):
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
def pany(self, direction):
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
def zoomx(self, direction):
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
def zoomy(self, direction):
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
def save_figure(self):
fs = FileDialog.SaveFileDialog(master=self.window,
title='Save the figure')
try:
self.lastDir
except AttributeError:
self.lastDir = os.curdir
fname = fs.go(dir_or_file=self.lastDir) # , pattern="*.png")
if fname is None: # Cancel
return
self.lastDir = os.path.dirname(fname)
try:
self.canvas.print_figure(fname)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_tkpaint(msg)
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
if not hasattr(self, "omenu"):
self.set_active(range(naxes))
self.omenu = AxisMenu(master=self, naxes=naxes)
else:
self.omenu.adjust(naxes)
class NavigationToolbar2TkAgg(NavigationToolbar2, Tk.Frame):
"""
    Public attributes
      canvas - the FigureCanvasTkAgg instance
      window - the Tk window containing the canvas
"""
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
self._idle = True
#Tk.Frame.__init__(self, master=self.canvas._tkcanvas)
NavigationToolbar2.__init__(self, canvas)
def destroy(self, *args):
del self.message
Tk.Frame.destroy(self, *args)
def set_message(self, s):
self.message.set(s)
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y0 = height-y0
y1 = height-y1
try: self.lastrect
except AttributeError: pass
else: self.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
#self.canvas.draw()
def release(self, event):
try: self.lastrect
except AttributeError: pass
else:
self.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
def set_cursor(self, cursor):
self.window.configure(cursor=cursord[cursor])
def _Button(self, text, file, command):
file = os.path.join(rcParams['datapath'], 'images', file)
im = Tk.PhotoImage(master=self, file=file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def _init_toolbar(self):
xmin, xmax = self.canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=width, height=height,
borderwidth=2)
self.update() # Make axes menu
self.bHome = self._Button( text="Home", file="home.ppm",
command=self.home)
self.bBack = self._Button( text="Back", file="back.ppm",
command = self.back)
self.bForward = self._Button(text="Forward", file="forward.ppm",
command = self.forward)
self.bPan = self._Button( text="Pan", file="move.ppm",
command = self.pan)
self.bZoom = self._Button( text="Zoom",
file="zoom_to_rect.ppm",
command = self.zoom)
self.bsubplot = self._Button( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots)
self.bsave = self._Button( text="Save", file="filesave.ppm",
command = self.save_figure)
self.message = Tk.StringVar(master=self)
self._message_label = Tk.Label(master=self, textvariable=self.message)
self._message_label.pack(side=Tk.RIGHT)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def configure_subplots(self):
toolfig = Figure(figsize=(6,3))
window = Tk.Tk()
canvas = FigureCanvasTkAgg(toolfig, master=window)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def save_figure(self):
from tkFileDialog import asksaveasfilename
from tkMessageBox import showerror
filetypes = self.canvas.get_supported_filetypes().copy()
default_filetype = self.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes[default_filetype]
del filetypes[default_filetype]
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
sorted_filetypes.insert(0, (default_filetype, default_filetype_name))
tk_filetypes = [
(name, '*.%s' % ext) for (ext, name) in sorted_filetypes]
fname = asksaveasfilename(
master=self.window,
title='Save the figure',
filetypes = tk_filetypes,
defaultextension = self.canvas.get_default_filetype()
)
if fname == "" or fname == ():
return
else:
try:
# This method will handle the delegation to the correct type
self.canvas.print_figure(fname)
except Exception, e:
showerror("Error saving file", str(e))
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
#if not hasattr(self, "omenu"):
# self.set_active(range(naxes))
# self.omenu = AxisMenu(master=self, naxes=naxes)
#else:
# self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def dynamic_update(self):
'update drawing area only if idle'
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
FigureManager = FigureManagerTkAgg
| agpl-3.0 |
weisongchen/flaskapp | venv/lib/python2.7/site-packages/sqlalchemy/orm/events.py | 5 | 84909 | # orm/events.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""ORM event interfaces.
"""
from .. import event, exc, util
from .base import _mapper_or_none
import inspect
import weakref
from . import interfaces
from . import mapperlib, instrumentation
from .session import Session, sessionmaker
from .scoping import scoped_session
from .attributes import QueryableAttribute
from .query import Query
from sqlalchemy.util.compat import inspect_getargspec
class InstrumentationEvents(event.Events):
"""Events related to class instrumentation events.
The listeners here support being established against
any new style class, that is any object that is a subclass
of 'type'. Events will then be fired off for events
against that class. If the "propagate=True" flag is passed
to event.listen(), the event will fire off for subclasses
of that class as well.
The Python ``type`` builtin is also accepted as a target,
which when used has the effect of events being emitted
for all classes.
Note the "propagate" flag here is defaulted to ``True``,
unlike the other class level events where it defaults
to ``False``. This means that new subclasses will also
be the subject of these events, when a listener
is established on a superclass.
.. versionchanged:: 0.8 - events here will emit based
on comparing the incoming class to the type of class
passed to :func:`.event.listen`. Previously, the
event would fire for any class unconditionally regardless
of what class was sent for listening, despite
documentation which stated the contrary.
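    For illustration, a minimal sketch that listens for every class by
    passing the ``type`` builtin as the target::
        from sqlalchemy import event
        @event.listens_for(type, 'class_instrument')
        def on_class_instrument(cls):
            # invoked after any mapped class is instrumented
            print "instrumented %s" % cls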
"""
_target_class_doc = "SomeBaseClass"
_dispatch_target = instrumentation.InstrumentationFactory
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
return _InstrumentationEventsHold(target)
else:
return None
@classmethod
def _listen(cls, event_key, propagate=True, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
def listen(target_cls, *arg):
listen_cls = target()
if propagate and issubclass(target_cls, listen_cls):
return fn(target_cls, *arg)
elif not propagate and target_cls is listen_cls:
return fn(target_cls, *arg)
def remove(ref):
key = event.registry._EventKey(
None, identifier, listen,
instrumentation._instrumentation_factory)
getattr(instrumentation._instrumentation_factory.dispatch,
identifier).remove(key)
target = weakref.ref(target.class_, remove)
event_key.\
with_dispatch_target(instrumentation._instrumentation_factory).\
with_wrapper(listen).base_listen(**kw)
@classmethod
def _clear(cls):
super(InstrumentationEvents, cls)._clear()
instrumentation._instrumentation_factory.dispatch._clear()
def class_instrument(self, cls):
"""Called after the given class is instrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def class_uninstrument(self, cls):
"""Called before the given class is uninstrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def attribute_instrument(self, cls, key, inst):
"""Called when an attribute is instrumented."""
class _InstrumentationEventsHold(object):
"""temporary marker object used to transfer from _accept_with() to
_listen() on the InstrumentationEvents class.
"""
def __init__(self, class_):
self.class_ = class_
dispatch = event.dispatcher(InstrumentationEvents)
class InstanceEvents(event.Events):
"""Define events specific to object lifecycle.
e.g.::
from sqlalchemy import event
def my_load_listener(target, context):
print "on load!"
event.listen(SomeClass, 'load', my_load_listener)
Available targets include:
* mapped classes
* unmapped superclasses of mapped or to-be-mapped classes
(using the ``propagate=True`` flag)
* :class:`.Mapper` objects
* the :class:`.Mapper` class itself and the :func:`.mapper`
function indicate listening for all mappers.
.. versionchanged:: 0.8.0 instance events can be associated with
unmapped superclasses of mapped classes.
Instance events are closely related to mapper events, but
are more specific to the instance and its instrumentation,
rather than its system of persistence.
When using :class:`.InstanceEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting classes as well as the
class which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
"""
_target_class_doc = "SomeClass"
_dispatch_target = instrumentation.ClassManager
@classmethod
def _new_classmanager_instance(cls, class_, classmanager):
_InstanceEventsHold.populate(class_, classmanager)
@classmethod
@util.dependencies("sqlalchemy.orm")
def _accept_with(cls, orm, target):
if isinstance(target, instrumentation.ClassManager):
return target
elif isinstance(target, mapperlib.Mapper):
return target.class_manager
elif target is orm.mapper:
return instrumentation.ClassManager
elif isinstance(target, type):
if issubclass(target, mapperlib.Mapper):
return instrumentation.ClassManager
else:
manager = instrumentation.manager_of_class(target)
if manager:
return manager
else:
return _InstanceEventsHold(target)
return None
@classmethod
def _listen(cls, event_key, raw=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if not raw:
def wrap(state, *arg, **kw):
return fn(state.obj(), *arg, **kw)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(propagate=propagate, **kw)
if propagate:
for mgr in target.subclass_managers(True):
event_key.with_dispatch_target(mgr).base_listen(
propagate=True)
@classmethod
def _clear(cls):
super(InstanceEvents, cls)._clear()
_InstanceEventsHold._clear()
def first_init(self, manager, cls):
"""Called when the first instance of a particular mapping is called.
This event is called when the ``__init__`` method of a class
is called the first time for that particular class. The event
invokes before ``__init__`` actually proceeds as well as before
the :meth:`.InstanceEvents.init` event is invoked.
"""
def init(self, target, args, kwargs):
"""Receive an instance when its constructor is called.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is
loaded from the database; see the :meth:`.InstanceEvents.load`
event in order to intercept a database load.
The event is called before the actual ``__init__`` constructor
of the object is called. The ``kwargs`` dictionary may be
modified in-place in order to affect what is passed to
``__init__``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments passed to the ``__init__`` method.
This is passed as a tuple and is currently immutable.
:param kwargs: keyword arguments passed to the ``__init__`` method.
This structure *can* be altered in place.
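        For illustration, a minimal sketch (``SomeClass`` stands in for any
        mapped class)::
            from sqlalchemy import event
            @event.listens_for(SomeClass, 'init')
            def intercept_init(target, args, kwargs):
                # supply a default keyword argument before __init__ runs
                kwargs.setdefault('status', 'new')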
.. seealso::
:meth:`.InstanceEvents.init_failure`
:meth:`.InstanceEvents.load`
"""
def init_failure(self, target, args, kwargs):
"""Receive an instance when its constructor has been called,
and raised an exception.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is loaded
from the database.
The event is invoked after an exception raised by the ``__init__``
method is caught. After the event
is invoked, the original exception is re-raised outwards, so that
the construction of the object still raises an exception. The
actual exception and stack trace raised should be present in
``sys.exc_info()``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments that were passed to the ``__init__``
method.
:param kwargs: keyword arguments that were passed to the ``__init__``
method.
.. seealso::
:meth:`.InstanceEvents.init`
:meth:`.InstanceEvents.load`
"""
def load(self, target, context):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
This typically occurs when the instance is created based on
incoming result rows, and is only called once for that
instance's lifetime.
Note that during a result-row load, this method is called upon
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
initialized, depending on what's present in the result rows.
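        For illustration, a minimal sketch (``SomeClass`` stands in for any
        mapped class; the transient ``_cache`` attribute is an assumption)::
            from sqlalchemy import event
            @event.listens_for(SomeClass, 'load')
            def on_load(target, context):
                # set up non-mapped, per-instance state after a DB load
                target._cache = {}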
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`.Query` in progress. This argument may be
``None`` if the load does not correspond to a :class:`.Query`,
such as during :meth:`.Session.merge`.
.. seealso::
:meth:`.InstanceEvents.init`
:meth:`.InstanceEvents.refresh`
:meth:`.SessionEvents.loaded_as_persistent`
"""
def refresh(self, target, context, attrs):
"""Receive an object instance after one or more attributes have
been refreshed from a query.
Contrast this to the :meth:`.InstanceEvents.load` method, which
is invoked when the object is first loaded from a query.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`.Query` in progress.
:param attrs: sequence of attribute names which
were populated, or None if all column-mapped, non-deferred
attributes were populated.
.. seealso::
:meth:`.InstanceEvents.load`
"""
def refresh_flush(self, target, flush_context, attrs):
"""Receive an object instance after one or more attributes have
been refreshed within the persistence of the object.
This event is the same as :meth:`.InstanceEvents.refresh` except
it is invoked within the unit of work flush process, and the values
here typically come from the process of handling an INSERT or
UPDATE, such as via the RETURNING clause or from Python-side default
values.
.. versionadded:: 1.0.5
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param attrs: sequence of attribute names which
were populated.
"""
def expire(self, target, attrs):
"""Receive an object instance after its attributes or some subset
have been expired.
'keys' is a list of attribute names. If None, the entire
state was expired.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param attrs: sequence of attribute
names which were expired, or None if all attributes were
expired.
"""
def pickle(self, target, state_dict):
"""Receive an object instance when its associated state is
being pickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary returned by
:class:`.InstanceState.__getstate__`, containing the state
to be pickled.
"""
def unpickle(self, target, state_dict):
"""Receive an object instance after its associated state has
been unpickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary sent to
:class:`.InstanceState.__setstate__`, containing the state
dictionary which was pickled.
"""
class _EventsHold(event.RefCollection):
"""Hold onto listeners against unmapped, uninstrumented classes.
Establish _listen() for that class' mapper/instrumentation when
those objects are created for that class.
"""
def __init__(self, class_):
self.class_ = class_
@classmethod
def _clear(cls):
cls.all_holds.clear()
class HoldEvents(object):
_dispatch_target = None
@classmethod
def _listen(cls, event_key, raw=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
if target.class_ in target.all_holds:
collection = target.all_holds[target.class_]
else:
collection = target.all_holds[target.class_] = {}
event.registry._stored_in_collection(event_key, target)
collection[event_key._key] = (event_key, raw, propagate)
if propagate:
stack = list(target.class_.__subclasses__())
while stack:
subclass = stack.pop(0)
stack.extend(subclass.__subclasses__())
subject = target.resolve(subclass)
if subject is not None:
# we are already going through __subclasses__()
# so leave generic propagate flag False
event_key.with_dispatch_target(subject).\
listen(raw=raw, propagate=False, **kw)
def remove(self, event_key):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
if isinstance(target, _EventsHold):
collection = target.all_holds[target.class_]
del collection[event_key._key]
@classmethod
def populate(cls, class_, subject):
for subclass in class_.__mro__:
if subclass in cls.all_holds:
collection = cls.all_holds[subclass]
for event_key, raw, propagate in collection.values():
if propagate or subclass is class_:
# since we can't be sure in what order different
# classes in a hierarchy are triggered with
# populate(), we rely upon _EventsHold for all event
# assignment, instead of using the generic propagate
# flag.
event_key.with_dispatch_target(subject).\
listen(raw=raw, propagate=False)
class _InstanceEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return instrumentation.manager_of_class(class_)
class HoldInstanceEvents(_EventsHold.HoldEvents, InstanceEvents):
pass
dispatch = event.dispatcher(HoldInstanceEvents)
class MapperEvents(event.Events):
"""Define events specific to mappings.
e.g.::
from sqlalchemy import event
def my_before_insert_listener(mapper, connection, target):
# execute a stored procedure upon INSERT,
# apply the value to the row to be inserted
target.calculated_value = connection.scalar(
"select my_special_function(%d)"
% target.special_number)
# associate the listener function with SomeClass,
# to execute during the "before_insert" hook
event.listen(
SomeClass, 'before_insert', my_before_insert_listener)
Available targets include:
* mapped classes
* unmapped superclasses of mapped or to-be-mapped classes
(using the ``propagate=True`` flag)
* :class:`.Mapper` objects
* the :class:`.Mapper` class itself and the :func:`.mapper`
function indicate listening for all mappers.
.. versionchanged:: 0.8.0 mapper events can be associated with
unmapped superclasses of mapped classes.
Mapper events provide hooks into critical sections of the
mapper, including those related to object instrumentation,
object loading, and object persistence. In particular, the
persistence methods :meth:`~.MapperEvents.before_insert`,
and :meth:`~.MapperEvents.before_update` are popular
places to augment the state being persisted - however, these
methods operate with several significant restrictions. The
user is encouraged to evaluate the
:meth:`.SessionEvents.before_flush` and
:meth:`.SessionEvents.after_flush` methods as more
flexible and user-friendly hooks in which to apply
additional database state during a flush.
When using :class:`.MapperEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting mappers and/or the mappers of
inheriting classes, as well as any
mapper which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event function
must have a return value, the purpose of which is either to
control subsequent event propagation, or to otherwise alter
the operation in progress by the mapper. Possible return
values are:
* ``sqlalchemy.orm.interfaces.EXT_CONTINUE`` - continue event
processing normally.
* ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent
event handlers in the chain.
* other values - the return value specified by specific listeners.
"""
_target_class_doc = "SomeClass"
_dispatch_target = mapperlib.Mapper
@classmethod
def _new_mapper_instance(cls, class_, mapper):
_MapperEventsHold.populate(class_, mapper)
@classmethod
@util.dependencies("sqlalchemy.orm")
def _accept_with(cls, orm, target):
if target is orm.mapper:
return mapperlib.Mapper
elif isinstance(target, type):
if issubclass(target, mapperlib.Mapper):
return target
else:
mapper = _mapper_or_none(target)
if mapper is not None:
return mapper
else:
return _MapperEventsHold(target)
else:
return target
@classmethod
def _listen(
cls, event_key, raw=False, retval=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if identifier in ("before_configured", "after_configured") and \
target is not mapperlib.Mapper:
util.warn(
"'before_configured' and 'after_configured' ORM events "
"only invoke with the mapper() function or Mapper class "
"as the target.")
if not raw or not retval:
if not raw:
meth = getattr(cls, identifier)
try:
target_index = \
inspect_getargspec(meth)[0].index('target') - 1
except ValueError:
target_index = None
def wrap(*arg, **kw):
if not raw and target_index is not None:
arg = list(arg)
arg[target_index] = arg[target_index].obj()
if not retval:
fn(*arg, **kw)
return interfaces.EXT_CONTINUE
else:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
if propagate:
for mapper in target.self_and_descendants:
event_key.with_dispatch_target(mapper).base_listen(
propagate=True, **kw)
else:
event_key.base_listen(**kw)
@classmethod
def _clear(cls):
super(MapperEvents, cls)._clear()
_MapperEventsHold._clear()
def instrument_class(self, mapper, class_):
r"""Receive a class when the mapper is first constructed,
before instrumentation is applied to the mapped class.
This event is the earliest phase of mapper construction.
Most attributes of the mapper are not yet initialized.
This listener can either be applied to the :class:`.Mapper`
class overall, or to any un-mapped class which serves as a base
for classes that will be mapped (using the ``propagate=True`` flag)::
Base = declarative_base()
@event.listens_for(Base, "instrument_class", propagate=True)
def on_new_class(mapper, cls_):
" ... "
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
"""
def mapper_configured(self, mapper, class_):
r"""Called when a specific mapper has completed its own configuration
within the scope of the :func:`.configure_mappers` call.
The :meth:`.MapperEvents.mapper_configured` event is invoked
for each mapper that is encountered when the
:func:`.orm.configure_mappers` function proceeds through the current
list of not-yet-configured mappers.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
When the event is called, the mapper should be in its final
state, but **not including backrefs** that may be invoked from
other mappers; they might still be pending within the
configuration operation. Bidirectional relationships that
are instead configured via the
:paramref:`.orm.relationship.back_populates` argument
*will* be fully available, since this style of relationship does not
rely upon other possibly-not-configured mappers to know that they
exist.
For an event that is guaranteed to have **all** mappers ready
to go including backrefs that are defined only on other
mappings, use the :meth:`.MapperEvents.after_configured`
event; this event invokes only after all known mappings have been
fully configured.
The :meth:`.MapperEvents.mapper_configured` event, unlike
:meth:`.MapperEvents.before_configured` or
:meth:`.MapperEvents.after_configured`,
is called for each mapper/class individually, and the mapper is
passed to the event itself. It also is called exactly once for
a particular mapper. The event is therefore useful for
configurational steps that benefit from being invoked just once
on a specific mapper basis, which don't require that "backref"
configurations are necessarily ready yet.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
.. seealso::
:meth:`.MapperEvents.before_configured`
:meth:`.MapperEvents.after_configured`
"""
# TODO: need coverage for this event
def before_configured(self):
"""Called before a series of mappers have been configured.
The :meth:`.MapperEvents.before_configured` event is invoked
each time the :func:`.orm.configure_mappers` function is
invoked, before the function has done any of its work.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
This event can **only** be applied to the :class:`.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
mapped classes. It is only invoked for all mappings as a whole::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "before_configured")
def go():
# ...
        Contrast this event to :meth:`.MapperEvents.after_configured`,
which is invoked after the series of mappers has been configured,
as well as :meth:`.MapperEvents.mapper_configured`, which is invoked
on a per-mapper basis as each one is configured to the extent possible.
Theoretically this event is called once per
application, but is actually called any time new mappers
are to be affected by a :func:`.orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "before_configured", once=True)
def go():
# ...
.. versionadded:: 0.9.3
.. seealso::
:meth:`.MapperEvents.mapper_configured`
:meth:`.MapperEvents.after_configured`
"""
def after_configured(self):
"""Called after a series of mappers have been configured.
The :meth:`.MapperEvents.after_configured` event is invoked
each time the :func:`.orm.configure_mappers` function is
invoked, after the function has completed its work.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
Contrast this event to the :meth:`.MapperEvents.mapper_configured`
event, which is called on a per-mapper basis while the configuration
operation proceeds; unlike that event, when this event is invoked,
all cross-configurations (e.g. backrefs) will also have been made
available for any mappers that were pending.
        Also contrast to :meth:`.MapperEvents.before_configured`,
which is invoked before the series of mappers has been configured.
This event can **only** be applied to the :class:`.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
mapped classes. It is only invoked for all mappings as a whole::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "after_configured")
def go():
# ...
Theoretically this event is called once per
application, but is actually called any time new mappers
have been affected by a :func:`.orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "after_configured", once=True)
def go():
# ...
.. seealso::
:meth:`.MapperEvents.mapper_configured`
:meth:`.MapperEvents.before_configured`
"""
def before_insert(self, mapper, connection, target):
"""Receive an object instance before an INSERT statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class before their INSERT statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
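        For illustration, a minimal sketch (``SomeClass`` and its
        ``created_at`` column are assumptions)::
            import datetime
            from sqlalchemy import event
            @event.listens_for(SomeClass, 'before_insert')
            def stamp_created_at(mapper, connection, target):
                # set a creation timestamp on the row about to be INSERTed
                target.created_at = datetime.datetime.utcnow()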
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_insert(self, mapper, connection, target):
"""Receive an object instance after an INSERT statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class after their INSERT statements have been
emitted at once in a previous step. In the extremely
rare case that this is not desirable, the
:func:`.mapper` can be configured with ``batch=False``,
which will cause batches of instances to be broken up
into individual (and more poorly performing)
event->persist->event steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def before_update(self, mapper, connection, target):
"""Receive an object instance before an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.before_update` is
*not* a guarantee that an UPDATE statement will be
issued, although you can affect the outcome here by
modifying attributes so that a net change in value does
exist.
To detect if the column-based attributes on the object have net
changes, and will therefore generate an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
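        For illustration, a minimal sketch (``SomeClass`` and its
        ``updated_at`` column are assumptions)::
            import datetime
            from sqlalchemy import event
            from sqlalchemy.orm import object_session
            @event.listens_for(SomeClass, 'before_update')
            def stamp_updated_at(mapper, connection, target):
                # stamp only when column-based attributes have net changes
                if object_session(target).is_modified(
                        target, include_collections=False):
                    target.updated_at = datetime.datetime.utcnow()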
The event is often called for a batch of objects of the
same class before their UPDATE statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_update(self, mapper, connection, target):
"""Receive an object instance after an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*, and for which
no UPDATE statement has proceeded. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.after_update` is
*not* a guarantee that an UPDATE statement has been
issued.
To detect if the column-based attributes on the object have net
changes, and therefore resulted in an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class after their UPDATE statements have been emitted at
once in a previous step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def before_delete(self, mapper, connection, target):
"""Receive an object instance before a DELETE statement
is emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class before their DELETE statements are emitted at
once in a later step.
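        For illustration, a minimal sketch (``SomeClass`` and the
        ``audit_log`` table are assumptions)::
            from sqlalchemy import event
            @event.listens_for(SomeClass, 'before_delete')
            def audit_delete(mapper, connection, target):
                # record the deletion on the same connection/transaction
                connection.execute(
                    "insert into audit_log (obj_id) values (%d)"
                    % target.id)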
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_delete(self, mapper, connection, target):
"""Receive an object instance after a DELETE statement
has been emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class after their DELETE statements have been emitted at
once in a previous step.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
class _MapperEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return _mapper_or_none(class_)
class HoldMapperEvents(_EventsHold.HoldEvents, MapperEvents):
pass
dispatch = event.dispatcher(HoldMapperEvents)
class SessionEvents(event.Events):
"""Define events specific to :class:`.Session` lifecycle.
e.g.::
from sqlalchemy import event
from sqlalchemy.orm import sessionmaker
def my_before_commit(session):
print "before commit!"
Session = sessionmaker()
event.listen(Session, "before_commit", my_before_commit)
The :func:`~.event.listen` function will accept
:class:`.Session` objects as well as the return result
of :class:`~.sessionmaker()` and :class:`~.scoped_session()`.
Additionally, it accepts the :class:`.Session` class which
will apply listeners to all :class:`.Session` instances
globally.
"""
_target_class_doc = "SomeSessionOrFactory"
_dispatch_target = Session
@classmethod
def _accept_with(cls, target):
if isinstance(target, scoped_session):
target = target.session_factory
if not isinstance(target, sessionmaker) and \
(
not isinstance(target, type) or
not issubclass(target, Session)
):
raise exc.ArgumentError(
"Session event listen on a scoped_session "
"requires that its creation callable "
"is associated with the Session class.")
if isinstance(target, sessionmaker):
return target.class_
elif isinstance(target, type):
if issubclass(target, scoped_session):
return Session
elif issubclass(target, Session):
return target
elif isinstance(target, Session):
return target
else:
return None
def after_transaction_create(self, session, transaction):
"""Execute when a new :class:`.SessionTransaction` is created.
This event differs from :meth:`~.SessionEvents.after_begin`
in that it occurs for each :class:`.SessionTransaction`
overall, as opposed to when transactions are begun
on individual database connections. It is also invoked
for nested transactions and subtransactions, and is always
matched by a corresponding
:meth:`~.SessionEvents.after_transaction_end` event
(assuming normal operation of the :class:`.Session`).
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
To detect if this is the outermost
:class:`.SessionTransaction`, as opposed to a "subtransaction" or a
SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
is ``None``::
@event.listens_for(session, "after_transaction_create")
def after_transaction_create(session, transaction):
if transaction.parent is None:
# work with top-level transaction
To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
:attr:`.SessionTransaction.nested` attribute::
@event.listens_for(session, "after_transaction_create")
def after_transaction_create(session, transaction):
if transaction.nested:
# work with SAVEPOINT transaction
.. seealso::
:class:`.SessionTransaction`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_transaction_end(self, session, transaction):
"""Execute when the span of a :class:`.SessionTransaction` ends.
This event differs from :meth:`~.SessionEvents.after_commit`
in that it corresponds to all :class:`.SessionTransaction`
objects in use, including those for nested transactions
and subtransactions, and is always matched by a corresponding
:meth:`~.SessionEvents.after_transaction_create` event.
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
To detect if this is the outermost
:class:`.SessionTransaction`, as opposed to a "subtransaction" or a
SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
is ``None``::
@event.listens_for(session, "after_transaction_create")
def after_transaction_end(session, transaction):
if transaction.parent is None:
# work with top-level transaction
To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
:attr:`.SessionTransaction.nested` attribute::
@event.listens_for(session, "after_transaction_create")
def after_transaction_end(session, transaction):
if transaction.nested:
# work with SAVEPOINT transaction
.. seealso::
:class:`.SessionTransaction`
:meth:`~.SessionEvents.after_transaction_create`
"""
def before_commit(self, session):
"""Execute before commit is called.
.. note::
The :meth:`~.SessionEvents.before_commit` hook is *not* per-flush,
that is, the :class:`.Session` can emit SQL to the database
many times within the scope of a transaction.
For interception of these events, use the
:meth:`~.SessionEvents.before_flush`,
:meth:`~.SessionEvents.after_flush`, or
:meth:`~.SessionEvents.after_flush_postexec`
events.
:param session: The target :class:`.Session`.
.. seealso::
:meth:`~.SessionEvents.after_commit`
:meth:`~.SessionEvents.after_begin`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
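        For example, a minimal sketch of a listener attached to all
        sessions (``log`` is assumed to be a configured logger)::
            @event.listens_for(Session, "before_commit")
            def receive_before_commit(session):
                log.info("about to commit %s", session)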
"""
def after_commit(self, session):
"""Execute after a commit has occurred.
.. note::
The :meth:`~.SessionEvents.after_commit` hook is *not* per-flush,
that is, the :class:`.Session` can emit SQL to the database
many times within the scope of a transaction.
For interception of these events, use the
:meth:`~.SessionEvents.before_flush`,
:meth:`~.SessionEvents.after_flush`, or
:meth:`~.SessionEvents.after_flush_postexec`
events.
.. note::
The :class:`.Session` is not in an active transaction
when the :meth:`~.SessionEvents.after_commit` event is invoked,
and therefore can not emit SQL. To emit SQL corresponding to
every transaction, use the :meth:`~.SessionEvents.before_commit`
event.
:param session: The target :class:`.Session`.
.. seealso::
:meth:`~.SessionEvents.before_commit`
:meth:`~.SessionEvents.after_begin`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
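        For example, a minimal sketch that invalidates an application-level
        cache once a commit has succeeded (the ``cache`` object is an
        assumption for illustration; note that no SQL may be emitted here)::
            @event.listens_for(Session, "after_commit")
            def receive_after_commit(session):
                cache.clear()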
"""
def after_rollback(self, session):
"""Execute after a real DBAPI rollback has occurred.
Note that this event only fires when the *actual* rollback against
the database occurs - it does *not* fire each time the
:meth:`.Session.rollback` method is called, if the underlying
DBAPI transaction has already been rolled back. In many
cases, the :class:`.Session` will not be in
an "active" state during this event, as the current
transaction is not valid. To acquire a :class:`.Session`
which is active after the outermost rollback has proceeded,
use the :meth:`.SessionEvents.after_soft_rollback` event, checking the
:attr:`.Session.is_active` flag.
:param session: The target :class:`.Session`.
"""
def after_soft_rollback(self, session, previous_transaction):
"""Execute after any rollback has occurred, including "soft"
rollbacks that don't actually emit at the DBAPI level.
This corresponds to both nested and outer rollbacks, i.e.
the innermost rollback that calls the DBAPI's
rollback() method, as well as the enclosing rollback
calls that only pop themselves from the transaction stack.
The given :class:`.Session` can be used to invoke SQL and
:meth:`.Session.query` operations after an outermost rollback
by first checking the :attr:`.Session.is_active` flag::
@event.listens_for(Session, "after_soft_rollback")
def do_something(session, previous_transaction):
if session.is_active:
session.execute("select * from some_table")
:param session: The target :class:`.Session`.
:param previous_transaction: The :class:`.SessionTransaction`
transactional marker object which was just closed. The current
:class:`.SessionTransaction` for the given :class:`.Session` is
available via the :attr:`.Session.transaction` attribute.
.. versionadded:: 0.7.3
"""
def before_flush(self, session, flush_context, instances):
"""Execute before flush process has started.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param instances: Usually ``None``, this is the collection of
objects which can be passed to the :meth:`.Session.flush` method
(note this usage is deprecated).
.. seealso::
:meth:`~.SessionEvents.after_flush`
:meth:`~.SessionEvents.after_flush_postexec`
:ref:`session_persistence_events`
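        For example, a minimal sketch that timestamps modified objects on
        each flush (the ``AuditedMixin`` class and ``updated_at`` attribute
        are assumptions for illustration)::
            import datetime
            @event.listens_for(Session, "before_flush")
            def receive_before_flush(session, flush_context, instances):
                for obj in session.dirty:
                    if isinstance(obj, AuditedMixin):
                        obj.updated_at = datetime.datetime.utcnow()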
"""
def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
Note that the session's state is still in pre-flush, i.e. 'new',
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
.. seealso::
:meth:`~.SessionEvents.before_flush`
:meth:`~.SessionEvents.after_flush_postexec`
:ref:`session_persistence_events`
"""
def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
.. seealso::
:meth:`~.SessionEvents.before_flush`
:meth:`~.SessionEvents.after_flush`
:ref:`session_persistence_events`
"""
def after_begin(self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
:param session: The target :class:`.Session`.
:param transaction: The :class:`.SessionTransaction`.
:param connection: The :class:`~.engine.Connection` object
which will be used for SQL statements.
.. seealso::
:meth:`~.SessionEvents.before_commit`
:meth:`~.SessionEvents.after_commit`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def before_attach(self, session, instance):
"""Execute before an instance is attached to a session.
This is called before an add, delete or merge causes
the object to be part of the session.
.. versionadded:: 0.8. Note that :meth:`~.SessionEvents.after_attach`
now fires off after the item is part of the session.
:meth:`.before_attach` is provided for those cases where
the item should not yet be part of the session state.
.. seealso::
:meth:`~.SessionEvents.after_attach`
:ref:`session_lifecycle_events`
"""
def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge.
.. note::
As of 0.8, this event fires off *after* the item
has been fully associated with the session, which is
different than previous releases. For event
handlers that require the object not yet
be part of session state (such as handlers which
may autoflush while the target object is not
yet complete) consider the
new :meth:`.before_attach` event.
.. seealso::
:meth:`~.SessionEvents.before_attach`
:ref:`session_lifecycle_events`
"""
@event._legacy_signature("0.9",
["session", "query", "query_context", "result"],
lambda update_context: (
update_context.session,
update_context.query,
update_context.context,
update_context.result))
def after_bulk_update(self, update_context):
"""Execute after a bulk update operation to the session.
This is called as a result of the :meth:`.Query.update` method.
:param update_context: an "update context" object which contains
details about the update, including these attributes:
         * ``session`` - the :class:`.Session` involved
         * ``query`` - the :class:`.Query` object that this update operation
           was called upon.
         * ``context`` - the :class:`.QueryContext` object, corresponding
           to the invocation of an ORM query.
         * ``result`` - the :class:`.ResultProxy` returned as a result of the
           bulk UPDATE operation.
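        A minimal sketch that reports how many rows the bulk UPDATE
        matched (``log`` is assumed to be a configured logger)::
            @event.listens_for(Session, "after_bulk_update")
            def receive_after_bulk_update(update_context):
                log.info("bulk update matched %d rows",
                         update_context.result.rowcount)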
"""
@event._legacy_signature("0.9",
["session", "query", "query_context", "result"],
lambda delete_context: (
delete_context.session,
delete_context.query,
delete_context.context,
delete_context.result))
def after_bulk_delete(self, delete_context):
"""Execute after a bulk delete operation to the session.
This is called as a result of the :meth:`.Query.delete` method.
        :param delete_context: a "delete context" object which contains
         details about the delete, including these attributes:
         * ``session`` - the :class:`.Session` involved
         * ``query`` - the :class:`.Query` object that this delete operation
           was called upon.
         * ``context`` - the :class:`.QueryContext` object, corresponding
           to the invocation of an ORM query.
         * ``result`` - the :class:`.ResultProxy` returned as a result of the
           bulk DELETE operation.
"""
def transient_to_pending(self, session, instance):
"""Intercept the "transient to pending" transition for a specific object.
This event is a specialization of the
:meth:`.SessionEvents.after_attach` event which is only invoked
for this specific transition. It is invoked typically during the
:meth:`.Session.add` call.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def pending_to_transient(self, session, instance):
"""Intercept the "pending to transient" transition for a specific object.
        This less common transition occurs when a pending object that has
not been flushed is evicted from the session; this can occur
when the :meth:`.Session.rollback` method rolls back the transaction,
or when the :meth:`.Session.expunge` method is used.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_transient(self, session, instance):
"""Intercept the "persistent to transient" transition for a specific object.
        This less common transition occurs when a pending object that has
        been flushed is evicted from the session; this can occur
when the :meth:`.Session.rollback` method rolls back the transaction.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def pending_to_persistent(self, session, instance):
"""Intercept the "pending to persistent"" transition for a specific object.
This event is invoked within the flush process, and is
similar to scanning the :attr:`.Session.new` collection within
the :meth:`.SessionEvents.after_flush` event. However, in this
case the object has already been moved to the persistent state
when the event is called.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
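        For example, a minimal sketch that collects objects as they become
        persistent (the ``persisted_log`` list is an assumption for
        illustration)::
            persisted_log = []
            @event.listens_for(Session, "pending_to_persistent")
            def receive_pending_to_persistent(session, instance):
                persisted_log.append(instance)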
"""
def detached_to_persistent(self, session, instance):
"""Intercept the "detached to persistent" transition for a specific object.
This event is a specialization of the
:meth:`.SessionEvents.after_attach` event which is only invoked
for this specific transition. It is invoked typically during the
:meth:`.Session.add` call, as well as during the
:meth:`.Session.delete` call if the object was not previously
associated with the
:class:`.Session` (note that an object marked as "deleted" remains
in the "persistent" state until the flush proceeds).
.. note::
If the object becomes persistent as part of a call to
:meth:`.Session.delete`, the object is **not** yet marked as
deleted when this event is called. To detect deleted objects,
check the ``deleted`` flag sent to the
           :meth:`.SessionEvents.persistent_to_detached` event after the
flush proceeds, or check the :attr:`.Session.deleted` collection
within the :meth:`.SessionEvents.before_flush` event if deleted
objects need to be intercepted before the flush.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def loaded_as_persistent(self, session, instance):
"""Intercept the "loaded as persistent" transition for a specific object.
This event is invoked within the ORM loading process, and is invoked
very similarly to the :meth:`.InstanceEvents.load` event. However,
the event here is linkable to a :class:`.Session` class or instance,
rather than to a mapper or class hierarchy, and integrates
with the other session lifecycle events smoothly. The object
is guaranteed to be present in the session's identity map when
this event is called.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_deleted(self, session, instance):
"""Intercept the "persistent to deleted" transition for a specific object.
This event is invoked when a persistent object's identity
is deleted from the database within a flush, however the object
still remains associated with the :class:`.Session` until the
transaction completes.
If the transaction is rolled back, the object moves again
to the persistent state, and the
:meth:`.SessionEvents.deleted_to_persistent` event is called.
If the transaction is committed, the object becomes detached,
which will emit the :meth:`.SessionEvents.deleted_to_detached`
event.
Note that while the :meth:`.Session.delete` method is the primary
public interface to mark an object as deleted, many objects
get deleted due to cascade rules, which are not always determined
until flush time. Therefore, there's no way to catch
every object that will be deleted until the flush has proceeded.
        The :meth:`.SessionEvents.persistent_to_deleted` event is therefore
invoked at the end of a flush.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def deleted_to_persistent(self, session, instance):
"""Intercept the "deleted to persistent" transition for a specific object.
This transition occurs only when an object that's been deleted
successfully in a flush is restored due to a call to
:meth:`.Session.rollback`. The event is not called under
any other circumstances.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def deleted_to_detached(self, session, instance):
"""Intercept the "deleted to detached" transition for a specific object.
This event is invoked when a deleted object is evicted
from the session. The typical case when this occurs is when
the transaction for a :class:`.Session` in which the object
was deleted is committed; the object moves from the deleted
state to the detached state.
It is also invoked for objects that were deleted in a flush
when the :meth:`.Session.expunge_all` or :meth:`.Session.close`
        methods are called, as well as if the object is individually
expunged from its deleted state via :meth:`.Session.expunge`.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_detached(self, session, instance):
"""Intercept the "persistent to detached" transition for a specific object.
This event is invoked when a persistent object is evicted
from the session. There are many conditions that cause this
to happen, including:
* using a method such as :meth:`.Session.expunge`
or :meth:`.Session.close`
        * calling the :meth:`.Session.rollback` method, when the object
was part of an INSERT statement for that session's transaction
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
:param deleted: boolean. If True, indicates this object moved
to the detached state because it was marked as deleted and flushed.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
class AttributeEvents(event.Events):
"""Define events for object attributes.
These are typically defined on the class-bound descriptor for the
target class.
e.g.::
from sqlalchemy import event
def my_append_listener(target, value, initiator):
print "received append event for target: %s" % target
event.listen(MyClass.collection, 'append', my_append_listener)
Listeners have the option to return a possibly modified version
of the value, when the ``retval=True`` flag is passed
to :func:`~.event.listen`::
def validate_phone(target, value, oldvalue, initiator):
"Strip non-numeric characters from a phone number"
            return re.sub(r'\D', '', value)
# setup listener on UserContact.phone attribute, instructing
# it to use the return value
listen(UserContact.phone, 'set', validate_phone, retval=True)
A validation function like the above can also raise an exception
such as :exc:`ValueError` to halt the operation.
Several modifiers are available to the :func:`~.event.listen` function.
:param active_history=False: When True, indicates that the
"set" event would like to receive the "old" value being
replaced unconditionally, even if this requires firing off
database loads. Note that ``active_history`` can also be
set directly via :func:`.column_property` and
:func:`.relationship`.
:param propagate=False: When True, the listener function will
be established not just for the class attribute given, but
for attributes of the same name on all current subclasses
of that class, as well as all future subclasses of that
class, using an additional listener that listens for
instrumentation events.
:param raw=False: When True, the "target" argument to the
event will be the :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event
listening must return the "value" argument from the
function. This gives the listening function the opportunity
to change the value that is ultimately used for a "set"
or "append" event.
"""
_target_class_doc = "SomeClass.some_attribute"
_dispatch_target = QueryableAttribute
@staticmethod
def _set_dispatch(cls, dispatch_cls):
dispatch = event.Events._set_dispatch(cls, dispatch_cls)
dispatch_cls._active_history = False
return dispatch
@classmethod
def _accept_with(cls, target):
# TODO: coverage
if isinstance(target, interfaces.MapperProperty):
return getattr(target.parent.class_, target.key)
else:
return target
@classmethod
def _listen(cls, event_key, active_history=False,
raw=False, retval=False,
propagate=False):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if active_history:
target.dispatch._active_history = True
if not raw or not retval:
def wrap(target, value, *arg):
if not raw:
target = target.obj()
if not retval:
fn(target, value, *arg)
return value
else:
return fn(target, value, *arg)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(propagate=propagate)
if propagate:
manager = instrumentation.manager_of_class(target.class_)
for mgr in manager.subclass_managers(True):
event_key.with_dispatch_target(
mgr[target.key]).base_listen(propagate=True)
def append(self, target, value, initiator):
"""Receive a collection append event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being appended. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
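        For example, a minimal sketch that normalizes string values as they
        are appended (``SomeClass.collection`` is an assumed mapped
        collection; ``retval=True`` makes the returned value the one used)::
            @event.listens_for(SomeClass.collection, 'append', retval=True)
            def receive_append(target, value, initiator):
                return value.strip() if isinstance(value, str) else value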
"""
def remove(self, target, value, initiator):
"""Receive a collection remove event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being removed.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: No return value is defined for this event.
"""
def set(self, target, value, oldvalue, initiator):
"""Receive a scalar set event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being set. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param oldvalue: the previous value being replaced. This
may also be the symbol ``NEVER_SET`` or ``NO_VALUE``.
If the listener is registered with ``active_history=True``,
the previous value of the attribute will be loaded from
the database if the existing value is currently unloaded
or expired.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
"""
def init_scalar(self, target, value, dict_):
"""Receive a scalar "init" event.
This event is invoked when an uninitialized, unpersisted scalar
attribute is accessed. A value of ``None`` is typically returned
in this case; no changes are made to the object's state.
The event handler can alter this behavior in two ways.
One is that a value other than ``None`` may be returned. The other
is that the value may be established as part of the object's state,
which will also have the effect that it is persisted.
Typical use is to establish a specific default value of an attribute
upon access::
SOME_CONSTANT = 3.1415926
@event.listens_for(
MyClass.some_attribute, "init_scalar",
retval=True, propagate=True)
            def _init_some_attribute(target, value, dict_):
dict_['some_attribute'] = SOME_CONSTANT
return SOME_CONSTANT
Above, we initialize the attribute ``MyClass.some_attribute`` to the
value of ``SOME_CONSTANT``. The above code includes the following
features:
* By setting the value ``SOME_CONSTANT`` in the given ``dict_``,
we indicate that the value is to be persisted to the database.
**The given value is only persisted to the database if we
explicitly associate it with the object**. The ``dict_`` given
is the ``__dict__`` element of the mapped object, assuming the
default attribute instrumentation system is in place.
* By establishing the ``retval=True`` flag, the value we return
from the function will be returned by the attribute getter.
Without this flag, the event is assumed to be a passive observer
and the return value of our function is ignored.
* The ``propagate=True`` flag is significant if the mapped class
includes inheriting subclasses, which would also make use of this
event listener. Without this flag, an inheriting subclass will
not use our event handler.
When we establish the value in the given dictionary, the value will
be used in the INSERT statement established by the unit of work.
Normally, the default returned value of ``None`` is not established as
part of the object, to avoid the issue of mutations occurring to the
object in response to a normally passive "get" operation, and also
sidesteps the issue of whether or not the :meth:`.AttributeEvents.set`
event should be awkwardly fired off during an attribute access
operation. This does not impact the INSERT operation since the
``None`` value matches the value of ``NULL`` that goes into the
database in any case; note that ``None`` is skipped during the INSERT
to ensure that column and SQL-level default functions can fire off.
The attribute set event :meth:`.AttributeEvents.set` as well as the
related validation feature provided by :obj:`.orm.validates` is
**not** invoked when we apply our value to the given ``dict_``. To
        have these events invoked in response to our newly generated
value, apply the value to the given object as a normal attribute
set operation::
SOME_CONSTANT = 3.1415926
@event.listens_for(
MyClass.some_attribute, "init_scalar",
retval=True, propagate=True)
            def _init_some_attribute(target, value, dict_):
# will also fire off attribute set events
target.some_attribute = SOME_CONSTANT
return SOME_CONSTANT
When multiple listeners are set up, the generation of the value
is "chained" from one listener to the next by passing the value
returned by the previous listener that specifies ``retval=True``
as the ``value`` argument of the next listener.
The :meth:`.AttributeEvents.init_scalar` event may be used to
extract values from the default values and/or callables established on
mapped :class:`.Column` objects. See the "active column defaults"
example in :ref:`examples_instrumentation` for an example of this.
.. versionadded:: 1.1
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value that is to be returned before this event
         listener is invoked. This value begins as the value ``None``,
however will be the return value of the previous event handler
function if multiple listeners are present.
:param dict_: the attribute dictionary of this mapped object.
This is normally the ``__dict__`` of the object, but in all cases
represents the destination that the attribute system uses to get
at the actual value of this attribute. Placing the value in this
dictionary has the effect that the value will be used in the
INSERT statement generated by the unit of work.
.. seealso::
:ref:`examples_instrumentation` - see the
``active_column_defaults.py`` example.
"""
def init_collection(self, target, collection, collection_adapter):
"""Receive a 'collection init' event.
This event is triggered for a collection-based attribute, when
the initial "empty collection" is first generated for a blank
attribute, as well as for when the collection is replaced with
a new one, such as via a set event.
E.g., given that ``User.addresses`` is a relationship-based
collection, the event is triggered here::
u1 = User()
u1.addresses.append(a1) # <- new collection
and also during replace operations::
u1.addresses = [a2, a3] # <- new collection
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param collection: the new collection. This will always be generated
from what was specified as
:paramref:`.RelationshipProperty.collection_class`, and will always
be empty.
        :param collection_adapter: the :class:`.CollectionAdapter` that will
mediate internal access to the collection.
.. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` events supersede
the :class:`.collection.linker` hook.
"""
    def dispose_collection(self, target, collection, collection_adapter):
"""Receive a 'collection dispose' event.
This event is triggered for a collection-based attribute when
a collection is replaced, that is::
u1.addresses.append(a1)
u1.addresses = [a2, a3] # <- old collection is disposed
The mechanics of the event will typically include that the given
collection is empty, even if it stored objects while being replaced.
.. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` events supersede
the :class:`.collection.linker` hook.
"""
class QueryEvents(event.Events):
"""Represent events within the construction of a :class:`.Query` object.
The events here are intended to be used with an as-yet-unreleased
inspection system for :class:`.Query`. Some very basic operations
are possible now, however the inspection system is intended to allow
complex query manipulations to be automated.
.. versionadded:: 1.0.0
"""
_target_class_doc = "SomeQuery"
_dispatch_target = Query
def before_compile(self, query):
"""Receive the :class:`.Query` object before it is composed into a
core :class:`.Select` object.
This event is intended to allow changes to the query given::
@event.listens_for(Query, "before_compile", retval=True)
def no_deleted(query):
for desc in query.column_descriptions:
if desc['type'] is User:
entity = desc['entity']
query = query.filter(entity.deleted == False)
return query
The event should normally be listened with the ``retval=True``
parameter set, so that the modified query may be returned.
"""
@classmethod
def _listen(
cls, event_key, retval=False, **kw):
fn = event_key._listen_fn
if not retval:
def wrap(*arg, **kw):
                # retval is known to be False in this branch; invoke the
                # listener and return the original Query unchanged.
                query = arg[0]
                fn(*arg, **kw)
                return query
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(**kw)
| mit |
lshain-android-source/external-chromium_org | chrome/test/functional/webrtc_call.py | 29 | 8808 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
# This little construct ensures we can run even if we have a bad version of
# psutil installed. If so, we'll just skip the test that needs it.
_HAS_CORRECT_PSUTIL_VERSION = False
try:
import psutil
if 'version_info' in dir(psutil):
# If psutil has any version info at all, it's recent enough.
_HAS_CORRECT_PSUTIL_VERSION = True
except ImportError, e:
pass
# Note: pyauto_functional must come before pyauto.
import pyauto_functional
import pyauto
import pyauto_utils
import webrtc_test_base
class WebrtcCallTest(webrtc_test_base.WebrtcTestBase):
"""Test we can set up a WebRTC call and disconnect it.
Prerequisites: This test case must run on a machine with a webcam, either
fake or real, and with some kind of audio device. You must make the
peerconnection_server target before you run.
The test case will launch a custom binary
(peerconnection_server) which will allow two WebRTC clients to find each
other. For more details, see the source code which is available at the site
http://code.google.com/p/libjingle/source/browse/ (make sure to browse to
trunk/talk/examples/peerconnection/server).
"""
def setUp(self):
pyauto.PyUITest.setUp(self)
self.StartPeerConnectionServer()
def tearDown(self):
self.StopPeerConnectionServer()
pyauto.PyUITest.tearDown(self)
self.assertEquals('', self.CheckErrorsAndCrashes())
def _SimpleWebrtcCall(self, request_video, request_audio, duration_seconds=0):
"""Tests we can call and hang up with WebRTC.
This test exercises pretty much the whole happy-case for the WebRTC
JavaScript API. Currently, it exercises a normal call setup using the API
defined at http://dev.w3.org/2011/webrtc/editor/webrtc.html. The API is
still evolving.
The test will load the supplied HTML file, which in turn will load different
javascript files depending on which version of the signaling protocol
we are running.
The supplied HTML file will be loaded in two tabs and tell the web
pages to start up WebRTC, which will acquire video and audio devices on the
system. This will launch a dialog in Chrome which we click past using the
automation controller. Then, we will order both tabs to connect the server,
which will make the two tabs aware of each other. Once that is done we order
one tab to call the other.
We make sure that the javascript tells us that the call succeeded, lets it
run for a while and try to hang up the call after that. We verify video is
playing by using the video detector.
Args:
request_video: Whether to request video.
request_audio: Whether to request audio.
duration_seconds: The number of seconds to keep the call up before
shutting it down.
"""
self._SetupCall(request_video=request_video, request_audio=request_audio)
if duration_seconds:
print 'Call up: sleeping %d seconds...' % duration_seconds
      time.sleep(duration_seconds)
# The hang-up will automatically propagate to the second tab.
self.HangUp(from_tab_with_index=0)
self.WaitUntilHangUpVerified(tab_index=1)
self.Disconnect(tab_index=0)
self.Disconnect(tab_index=1)
# Ensure we didn't miss any errors.
self.AssertNoFailures(tab_index=0)
self.AssertNoFailures(tab_index=1)
def testWebrtcCall(self):
self.LoadTestPageInTwoTabs()
self._SimpleWebrtcCall(request_video=True, request_audio=True)
def testWebrtcVideoOnlyCall(self):
self.LoadTestPageInTwoTabs()
self._SimpleWebrtcCall(request_video=True, request_audio=False)
def testWebrtcAudioOnlyCall(self):
self.LoadTestPageInTwoTabs()
self._SimpleWebrtcCall(request_video=False, request_audio=True)
def testWebrtcJsep01CallAndMeasureCpu20Seconds(self):
if not _HAS_CORRECT_PSUTIL_VERSION:
print ('WARNING: Can not run cpu/mem measurements with this version of '
'psutil. You must have at least psutil 0.4.1 installed for the '
'version of python you are running this test with.')
return
self.LoadTestPageInTwoTabs(test_page='webrtc_jsep01_test.html')
# Prepare CPU measurements.
renderer_process = self._GetChromeRendererProcess(tab_index=0)
renderer_process.get_cpu_percent()
self._SimpleWebrtcCall(request_video=True,
request_audio=True,
duration_seconds=20)
cpu_usage = renderer_process.get_cpu_percent(interval=0)
mem_usage_bytes = renderer_process.get_memory_info()[0]
mem_usage_kb = float(mem_usage_bytes) / 1024
pyauto_utils.PrintPerfResult('cpu', 'jsep01_call', cpu_usage, '%')
pyauto_utils.PrintPerfResult('memory', 'jsep01_call', mem_usage_kb, 'KiB')
def testLocalPreview(self):
"""Brings up a local preview and ensures video is playing.
This test will launch a window with a single tab and run a getUserMedia call
which will give us access to the webcam and microphone. Then the javascript
code will hook up the webcam data to the local-view video tag. We will
detect video in that tag using the video detector, and if we see video
moving the test passes.
"""
self.LoadTestPageInOneTab()
self.assertEquals('ok-got-stream', self.GetUserMedia(tab_index=0))
self._StartDetectingVideo(tab_index=0, video_element='local-view')
self._WaitForVideo(tab_index=0, expect_playing=True)
def testHandlesNewGetUserMediaRequestSeparately(self):
"""Ensures WebRTC doesn't allow new requests to piggy-back on old ones."""
self.LoadTestPageInTwoTabs()
self.GetUserMedia(tab_index=0)
self.GetUserMedia(tab_index=1)
self.Connect("user_1", tab_index=0)
self.Connect("user_2", tab_index=1)
self.CreatePeerConnection(tab_index=0)
self.AddUserMediaLocalStream(tab_index=0)
self.EstablishCall(from_tab_with_index=0, to_tab_with_index=1)
self.assertEquals('failed-with-error-PERMISSION_DENIED',
self.GetUserMedia(tab_index=0, action='cancel'))
self.assertEquals('failed-with-error-PERMISSION_DENIED',
self.GetUserMedia(tab_index=0, action='dismiss'))
def _SetupCall(self, request_video, request_audio):
"""Gets user media and establishes a call.
Assumes that two tabs are already opened with a suitable test page.
Args:
request_video: Whether to request video.
request_audio: Whether to request audio.
"""
self.assertEquals('ok-got-stream', self.GetUserMedia(
tab_index=0, request_video=request_video, request_audio=request_audio))
self.assertEquals('ok-got-stream', self.GetUserMedia(
tab_index=1, request_video=request_video, request_audio=request_audio))
self.Connect('user_1', tab_index=0)
self.Connect('user_2', tab_index=1)
self.CreatePeerConnection(tab_index=0)
self.AddUserMediaLocalStream(tab_index=0)
self.EstablishCall(from_tab_with_index=0, to_tab_with_index=1)
if request_video:
self._StartDetectingVideo(tab_index=0, video_element='remote-view')
self._StartDetectingVideo(tab_index=1, video_element='remote-view')
self._WaitForVideo(tab_index=0, expect_playing=True)
self._WaitForVideo(tab_index=1, expect_playing=True)
def _StartDetectingVideo(self, tab_index, video_element):
self.assertEquals('ok-started', self.ExecuteJavascript(
'startDetection("%s", "frame-buffer", 320, 240)' % video_element,
        tab_index=tab_index))
def _WaitForVideo(self, tab_index, expect_playing):
# TODO(phoglund): Remove this hack if we manage to get a more stable Linux
# bot to run these tests.
if self.IsLinux():
print "Linux; pretending to wait for video..."
time.sleep(1)
return
    expect_retval = 'video-playing' if expect_playing else 'video-not-playing'
video_playing = self.WaitUntil(
function=lambda: self.ExecuteJavascript('isVideoPlaying()',
tab_index=tab_index),
expect_retval=expect_retval)
self.assertTrue(video_playing,
                    msg='Timed out while waiting for isVideoPlaying to ' +
'return ' + expect_retval + '.')
def _GetChromeRendererProcess(self, tab_index):
"""Returns the Chrome renderer process as a psutil process wrapper."""
tab_info = self.GetBrowserInfo()['windows'][0]['tabs'][tab_index]
renderer_id = tab_info['renderer_pid']
if not renderer_id:
self.fail('Can not find the tab renderer process.')
return psutil.Process(renderer_id)
if __name__ == '__main__':
pyauto_functional.Main()
| bsd-3-clause |
drexly/openhgsenti | lib/django/forms/fields.py | 42 | 47141 | """
Field classes.
"""
from __future__ import unicode_literals
import copy
import datetime
import os
import re
import sys
import uuid
import warnings
from decimal import Decimal, DecimalException
from io import BytesIO
from django.core import validators
from django.core.exceptions import ValidationError
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES # NOQA
from django.forms.boundfield import BoundField
from django.forms.utils import from_current_timezone, to_current_timezone
from django.forms.widgets import (
FILE_INPUT_CONTRADICTION, CheckboxInput, ClearableFileInput, DateInput,
DateTimeInput, EmailInput, HiddenInput, MultipleHiddenInput,
NullBooleanSelect, NumberInput, Select, SelectMultiple,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, TextInput, TimeInput,
URLInput,
)
from django.utils import formats, six
from django.utils.dateparse import parse_duration
from django.utils.deprecation import (
RemovedInDjango110Warning, RenameMethodsBase,
)
from django.utils.duration import duration_string
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.ipv6 import clean_ipv6_address
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
__all__ = (
'Field', 'CharField', 'IntegerField',
'DateField', 'TimeField', 'DateTimeField', 'DurationField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'GenericIPAddressField', 'FilePathField',
'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField', 'UUIDField',
)
class RenameFieldMethods(RenameMethodsBase):
renamed_methods = (
('_has_changed', 'has_changed', RemovedInDjango110Warning),
)
class Field(six.with_metaclass(RenameFieldMethods, object)):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_validators = [] # Default set of validators
# Add an 'invalid' entry to default_error_message if you want a specific
# field error message not raised by the field validators.
default_error_messages = {
'required': _('This field is required.'),
}
empty_values = list(validators.EMPTY_VALUES)
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text='', error_messages=None, show_hidden_initial=False,
validators=[], localize=False, disabled=False, label_suffix=None):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
# show_hidden_initial -- Boolean that specifies if it is needed to render a
# hidden widget with initial value after widget.
# validators -- List of additional validators to use
# localize -- Boolean that specifies if the field should be localized.
# disabled -- Boolean that specifies whether the field is disabled, that
# is its widget is shown in the form but not editable.
# label_suffix -- Suffix to be added to the label. Overrides
# form's label_suffix.
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
self.help_text = help_text
self.disabled = disabled
self.label_suffix = label_suffix
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Trigger the localization machinery if needed.
self.localize = localize
if self.localize:
widget.is_localized = True
# Let the widget know whether it should display as required.
widget.is_required = self.required
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = self.default_validators + validators
super(Field, self).__init__()
def prepare_value(self, value):
return value
def to_python(self, value):
return value
def validate(self, value):
if value in self.empty_values and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
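        A doctest-style sketch using a concrete subclass::
            >>> from django import forms
            >>> forms.IntegerField().clean('42')
            42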
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
return data
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def has_changed(self, initial, data):
"""
Return True if data differs from initial.
"""
# For purposes of seeing whether something has changed, None is
        # the same as an empty string; if the data or initial value we get
        # is None, replace it with ''.
initial_value = initial if initial is not None else ''
try:
data = self.to_python(data)
if hasattr(self, '_coerce'):
data = self._coerce(data)
initial_value = self._coerce(initial_value)
except ValidationError:
return True
data_value = data if data is not None else ''
return initial_value != data_value
def get_bound_field(self, form, field_name):
"""
Return a BoundField instance that will be used when accessing the form
field in a template.
"""
return BoundField(form, self, field_name)
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
result.validators = self.validators[:]
return result
class CharField(Field):
def __init__(self, max_length=None, min_length=None, strip=True, *args, **kwargs):
self.max_length = max_length
self.min_length = min_length
self.strip = strip
super(CharField, self).__init__(*args, **kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(int(min_length)))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(int(max_length)))
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
value = force_text(value)
if self.strip:
value = value.strip()
return value
def widget_attrs(self, widget):
attrs = super(CharField, self).widget_attrs(widget)
if self.max_length is not None:
# The HTML attribute is maxlength, not max_length.
attrs.update({'maxlength': str(self.max_length)})
return attrs
class IntegerField(Field):
widget = NumberInput
default_error_messages = {
'invalid': _('Enter a whole number.'),
}
re_decimal = re.compile(r'\.0*\s*$')
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
if kwargs.get('localize') and self.widget == NumberInput:
# Localized number input is not well supported on most browsers
kwargs.setdefault('widget', super(IntegerField, self).widget)
super(IntegerField, self).__init__(*args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that int() can be called on the input. Returns the result
of int(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
# Strip trailing decimal and zeros.
try:
value = int(self.re_decimal.sub('', str(value)))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(IntegerField, self).widget_attrs(widget)
if isinstance(widget, NumberInput):
if self.min_value is not None:
attrs['min'] = self.min_value
if self.max_value is not None:
attrs['max'] = self.max_value
return attrs
class FloatField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
}
def to_python(self, value):
"""
Validates that float() can be called on the input. Returns the result
of float(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(FloatField, self).validate(value)
# Check for NaN (which is the only thing not equal to itself) and +/- infinity
if value != value or value in (Decimal('Inf'), Decimal('-Inf')):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(FloatField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
attrs.setdefault('step', 'any')
return attrs
class DecimalField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
}
def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(max_value, min_value, *args, **kwargs)
self.validators.append(validators.DecimalValidator(max_digits, decimal_places))
def to_python(self, value):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
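        A doctest-style sketch::
            >>> DecimalField().to_python('3.14')
            Decimal('3.14')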
"""
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
value = smart_text(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(DecimalField, self).validate(value)
if value in self.empty_values:
return
# Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
# since it is never equal to itself. However, NaN is the only value that
        # isn't equal to itself, so we can use this to identify NaN.
if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
raise ValidationError(self.error_messages['invalid'], code='invalid')
def widget_attrs(self, widget):
attrs = super(DecimalField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
if self.decimal_places is not None:
# Use exponential notation for small values since they might
# be parsed as 0 otherwise. ref #20765
step = str(Decimal('1') / 10 ** self.decimal_places).lower()
else:
step = 'any'
attrs.setdefault('step', step)
return attrs
class BaseTemporalField(Field):
def __init__(self, input_formats=None, *args, **kwargs):
super(BaseTemporalField, self).__init__(*args, **kwargs)
if input_formats is not None:
self.input_formats = input_formats
def to_python(self, value):
# Try to coerce the value to unicode.
unicode_value = force_text(value, strings_only=True)
if isinstance(unicode_value, six.text_type):
value = unicode_value.strip()
# If unicode, try to strptime against each input format.
if isinstance(value, six.text_type):
for format in self.input_formats:
try:
return self.strptime(value, format)
except (ValueError, TypeError):
continue
raise ValidationError(self.error_messages['invalid'], code='invalid')
def strptime(self, value, format):
raise NotImplementedError('Subclasses must define this method.')
class DateField(BaseTemporalField):
widget = DateInput
input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date.'),
}
def to_python(self, value):
"""
Validates that the input can be converted to a date. Returns a Python
datetime.date object.
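        A doctest-style sketch, assuming the default ``DATE_INPUT_FORMATS``
        (which include ``'%Y-%m-%d'``)::
            >>> DateField().to_python('2015-12-31')
            datetime.date(2015, 12, 31)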
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
return super(DateField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).date()
class TimeField(BaseTemporalField):
widget = TimeInput
input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid time.')
}
def to_python(self, value):
"""
Validates that the input can be converted to a time. Returns a Python
datetime.time object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.time):
return value
return super(TimeField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).time()
class DateTimeField(BaseTemporalField):
widget = DateTimeInput
input_formats = formats.get_format_lazy('DATETIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date/time.'),
}
def prepare_value(self, value):
if isinstance(value, datetime.datetime):
value = to_current_timezone(value)
return value
def to_python(self, value):
"""
Validates that the input can be converted to a datetime. Returns a
Python datetime.datetime object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return from_current_timezone(value)
if isinstance(value, datetime.date):
result = datetime.datetime(value.year, value.month, value.day)
return from_current_timezone(result)
result = super(DateTimeField, self).to_python(value)
return from_current_timezone(result)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format)
class DurationField(Field):
default_error_messages = {
'invalid': _('Enter a valid duration.'),
}
def prepare_value(self, value):
if isinstance(value, datetime.timedelta):
return duration_string(value)
return value
def to_python(self, value):
if value in self.empty_values:
return None
if isinstance(value, datetime.timedelta):
return value
value = parse_duration(value)
if value is None:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
class RegexField(CharField):
def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
error_message is an optional error message to use, if
'Enter a valid value' is too generic for you.
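        A minimal sketch::
            f = RegexField(regex=r'^[0-9]{4}$')
            f.clean('1234')   # returns the validated string
            f.clean('12ab')   # raises ValidationError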
"""
kwargs.setdefault('strip', False)
# error_message is just kept for backwards compatibility:
if error_message is not None:
warnings.warn(
"The 'error_message' argument is deprecated. Use "
"Field.error_messages['invalid'] instead.",
RemovedInDjango110Warning, stacklevel=2
)
error_messages = kwargs.get('error_messages') or {}
error_messages['invalid'] = error_message
kwargs['error_messages'] = error_messages
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
self._set_regex(regex)
def _get_regex(self):
return self._regex
def _set_regex(self, regex):
if isinstance(regex, six.string_types):
regex = re.compile(regex, re.UNICODE)
self._regex = regex
if hasattr(self, '_regex_validator') and self._regex_validator in self.validators:
self.validators.remove(self._regex_validator)
self._regex_validator = validators.RegexValidator(regex=regex)
self.validators.append(self._regex_validator)
regex = property(_get_regex, _set_regex)
class EmailField(CharField):
widget = EmailInput
default_validators = [validators.validate_email]
def clean(self, value):
value = self.to_python(value).strip()
return super(EmailField, self).clean(value)
class FileField(Field):
widget = ClearableFileInput
default_error_messages = {
'invalid': _("No file was submitted. Check the encoding type on the form."),
'missing': _("No file was submitted."),
'empty': _("The submitted file is empty."),
'max_length': ungettext_lazy(
'Ensure this filename has at most %(max)d character (it has %(length)d).',
'Ensure this filename has at most %(max)d characters (it has %(length)d).',
'max'),
'contradiction': _('Please either submit a file or check the clear checkbox, not both.')
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
self.allow_empty_file = kwargs.pop('allow_empty_file', False)
super(FileField, self).__init__(*args, **kwargs)
def to_python(self, data):
if data in self.empty_values:
return None
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if self.max_length is not None and len(file_name) > self.max_length:
params = {'max': self.max_length, 'length': len(file_name)}
raise ValidationError(self.error_messages['max_length'], code='max_length', params=params)
if not file_name:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if not self.allow_empty_file and not file_size:
raise ValidationError(self.error_messages['empty'], code='empty')
return data
def clean(self, data, initial=None):
# If the widget got contradictory inputs, we raise a validation error
if data is FILE_INPUT_CONTRADICTION:
raise ValidationError(self.error_messages['contradiction'], code='contradiction')
# False means the field value should be cleared; further validation is
# not needed.
if data is False:
if not self.required:
return False
# If the field is required, clearing is not possible (the widget
# shouldn't return False data in that case anyway). False is not
            # in self.empty_values; if a False value makes it this far
# it should be validated from here on out as None (so it will be
# caught by the required check).
data = None
if not data and initial:
return initial
return super(FileField, self).clean(data)
def bound_data(self, data, initial):
if data in (None, FILE_INPUT_CONTRADICTION):
return initial
return data
def has_changed(self, initial, data):
if data is None:
return False
return True
class ImageField(FileField):
default_error_messages = {
'invalid_image': _(
"Upload a valid image. The file you uploaded was either not an "
"image or a corrupted image."
),
}
def to_python(self, data):
"""
Checks that the file-upload field data contains a valid image (GIF, JPG,
PNG, possibly others -- whatever the Python Imaging Library supports).
"""
f = super(ImageField, self).to_python(data)
if f is None:
return None
from PIL import Image
# We need to get a file object for Pillow. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = BytesIO(data.read())
else:
file = BytesIO(data['content'])
try:
# load() could spot a truncated JPEG, but it loads the entire
# image in memory, which is a DoS vector. See #3848 and #18520.
image = Image.open(file)
# verify() must be called immediately after the constructor.
image.verify()
# Annotating so subclasses can reuse it for their own validation
f.image = image
# Pillow doesn't detect the MIME type of all formats. In those
# cases, content_type will be None.
f.content_type = Image.MIME.get(image.format)
except Exception:
# Pillow doesn't recognize it as an image.
six.reraise(ValidationError, ValidationError(
self.error_messages['invalid_image'],
code='invalid_image',
), sys.exc_info()[2])
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f
class URLField(CharField):
widget = URLInput
default_error_messages = {
'invalid': _('Enter a valid URL.'),
}
default_validators = [validators.URLValidator()]
def to_python(self, value):
def split_url(url):
"""
Returns a list of url parts via ``urlparse.urlsplit`` (or raises a
``ValidationError`` exception for certain).
"""
try:
return list(urlsplit(url))
except ValueError:
# urlparse.urlsplit can raise a ValueError with some
# misformatted URLs.
raise ValidationError(self.error_messages['invalid'], code='invalid')
value = super(URLField, self).to_python(value)
if value:
url_fields = split_url(value)
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = 'http'
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ''
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
url_fields = split_url(urlunsplit(url_fields))
value = urlunsplit(url_fields)
return value
def clean(self, value):
value = self.to_python(value).strip()
return super(URLField, self).clean(value)
class BooleanField(Field):
widget = CheckboxInput
def to_python(self, value):
"""Returns a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if isinstance(value, six.string_types) and value.lower() in ('false', '0'):
value = False
else:
value = bool(value)
return super(BooleanField, self).to_python(value)
def validate(self, value):
if not value and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def has_changed(self, initial, data):
# Sometimes data or initial could be None or '' which should be the
# same thing as False.
if initial == 'False':
# show_hidden_initial may have transformed False to 'False'
initial = False
return bool(initial) != bool(data)
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True and False. Invalid values are
cleaned to None.
"""
widget = NullBooleanSelect
def to_python(self, value):
"""
        Explicitly checks for the strings 'True' and 'False', which is what a
        hidden field will submit for True and False, for 'true' and 'false',
        which are likely to be returned by JavaScript serializations of forms,
        and for '1' and '0', which is what a RadioField will submit. Unlike
        BooleanField, we need to explicitly check for True because we are
        not using the bool() function.
"""
if value in (True, 'True', 'true', '1'):
return True
elif value in (False, 'False', 'false', '0'):
return False
else:
return None
def validate(self, value):
pass
def has_changed(self, initial, data):
# None (unknown) and False (No) are not the same
if initial is not None:
initial = bool(initial)
if data is not None:
data = bool(data)
return initial != data
class CallableChoiceIterator(object):
def __init__(self, choices_func):
self.choices_func = choices_func
def __iter__(self):
for e in self.choices_func():
yield e
class ChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, choices=(), required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs)
self.choices = choices
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
result._choices = copy.deepcopy(self._choices, memo)
return result
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
if callable(value):
value = CallableChoiceIterator(value)
else:
value = list(value)
self._choices = self.widget.choices = value
choices = property(_get_choices, _set_choices)
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
return smart_text(value)
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(ChoiceField, self).validate(value)
if value and not self.valid_value(value):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
text_value = force_text(value)
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == k2 or text_value == force_text(k2):
return True
else:
if value == k or text_value == force_text(k):
return True
return False
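# Illustrative sketch (not part of Django): because _set_choices wraps a
# callable in CallableChoiceIterator instead of calling list() on it, the
# choices function below is re-evaluated every time the choices are
# iterated, e.g. on each form render. Defined only for illustration and
# never called at import time.
def _example_callable_choices():
    def get_choices():
        return [('a', 'Alpha'), ('b', 'Beta')]
    field = ChoiceField(choices=get_choices)
    assert field.valid_value('a')
    assert not field.valid_value('z')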
class TypedChoiceField(ChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', '')
super(TypedChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validate that the value can be coerced to the right type (if not empty).
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
return value
def clean(self, value):
value = super(TypedChoiceField, self).clean(value)
return self._coerce(value)
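# Illustrative sketch (not part of Django): coerce converts the submitted
# string to the desired Python type after choice validation, while empty
# input maps to empty_value. Defined only for illustration; never called.
def _example_typed_choice_coercion():
    field = TypedChoiceField(choices=[(1, '+1'), (-1, '-1')], coerce=int,
                             required=False, empty_value=None)
    assert field.clean('1') == 1
    assert field.clean('') is None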
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
'invalid_list': _('Enter a list of values.'),
}
def to_python(self, value):
if not value:
return []
elif not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['invalid_list'], code='invalid_list')
return [smart_text(val) for val in value]
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
def has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in initial)
data_set = set(force_text(value) for value in data)
return data_set != initial_set
class TypedMultipleChoiceField(MultipleChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', [])
super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validates that the values are in self.choices and can be coerced to the
right type.
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
new_value = []
for choice in value:
try:
new_value.append(self.coerce(choice))
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': choice},
)
return new_value
def clean(self, value):
value = super(TypedMultipleChoiceField, self).clean(value)
return self._coerce(value)
def validate(self, value):
if value != self.empty_value:
super(TypedMultipleChoiceField, self).validate(value)
elif self.required:
raise ValidationError(self.error_messages['required'], code='required')
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields=(), *args, **kwargs):
super(ComboField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates the given value against all of self.fields, which is a
list of Field instances.
"""
super(ComboField, self).clean(value)
for field in self.fields:
value = field.clean(value)
return value
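# Illustrative sketch (not part of Django): ComboField pipes the value
# through each sub-field's clean() in order, so a single input must satisfy
# both the length limit and the e-mail validator. Never called at import.
def _example_combo_field():
    field = ComboField(fields=[CharField(max_length=20), EmailField()])
    assert field.clean('test@example.com') == 'test@example.com'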
class MultiValueField(Field):
"""
A Field that aggregates the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
'invalid': _('Enter a list of values.'),
'incomplete': _('Enter a complete value.'),
}
def __init__(self, fields=(), *args, **kwargs):
self.require_all_fields = kwargs.pop('require_all_fields', True)
super(MultiValueField, self).__init__(*args, **kwargs)
for f in fields:
f.error_messages.setdefault('incomplete',
self.error_messages['incomplete'])
if self.require_all_fields:
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not
# by those individual fields.
f.required = False
self.fields = fields
def __deepcopy__(self, memo):
result = super(MultiValueField, self).__deepcopy__(memo)
result.fields = tuple(x.__deepcopy__(memo) for x in self.fields)
return result
def validate(self, value):
pass
def clean(self, value):
"""
Validates every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = []
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in self.empty_values]:
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'], code='invalid')
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if field_value in self.empty_values:
if self.require_all_fields:
# Raise a 'required' error if the MultiValueField is
# required and any field is empty.
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
elif field.required:
# Otherwise, add an 'incomplete' error to the list of
# collected errors and skip field cleaning, if a required
# field is empty.
if field.error_messages['incomplete'] not in errors:
errors.append(field.error_messages['incomplete'])
continue
try:
clean_data.append(field.clean(field_value))
except ValidationError as e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter. Skip duplicates.
errors.extend(m for m in e.error_list if m not in errors)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
self.run_validators(out)
return out
def compress(self, data_list):
"""
Returns a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.')
def has_changed(self, initial, data):
if initial is None:
initial = ['' for x in range(0, len(data))]
else:
if not isinstance(initial, list):
initial = self.widget.decompress(initial)
for field, initial, data in zip(self.fields, initial, data):
try:
initial = field.to_python(initial)
except ValidationError:
return True
if field.has_changed(initial, data):
return True
return False
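# Illustrative sketch (not part of Django): a minimal MultiValueField
# subclass only has to implement compress(); clean() above handles the
# per-field validation. SplitDateTimeField below is the in-tree example.
class _ExampleDateRangeField(MultiValueField):
    def __init__(self, *args, **kwargs):
        fields = (DateField(), DateField())
        super(_ExampleDateRangeField, self).__init__(fields, *args, **kwargs)
    def compress(self, data_list):
        # data_list holds the cleaned (start, end) dates, or [] when empty.
        return tuple(data_list) if data_list else None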
class FilePathField(ChoiceField):
def __init__(self, path, match=None, recursive=False, allow_files=True,
allow_folders=False, required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
super(FilePathField, self).__init__(choices=(), required=required,
widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
if recursive:
for root, dirs, files in sorted(os.walk(self.path)):
if self.allow_files:
for f in files:
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
if self.allow_folders:
for f in dirs:
if f == '__pycache__':
continue
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
try:
for f in sorted(os.listdir(self.path)):
if f == '__pycache__':
continue
full_file = os.path.join(self.path, f)
if (((self.allow_files and os.path.isfile(full_file)) or
(self.allow_folders and os.path.isdir(full_file))) and
(self.match is None or self.match_re.search(f))):
self.choices.append((full_file, f))
except OSError:
pass
self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
'invalid_date': _('Enter a valid date.'),
'invalid_time': _('Enter a valid time.'),
}
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
localize = kwargs.get('localize', False)
fields = (
DateField(input_formats=input_date_formats,
error_messages={'invalid': errors['invalid_date']},
localize=localize),
TimeField(input_formats=input_time_formats,
error_messages={'invalid': errors['invalid_time']},
localize=localize),
)
super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in self.empty_values:
raise ValidationError(self.error_messages['invalid_date'], code='invalid_date')
if data_list[1] in self.empty_values:
raise ValidationError(self.error_messages['invalid_time'], code='invalid_time')
result = datetime.datetime.combine(*data_list)
return from_current_timezone(result)
return None
class GenericIPAddressField(CharField):
def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0]
super(GenericIPAddressField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value in self.empty_values:
return ''
value = value.strip()
if value and ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4)
return value
class SlugField(CharField):
default_validators = [validators.validate_slug]
def __init__(self, *args, **kwargs):
self.allow_unicode = kwargs.pop('allow_unicode', False)
if self.allow_unicode:
self.default_validators = [validators.validate_unicode_slug]
super(SlugField, self).__init__(*args, **kwargs)
class UUIDField(CharField):
default_error_messages = {
'invalid': _('Enter a valid UUID.'),
}
def prepare_value(self, value):
if isinstance(value, uuid.UUID):
return value.hex
return value
def to_python(self, value):
value = super(UUIDField, self).to_python(value)
if value in self.empty_values:
return None
if not isinstance(value, uuid.UUID):
try:
value = uuid.UUID(value)
except ValueError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
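# Illustrative sketch (not part of Django): UUIDField normalizes string
# input to a uuid.UUID and renders initial UUID values as 32-char hex.
# Defined only for illustration; never called at import time.
def _example_uuid_field():
    field = UUIDField()
    value = field.clean('550e8400-e29b-41d4-a716-446655440000')
    assert isinstance(value, uuid.UUID)
    assert field.prepare_value(value) == value.hex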
| apache-2.0 |
algorhythms/LeetCode | 527 Word Abbreviation.py | 1 | 1679 | #!/usr/bin/python3
"""
premium question
"""
from typing import List
from collections import defaultdict
class Solution:
def wordsAbbreviation(self, words: List[str]) -> List[str]:
"""
        Group words by (first char, last char, length); only words within
        the same group can collide on an abbreviation. For each group build
        a trie (TrieNode as a recursive defaultdict lambda) counting how
        many words pass through each prefix node; the shortest prefix with
        count == 1 is unique and determines the abbreviation.
"""
hm = defaultdict(list)
ret = [None for _ in words]
for i, w in enumerate(words):
hm[w[0], w[-1], len(w)].append(i)
TrieNode = lambda: defaultdict(TrieNode)
for lst in hm.values():
root = TrieNode()
for i in lst:
w = words[i]
cur = root
for c in w:
cur = cur[c]
cur["count"] = cur.get("count", 0) + 1
for i in lst:
w = words[i]
prefix_l = 0
cur = root
for c in w:
prefix_l += 1
cur = cur[c]
if cur["count"] == 1:
break
ret[i] = self.abbrev(w, prefix_l)
return ret
def abbrev(self, w, prefix_l):
abbrev_l = len(w) - 2 - prefix_l + 1
if abbrev_l > 1:
return w[:prefix_l] + str(abbrev_l) + w[-1]
return w
if __name__ == "__main__":
assert Solution().wordsAbbreviation(["like", "god", "internal", "me", "internet", "interval", "intension", "face", "intrusion"]) == ["l2e","god","internal","me","i6t","interval","inte4n","f2e","intr4n"]
| mit |
katrielalex/rate-equations-odeint | redirect_std.py | 1 | 1258 | # http://stackoverflow.com/a/22434262/398968
import os
import sys
import contextlib
def fileno(file_or_fd):
fd = getattr(file_or_fd, 'fileno', lambda: file_or_fd)()
if not isinstance(fd, int):
raise ValueError("Expected a file (`.fileno()`) or a file descriptor")
return fd
@contextlib.contextmanager
def stdout_redirected(to=os.devnull, stdout=None):
if stdout is None:
stdout = sys.stdout
stdout_fd = fileno(stdout)
# copy stdout_fd before it is overwritten
#NOTE: `copied` is inheritable on Windows when duplicating a standard stream
with os.fdopen(os.dup(stdout_fd), 'wb') as copied:
stdout.flush() # flush library buffers that dup2 knows nothing about
try:
os.dup2(fileno(to), stdout_fd) # $ exec >&to
except ValueError: # filename
with open(to, 'wb') as to_file:
os.dup2(to_file.fileno(), stdout_fd) # $ exec > to
try:
yield stdout # allow code to be run with the redirected stdout
finally:
# restore stdout to its previous value
#NOTE: dup2 makes stdout_fd inheritable unconditionally
stdout.flush()
os.dup2(copied.fileno(), stdout_fd) # $ exec >&copied
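# Illustrative usage sketch (filename is assumed, not part of the module):
# everything written to the stdout file descriptor inside the block --
# including output from C extensions that bypass sys.stdout -- lands in
# 'captured.txt'. Defined only for illustration; never called at import.
def _example_usage():
    with stdout_redirected(to='captured.txt'):
        print('lands in captured.txt, not on the terminal')
        sys.stdout.flush()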
| gpl-3.0 |
bincentvaret/bsd-cloudinit | cloudbaseinit/tests/plugins/common/userdataplugins/test_heat.py | 4 | 2897 | # Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from oslo.config import cfg
from cloudbaseinit.plugins.common.userdataplugins import heat
CONF = cfg.CONF
class HeatUserDataHandlerTests(unittest.TestCase):
def setUp(self):
self._heat = heat.HeatPlugin()
@mock.patch('os.path.exists')
@mock.patch('os.makedirs')
@mock.patch('os.path.dirname')
def test_check_heat_config_dir(self, mock_dirname, mock_makedirs,
mock_exists):
mock_exists.return_value = False
fake_path = mock.sentinel.fake_path
fake_dir = mock.sentinel.fake_dir
mock_dirname.return_value = fake_dir
self._heat._check_dir(file_name=fake_path)
mock_dirname.assert_called_once_with(fake_path)
mock_exists.assert_called_once_with(fake_dir)
mock_makedirs.assert_called_once_with(fake_dir)
@mock.patch('cloudbaseinit.plugins.common.userdatautils'
'.execute_user_data_script')
@mock.patch('cloudbaseinit.plugins.common.userdataplugins.heat'
'.HeatPlugin._check_dir')
@mock.patch('cloudbaseinit.utils.encoding.write_file')
def _test_process(self, mock_write_file, mock_check_dir,
mock_execute_user_data_script, filename):
mock_part = mock.MagicMock()
mock_part.get_filename.return_value = filename
response = self._heat.process(mock_part)
path = os.path.join(CONF.heat_config_dir, filename)
mock_check_dir.assert_called_once_with(path)
mock_part.get_filename.assert_called_with()
mock_write_file.assert_called_once_with(
path, mock_part.get_payload.return_value)
if filename == self._heat._heat_user_data_filename:
mock_execute_user_data_script.assert_called_with(
mock_part.get_payload())
self.assertEqual(mock_execute_user_data_script.return_value,
response)
else:
self.assertTrue(response is None)
def test_process(self):
self._test_process(filename=self._heat._heat_user_data_filename)
def test_process_content_other_data(self):
self._test_process(filename='other data')
| apache-2.0 |
ksetyadi/Sahana-Eden | models/06_supply.py | 1 | 5661 | # -*- coding: utf-8 -*-
"""
Supply
@author: Michael Howden ([email protected])
@date-created: 2010-08-16
Generic Supply functionality such as catalogs and items that will be used across multiple modules
"""
module = "supply"
if deployment_settings.has_module("logs"):
#==============================================================================
# Item Category
#
resourcename = "item_category"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("name", length=128, notnull=True, unique=True),
comments(),
migrate=migrate, *s3_meta_fields())
# CRUD strings
ADD_ITEM_CATEGORY = T("Add Item Category")
LIST_ITEM_CATEGORIES = T("List Item Categories")
s3.crud_strings[tablename] = Storage(
title_create = ADD_ITEM_CATEGORY,
title_display = T("Item Category Details"),
title_list = LIST_ITEM_CATEGORIES,
title_update = T("Edit Item Category"),
title_search = T("Search Item Categories"),
subtitle_create = T("Add New Item Category"),
subtitle_list = T("Item Categories"),
label_list_button = LIST_ITEM_CATEGORIES,
label_create_button = ADD_ITEM_CATEGORY,
label_delete_button = T("Delete Item Category"),
msg_record_created = T("Item Category added"),
msg_record_modified = T("Item Category updated"),
msg_record_deleted = T("Item Category deleted"),
msg_list_empty = T("No Item Categories currently registered"))
# Reusable Field
item_category_id = S3ReusableField("item_category_id", db.supply_item_category, sortby="name",
requires = IS_NULL_OR(IS_ONE_OF(db, "supply_item_category.id", "%(name)s", sort=True)),
represent = lambda id: shn_get_db_field_value(db=db, table="supply_item_category", field="name", look_up=id),
label = T("Category"),
comment = DIV( _class="tooltip", _title=T("Item Category") + "|" + T("The list of Item categories are maintained by the Administrators.")),
#comment = DIV(A(ADD_ITEM_CATEGORY, _class="colorbox", _href=URL(r=request, c="supply", f="item_category", args="create", vars=dict(format="popup")), _target="top", _title=ADD_ITEM_CATEGORY),
# DIV( _class="tooltip", _title=T("Item Category") + "|" + T("The category of the Item."))),
ondelete = "RESTRICT"
)
#==============================================================================
# Units
#
logs_unit_opts = {
"piece" : T("piece"),
"kit" : T("kit"),
"sack50kg" : T("sack 50kg"),
"sack20kg" : T("sack 20kg"),
"pack10" : T("pack of 10"),
"m" : T("meter"),
"m3" : T("meter cubed"),
"l" : T("liter"),
"kg" : T("kilogram"),
"ton" : T("ton"),
}
#==============================================================================
# Item
#
resourcename = "item"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
item_category_id(),
Field("name", length=128, notnull=True, unique=True),
Field("unit", notnull=True, default="piece",
requires = IS_IN_SET(logs_unit_opts, zero=None),
represent = lambda opt: logs_unit_opts.get(opt, T("not specified"))
),
comments(), # These comments do *not* pull through to an Inventory's Items or a Request's Items
migrate=migrate, *s3_meta_fields())
# CRUD strings
ADD_ITEM = T("Add Item")
LIST_ITEMS = T("List Items")
s3.crud_strings[tablename] = Storage(
title_create = ADD_ITEM,
title_display = T("Item Details"),
title_list = LIST_ITEMS,
title_update = T("Edit Item"),
title_search = T("Search Items"),
subtitle_create = T("Add New Item"),
subtitle_list = T("Items"),
label_list_button = LIST_ITEMS,
label_create_button = ADD_ITEM,
label_delete_button = T("Delete Item"),
msg_record_created = T("Item added"),
msg_record_modified = T("Item updated"),
msg_record_deleted = T("Item deleted"),
msg_list_empty = T("No Items currently registered"))
def shn_item_represent(id):
record = db(db.supply_item.id == id).select(db.supply_item.name,
db.supply_item.unit,
limitby=(0, 1)).first()
if not record:
return NONE
elif not record.unit:
return record.name
else:
item_represent = "%s (%s)" % (record.name, record.unit)
return item_represent
# Reusable Field
item_id = S3ReusableField("item_id", db.supply_item, sortby="name",
requires = IS_NULL_OR(IS_ONE_OF(db, "supply_item.id", "%(name)s (%(unit)s)", sort=True)),
represent = shn_item_represent,
label = T("Item"),
comment = DIV(A(ADD_ITEM, _class="colorbox", _href=URL(r=request, c="supply", f="item", args="create", vars=dict(format="popup")), _target="top", _title=ADD_ITEM),
DIV( _class="tooltip", _title=T("Relief Item") + "|" + T("Add a new Relief Item."))),
ondelete = "RESTRICT"
)
| mit |
osamak/student-portal | activities/migrations/0023_invitation.py | 2 | 2488 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('activities', '0022_depository'),
]
operations = [
migrations.CreateModel(
name='Invitation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(default=b'', max_length=100, verbose_name='\u0627\u0644\u0627\u0633\u0645')),
('background', models.ImageField(null=True, upload_to=b'invitations/backgrounds/', blank=True)),
('logo', models.ImageField(null=True, upload_to=b'invitations/backgrounds/', blank=True)),
('short_description', models.TextField(verbose_name='\u0648\u0635\u0641 \u0642\u0635\u064a\u0631')),
('full_description', models.TextField(verbose_name='\u0648\u0635\u0641 \u0642\u0635\u064a\u0631')),
('hashtag', models.CharField(default=b'', help_text=b'\xd8\xa8\xd8\xaf\xd9\x88\xd9\x86 #', max_length=20, verbose_name='\u0647\u0627\u0634\u062a\u0627\u063a', blank=True)),
('publication_date', models.DateTimeField(verbose_name='\u062a\u0627\u0631\u064a\u062e \u0627\u0644\u0646\u0634\u0631', blank=True)),
('location', models.CharField(default=b'', max_length=200, verbose_name='\u0627\u0644\u0645\u0643\u0627\u0646')),
('date', models.DateField(verbose_name='\u0627\u0644\u062a\u0627\u0631\u064a\u062e')),
('start_time', models.TimeField(verbose_name='\u0648\u0642\u062a \u0627\u0644\u0628\u062f\u0627\u064a\u0629')),
('end_time', models.TimeField(verbose_name='\u0648\u0642\u062a \u0627\u0644\u0646\u0647\u0627\u064a\u0629')),
('submission_date', models.DateTimeField(auto_now_add=True, verbose_name='\u062a\u0627\u0631\u064a\u062e \u0627\u0644\u0625\u0631\u0633\u0627\u0644')),
('edit_date', models.DateTimeField(auto_now=True, verbose_name='\u062a\u0627\u0631\u064a\u062e \u0627\u0644\u062a\u0639\u062f\u064a\u0644')),
('activity', models.ForeignKey(blank=True, to='activities.Activity', null=True)),
('students', models.ManyToManyField(to=settings.AUTH_USER_MODEL, blank=True)),
],
),
]
| agpl-3.0 |
falcondai/svcrawl | models.py | 1 | 2640 | # description: download Google Street View images and save them
# the Google Street View API is documented here:
# https://developers.google.com/maps/documentation/streetview/
# author: Falcon Dai
import cStringIO
import mongoengine as me
from PIL import Image
from google_streetview_api import *
class Pano(me.Document):
location = me.StringField()
longlat = me.PointField(auto_index=True) # note that the coordinates are (long, lat) pairs
heading = me.FloatField(default=None)
fov = me.FloatField(default=90)
pitch = me.FloatField(default=0)
pano_id = me.StringField()
image = me.ImageField()
meta = {
'indexes': ['pano_id']
}
@property
def size(self):
return self.image.size
@property
def url(self):
loc = self.location or self.longlat
return generate_pano_url(loc, self.heading, self.fov, self.pitch, self.size, self.pano_id)
@property
def image_md5(self):
'''return the md5 hash of the stored image'''
if self.image:
return self.image.md5
return None
@property
def PIL_image(self):
if hasattr(self, '_PIL_image'):
return self._PIL_image
self._PIL_image = Image.open(cStringIO.StringIO(self.image.read()))
return self._PIL_image
@property
def has_image(self):
'''return False if the image is a null image'''
return self.image_md5 != no_image_md5
def show_image(self):
return self.PIL_image.show()
def __unicode__(self):
return 'location=%r, longlat=%r, image_md5=%r' % (self.location, self.longlat['coordinates'], self.image_md5)
@staticmethod
def new_pano(location=None, heading=None, fov=90, pitch=0, size=(640, 640), pano_id=None, key=None):
image = cStringIO.StringIO(get_pano(location, heading, fov, pitch, size, pano_id, key))
params = dict(heading=heading, fov=fov, pitch=pitch, size=size, pano_id=pano_id, image=image)
if isinstance(location, str):
pano = Pano(location=location, **params)
else:
# location is provided as a (long, lat) pair
pano = Pano(longlat=location, **params)
return pano
if __name__ == '__main__':
print 'testing...'
print generate_pano_url((-0.0004797, 51.4769351))
me.connect('test')
for l in [(0, 0), (-0.0004797, 51.4769351), 'downtown chicago', 'Golden Gate Bridge', 'Big Ben', 'Empire State Building', 'White House']:
p = Pano.new_pano(l, fov=120)
p.show_image()
print repr(p), p.has_image
p.save()
Pano.drop_collection()
| mit |
muntasirsyed/intellij-community | python/helpers/pydev/pydevd_attach_to_process/winappdbg/plugins/do_symfix.py | 102 | 1894 | #!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Command line debugger using WinAppDbg
# Fix the symbol store path
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__revision__ = "$Id$"
def do(self, arg):
".symfix - Set the default Microsoft Symbol Store settings if missing"
self.debug.system.fix_symbol_store_path(remote = True, force = False)
| apache-2.0 |
diego-d5000/MisValesMd | env/lib/site-packages/pip/_vendor/colorama/win32.py | 446 | 5121 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
STDOUT = -11
STDERR = -12
try:
import ctypes
from ctypes import LibraryLoader
windll = LibraryLoader(ctypes.WinDLL)
from ctypes import wintypes
except (AttributeError, ImportError):
windll = None
SetConsoleTextAttribute = lambda *_: None
else:
from ctypes import byref, Structure, c_char, POINTER
COORD = wintypes._COORD
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", wintypes.WORD),
("srWindow", wintypes.SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
def __str__(self):
return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
self.dwSize.Y, self.dwSize.X
, self.dwCursorPosition.Y, self.dwCursorPosition.X
, self.wAttributes
, self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
, self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
)
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
    # Note: despite the W-suffixed name, this binds the ANSI variant
    # (SetConsoleTitleA), so titles are passed as byte strings.
    _SetConsoleTitleW = windll.kernel32.SetConsoleTitleA
_SetConsoleTitleW.argtypes = [
wintypes.LPCSTR
]
_SetConsoleTitleW.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position, adjust=True):
position = COORD(*position)
# If the position is out of range, do nothing.
if position.Y <= 0 or position.X <= 0:
return
# Adjust for Windows' SetConsoleCursorPosition:
# 1. being 0-based, while ANSI is 1-based.
# 2. expecting (x,y), while ANSI uses (y,x).
adjusted_position = COORD(position.Y - 1, position.X - 1)
if adjust:
# Adjust for viewport's scroll position
sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left
# Resume normal processing
handle = handles[stream_id]
return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = c_char(char.encode())
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = handles[stream_id]
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
def SetConsoleTitle(title):
return _SetConsoleTitleW(title)
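# Illustrative sketch (Windows only, not part of colorama's public API):
# FOREGROUND_RED is the wincon.h constant, assumed here because this module
# does not define it. Defined only for illustration; never called at import.
def _example_red_text():
    FOREGROUND_RED = 0x0004
    csbi = GetConsoleScreenBufferInfo(STDOUT)
    SetConsoleTextAttribute(STDOUT, FOREGROUND_RED)
    print('painted red on a Windows console')
    SetConsoleTextAttribute(STDOUT, csbi.wAttributes)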
| mit |
epage/telepathy-python | src/client/__init__.py | 2 | 1217 | # telepathy-python - Base classes defining the interfaces of the Telepathy framework
#
# Copyright (C) 2005, 2006 Collabora Limited
# Copyright (C) 2005, 2006 Nokia Corporation
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from telepathy.client.interfacefactory import InterfaceFactory
from telepathy.client.managerregistry import ManagerRegistry
from telepathy.client.connmgr import ConnectionManager
from telepathy.client.conn import Connection
from telepathy.client.channel import Channel
from telepathy import version, __version__
| lgpl-2.1 |
jdduke/googletest | test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def UsesColor(term, color_env_var, color_flag):
"""Runs gtest_color_test_ and returns its exit code."""
SetEnvVar('TERM', term)
SetEnvVar(COLOR_ENV_VAR, color_env_var)
if color_flag is None:
args = []
else:
args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
p = gtest_test_utils.Subprocess([COMMAND] + args)
return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
def testNoEnvVarNoFlag(self):
"""Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', None, None))
self.assert_(not UsesColor('emacs', None, None))
self.assert_(not UsesColor('xterm-mono', None, None))
self.assert_(not UsesColor('unknown', None, None))
self.assert_(not UsesColor(None, None, None))
self.assert_(UsesColor('linux', None, None))
self.assert_(UsesColor('cygwin', None, None))
self.assert_(UsesColor('xterm', None, None))
self.assert_(UsesColor('xterm-color', None, None))
self.assert_(UsesColor('xterm-256color', None, None))
def testFlagOnly(self):
"""Tests the case when there's --gtest_color but not GTEST_COLOR."""
self.assert_(not UsesColor('dumb', None, 'no'))
self.assert_(not UsesColor('xterm-color', None, 'no'))
if not IS_WINDOWS:
self.assert_(not UsesColor('emacs', None, 'auto'))
self.assert_(UsesColor('xterm', None, 'auto'))
self.assert_(UsesColor('dumb', None, 'yes'))
self.assert_(UsesColor('xterm', None, 'yes'))
def testEnvVarOnly(self):
"""Tests the case when there's GTEST_COLOR but not --gtest_color."""
self.assert_(not UsesColor('dumb', 'no', None))
self.assert_(not UsesColor('xterm-color', 'no', None))
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', 'auto', None))
self.assert_(UsesColor('xterm-color', 'auto', None))
self.assert_(UsesColor('dumb', 'yes', None))
self.assert_(UsesColor('xterm-color', 'yes', None))
def testEnvVarAndFlag(self):
"""Tests the case when there are both GTEST_COLOR and --gtest_color."""
self.assert_(not UsesColor('xterm-color', 'no', 'no'))
self.assert_(UsesColor('dumb', 'no', 'yes'))
self.assert_(UsesColor('xterm-color', 'no', 'auto'))
def testAliasesOfYesAndNo(self):
"""Tests using aliases in specifying --gtest_color."""
self.assert_(UsesColor('dumb', None, 'true'))
self.assert_(UsesColor('dumb', None, 'YES'))
self.assert_(UsesColor('dumb', None, 'T'))
self.assert_(UsesColor('dumb', None, '1'))
self.assert_(not UsesColor('xterm', None, 'f'))
self.assert_(not UsesColor('xterm', None, 'false'))
self.assert_(not UsesColor('xterm', None, '0'))
self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
mike10004/adventofcode2016 | advent14_otp.py | 1 | 6772 | #!/usr/bin/env python
import sys
import re
import hashlib
import logging
from collections import defaultdict
_log = logging.getLogger('a14otp')
_PATT3 = re.compile(r'(?:(\w)\1\1)')
_PATT5 = re.compile(r'(?:(\w)\1\1\1\1)')
_MAX_CURSOR = 10000000 # 10 million
def clean(items, predicate):
assert isinstance(items, list) or isinstance(items, dict)
keys = xrange(len(items)) if isinstance(items, list) else items.keys()
indices = []
for i in keys:
if not predicate(items[i]):
indices.append(i)
indices.sort(reverse=True)
for i in indices:
del items[i]
return items
def get_triplet_char(hsh):
m = _PATT3.search(hsh)
assert m is not None, "no triplets: " + hsh
return m.group(1)
class CursorError(ValueError):
pass
def default_hasher(salt, index, stretch):
if index > _MAX_CURSOR:
raise CursorError("%d > %s" % (index, _MAX_CURSOR))
h = salt + str(index)
for i in xrange(1 + stretch):
hasher = hashlib.md5()
hasher.update(h)
h = hasher.hexdigest()
return h
class BuffStream(object):
def __init__(self, salt, stretch, cursor=0, memory=1000, hasher=default_hasher):
self.salt = salt
self.hasher = hasher
self.cursor = cursor
self.memory = memory
assert memory > 0, "memory must be > 0"
self.hashes = defaultdict(lambda: None)
self.quints = defaultdict(list) # ch -> index of hashes
self.stopped = False
self.stretch = stretch
self._prepare()
def _clean(self):
young = lambda i: i >= self.cursor
clean(self.hashes, young)
for ch in self.quints:
clean(self.quints[ch], young)
clean(self.quints, lambda l: len(l) > 0)
def _add_hash(self, index):
h = self.hasher(self.salt, index, self.stretch)
m = _PATT3.search(h)
if m is not None:
self.hashes[index] = h
for g in _PATT5.findall(h):
self.quints[g].append(index)
def _prepare(self):
for i in xrange(self.cursor, self.memory):
self._add_hash(i)
def next(self):
good = None
while good is None and not self.stopped:
current = self.hashes[self.cursor]
self.cursor += 1
self._clean()
next_index = self.cursor - 1 + self.memory
try:
self._add_hash(next_index)
except CursorError as e:
_log.debug("tried to add hash %d: %s", next_index, e)
self.stopped = True
if current is not None: # it's a triplet
ch = get_triplet_char(current)
if ch in self.quints:
good = current
return good
def prefab_hasher(hashes, max_cursor=None):
if max_cursor is None:
max_cursor = len(hashes) - 1
def nexthash(salt, index, stretch): # pylint: disable=unused-argument
if index > max_cursor:
raise CursorError("index %d for hashes %s" % (index, str(hashes)))
return hashes[index]
return nexthash
def generate(s, nkeys, action=lambda x, y, z: None):
otpkeys = []
while len(otpkeys) < nkeys:
key = s.next()
index = s.cursor - 1
otpkeys.append((index, key))
action(len(otpkeys), index, key)
return otpkeys
def test_stream(memory, hashes, *expected):
s = BuffStream('salt', 0, memory=memory, hasher=prefab_hasher(hashes + list(['$'] * (memory + 1))))
for i in xrange(len(expected)):
actual = s.next()
if expected[i] != actual:
print >> sys.stderr, "expected next() call %d == %s but was %s (cursor=%d, memory=%d in %s)" % (i+1, expected[i], actual, s.cursor, s.memory, str(hashes))
sys.exit(2)
def test(skip_parts=()):
# pylint: disable=protected-access
# assert _PATT3.search('abcddd13d5ba') is not None
# assert _PATT5.findall('35aaaaa293bc9') == ['a']
# assert _PATT5.findall('35ddddd293bc9eeeeeee124fff0ggg9hhhhh') == ['d', 'e', 'h']
assert clean([], lambda x: False) == []
assert clean([], lambda x: True) == []
assert clean([1, 2, 3], lambda x: False) == []
assert clean([1, 2, 3], lambda x: True) == [1, 2, 3]
assert clean(['a', 'bb', 'c', 'ddd'], lambda c: len(c) > 1) == ['bb', 'ddd']
assert clean({'a': 100, 'b': 101, 'c': 102, 'd': 103}, lambda v: v % 2 == 0) == {'a': 100, 'c': 102}
test_stream(1, ['a', 'b', 'ccc', 'ccccc'], 'ccc')
test_stream(1, ['aaa', 'b', 'ccc', 'ccccc'], 'ccc')
test_stream(1, ['0', 'aaa', 'aaaaa', '1', 'bbb', 'bbbbb'], 'aaa', 'bbb')
test_stream(1, ['aaa1', 'aaaaa2', 'aaaaa3'], 'aaa1', 'aaaaa2')
test_stream(1, ['a', 'b', 'c', 'c', 'd', 'e'], None)
test_stream(1, ['a', 'bbb', 'c', 'bbbbb', 'd'], None)
test_stream(2, ['aaa', 'aaaaa'], 'aaa')
test_stream(2, ['aaa', 'x', 'aaaaa'], 'aaa')
test_stream(2, ['aaa', 'x', 'y', 'aaaaa'], None)
test_stream(2, ['a', 'b', 'ccc', 'd', 'ccccc', 'e', 'f', 'ggg', 'ggggg', 'h'], 'ccc', 'ggg')
test_stream(3, ['aaa', 'b', 'c', 'aaaaa'], 'aaa')
if '1' not in skip_parts:
# part one
s = BuffStream('abc', 0)
keys = generate(s, 64)
first_index, first = keys[0]
assert first_index == 39, "first index %d != 39" % first_index
assert first == '347dac6ee8eeea4652c7476d0f97bee5', "first = %s, cursor = %d" % (first, s.cursor)
last_index = keys[63][0]
assert last_index == 22728, "last index %d != 22728"
if '2' not in skip_parts:
s = BuffStream('abc', 2016)
keys = generate(s, 64)
first_index, first = keys[0]
assert first_index == 10, "first index %d != 39" % first_index
# assert first == '347dac6ee8eeea4652c7476d0f97bee5', "first = %s, cursor = %d" % (first, s.cursor)
last_index = keys[63][0]
assert last_index == 22551, "last index %d != 22728"
def main(pargs):
test(() if pargs.skip_part is None else pargs.skip_part.split(','))
s = BuffStream(pargs.salt, pargs.stretch)
def printer(nkeys, index, key):
print "%2d %5d %s" % (nkeys, index, key)
    generate(s, pargs.keys, printer)
return 0
if __name__ == '__main__':
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument('salt')
    p.add_argument('--keys', type=int, default=64, metavar='N')
p.add_argument('--skip-part', choices=('1', '2', '1,2', '2,1'))
    p.add_argument('--stretch', type=int, metavar="N", default=2016, help="stretch hashes by rehashing N times")
p.add_argument('--log-level', choices=('DEBUG', 'INFO', 'WARN', 'ERROR'), default='INFO')
args = p.parse_args()
logging.basicConfig(level=eval('logging.' + args.log_level))
sys.exit(main(args))
| mit |
sdanielf/dictate | setup.py | 1 | 5131 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 S. Daniel Francis <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from distutils.core import setup
from distutils.command.build import build
from distutils.core import Command
import os
srcdir = os.path.dirname(os.path.abspath(__file__))
docdir = os.path.join(srcdir, 'doc')
docgettextdir = os.path.join(docdir, 'gettext')
mandir = os.path.join(docdir, 'man')
data_files = []
class build_manpage(Command):
description = 'Generate man pages.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
for i in os.listdir(docgettextdir) + ['man1.1']:
name, ext = i.split('.')
if ext != 'pot' and name:
build_dir = os.path.join(mandir, name)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
langopt = ('-Dlanguage=%s' % name) if name != 'man1' else ''
print 'Generating %s/dictate.1.gz' % build_dir
os.system('sphinx-build -b man %s %s %s' %
(langopt, docdir, build_dir))
if os.path.exists('%s/dictate.1.gz' % build_dir):
os.remove('%s/dictate.1.gz' % build_dir)
os.system('gzip %s/*.1' % build_dir)
self.install_man('doc/man')
def install_man(self, directory):
for i in os.listdir(directory):
path = os.path.join(directory, i)
if os.path.isdir(path) and i != '.doctrees':
install_path = os.path.join('share', 'man', i, 'man1')
if i == 'man1':
install_path = os.path.join('share', 'man', 'man1')
files = []
for filename in os.listdir(path):
if filename.split('.')[-1] == 'gz':
files.append(os.path.join(path, filename))
data_files.append((install_path, files))
class build_trans(Command):
description = 'Compile .po files into .mo files'
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self._srcdir = os.path.join(os.path.abspath(os.curdir))
translations = [(os.path.join(self._srcdir, 'po'),
os.path.join(self._srcdir, 'locale'), 'dictate'),
(os.path.join(self._srcdir, 'doc', 'gettext'),
os.path.join(self._srcdir, 'doc', 'locale'),
'index')]
for po_dir, locale_dir, module in translations:
os.system('%s/i18nhelpers/buildmo.py %s %s %s' %
(srcdir, po_dir, locale_dir, module))
self.append_mo(translations[0][1])
def append_mo(self, directory):
for lang in os.listdir(directory):
lang_dir = os.path.join('share', 'locale', lang,
'LC_MESSAGES')
lang_file = os.path.join(self._srcdir, 'locale', lang,
'LC_MESSAGES', 'dictate.mo')
data_files.append((lang_dir, [lang_file]))
build.sub_commands.append(('build_trans', None))
build.sub_commands.append(('build_manpage', None))
setup(name='dictate',
version='0.3',
description='Command-line dictation utility.',
author='Daniel Francis',
author_email='[email protected]',
license='GPLv3',
url='https://github.com/sdanielf/dictate/',
packages=['dictation'],
scripts=['dictate'],
cmdclass={'build_manpage': build_manpage,
'build_trans': build_trans},
data_files=data_files,
      long_description="""Dictate is an eSpeak-based dictation utility.
It reads a text slowly, allowing users to write it down. It can also pause
the dictation, spell difficult words and identify punctuation marks.""",
classifiers=['Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Education',
'License :: OSI Approved :: GNU General Public License v3 \
or later (GPLv3+)',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Topic :: Education',
'Topic :: Multimedia :: Sound/Audio :: Speech',
'Topic :: Utilities'])
| gpl-3.0 |
mhoffman/kmos | kmos/cli.py | 1 | 16514 | #!/usr/bin/env python
"""Entry point module for the command-line
interface. The kmos executable should be
on the program path; import this module's
main function and run it.
To call a kmos command as you would from the shell,
use ::
kmos.cli.main('...')
Every command can be shortened as long as it is non-ambiguous, e.g. ::
kmos ex <xml-file>
instead of ::
kmos export <xml-file>
etc.
"""
# Copyright 2009-2013 Max J. Hoffmann ([email protected])
# This file is part of kmos.
#
# kmos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kmos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kmos. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import shutil
usage = {}
usage['all'] = """kmos help all
Display documentation for all commands.
"""
usage['benchmark'] = """kmos benchmark
    Run 1 million kMC steps on the model in the current directory
and report runtime.
"""
usage['build'] = """kmos build
Build kmc_model.%s from *f90 files in the
current directory.
Additional Parameters ::
-d/--debug
Turn on assertion statements in F90 code
-n/--no-compiler-optimization
Do not send optimizing flags to compiler.
""" % ('pyd' if os.name == 'nt' else 'so')
usage['help'] = """kmos help <command>
Print usage information for the given command.
"""
usage['export'] = """kmos export <xml-file> [<export-path>]
Take a kmos xml-file and export all generated
source code to the export-path. There try to
build the kmc_model.%s.
Additional Parameters ::
-s/--source-only
Export source only and don't build binary
-b/--backend (local_smart|lat_int)
Choose backend. Default is "local_smart".
lat_int is EXPERIMENTAL and not made
for production, yet.
-d/--debug
Turn on assertion statements in F90 code.
(Only active in compile step)
--acf
Build the modules base_acf.f90 and proclist_acf.f90. Default is false.
      Both modules contain functions to calculate the ACF (autocorrelation function) and the MSD (mean squared displacement).
-n/--no-compiler-optimization
Do not send optimizing flags to compiler.
""" % ('pyd' if os.name == 'nt' else 'so')
usage['settings-export'] = """kmos settings-export <xml-file> [<export-path>]
Take a kmos xml-file and export kmc_settings.py
to the export-path.
"""
usage['edit'] = """kmos edit <xml-file>
Open the kmos xml-file in a GUI to edit
the model.
"""
usage['import'] = """kmos import <xml-file>
Take a kmos xml-file and open an ipython shell
with the project_tree imported as pt.
"""
usage['rebuild'] = """kmos rebuild
Export code and rebuild binary module from XML
information included in kmc_settings.py in
current directory.
Additional Parameters ::
-d/--debug
Turn on assertion statements in F90 code
"""
usage['shell'] = """kmos shell
Open an interactive shell and create a KMC_Model in it
run == shell
"""
usage['run'] = """kmos run
Open an interactive shell and create a KMC_Model in it
run == shell
"""
usage['version'] = """kmos version
Print version number and exit.
"""
usage['view'] = """kmos view
Take a kmc_model.%s and kmc_settings.py in the
same directory and start to simulate the
model visually.
Additional Parameters ::
-v/--steps-per-frame <number>
Number of steps per frame
""" % ('pyd' if os.name == 'nt' else 'so')
usage['xml'] = """kmos xml
Print xml representation of model to stdout
"""
def get_options(args=None, get_parser=False):
import optparse
import os
from glob import glob
import kmos
parser = optparse.OptionParser(
'Usage: %prog [help] ('
+ '|'.join(sorted(usage.keys()))
+ ') [options]',
version=kmos.__version__)
parser.add_option('-s', '--source-only',
dest='source_only',
action='store_true',
default=False)
parser.add_option('-p', '--path-to-f2py',
dest='path_to_f2py',
default='f2py')
parser.add_option('-b', '--backend',
dest='backend',
default='local_smart')
parser.add_option('-a', '--avoid-default-state',
dest='avoid_default_state',
action='store_true',
default=False,
)
parser.add_option('-v', '--steps-per-frame',
dest='steps_per_frame',
type='int',
default='50000')
parser.add_option('-d', '--debug',
default=False,
dest='debug',
action='store_true')
parser.add_option('-n', '--no-compiler-optimization',
default=False,
dest='no_optimize',
action='store_true')
parser.add_option('-o', '--overwrite',
default=False,
action='store_true')
parser.add_option('-l', '--variable-length',
dest='variable_length',
default=95,
type='int')
parser.add_option('-c', '--catmap',
default=False,
action='store_true')
parser.add_option('--acf',
dest='acf',
action='store_true',
default=False,
)
try:
from numpy.distutils.fcompiler import get_default_fcompiler
from numpy.distutils import log
log.set_verbosity(-1, True)
fcompiler = get_default_fcompiler()
except:
fcompiler = 'gfortran'
parser.add_option('-f', '--fcompiler',
dest='fcompiler',
default=os.environ.get('F2PY_FCOMPILER', fcompiler))
if args is not None:
options, args = parser.parse_args(args.split())
else:
options, args = parser.parse_args()
if len(args) < 1:
parser.error('Command expected')
if get_parser:
return options, args, parser
else:
return options, args
def match_keys(arg, usage, parser):
"""Try to match part of a command against
the set of commands from usage. Throws
an error if not successful.
"""
possible_args = [key for key in usage if key.startswith(arg)]
if len(possible_args) == 0:
parser.error('Command "%s" not understood.' % arg)
elif len(possible_args) > 1:
parser.error(('Command "%s" ambiguous.\n'
'Could be one of %s\n\n') % (arg, possible_args))
else:
return possible_args[0]
def main(args=None):
"""The CLI main entry point function.
The optional argument args, can be used to
directly supply command line argument like
$ kmos <args>
otherwise args will be taken from STDIN.
"""
from glob import glob
options, args, parser = get_options(args, get_parser=True)
global model, pt, np, cm_model
if not args[0] in usage.keys():
args[0] = match_keys(args[0], usage, parser)
if args[0] == 'benchmark':
from sys import path
path.append(os.path.abspath(os.curdir))
nsteps = 1000000
from time import time
from kmos.run import KMC_Model
model = KMC_Model(print_rates=False, banner=False)
time0 = time()
try:
model.proclist.do_kmc_steps(nsteps)
except: # kmos < 0.3 had no model.proclist.do_kmc_steps
model.do_steps(nsteps)
needed_time = time() - time0
print('Using the [%s] backend.' % model.get_backend())
print('%s steps took %.2f seconds' % (nsteps, needed_time))
        print('Or %.2e steps/s' % (nsteps / needed_time))
model.deallocate()
elif args[0] == 'build':
from kmos.utils import build
build(options)
elif args[0] == 'edit':
from kmos import gui
gui.main()
elif args[0] == 'settings-export':
import kmos.types
import kmos.io
from kmos.io import ProcListWriter
if len(args) < 2:
parser.error('XML file and export path expected.')
if len(args) < 3:
out_dir = '%s_%s' % (os.path.splitext(args[1])[0], options.backend)
print('No export path provided. Exporting to %s' % out_dir)
args.append(out_dir)
xml_file = args[1]
export_dir = args[2]
project = kmos.types.Project()
project.import_file(xml_file)
writer = ProcListWriter(project, export_dir)
writer.write_settings()
elif args[0] == 'export':
import kmos.types
import kmos.io
from kmos.utils import build
if len(args) < 2:
parser.error('XML file and export path expected.')
if len(args) < 3:
out_dir = '%s_%s' % (os.path.splitext(args[1])[0], options.backend)
print('No export path provided. Exporting to %s' % out_dir)
args.append(out_dir)
xml_file = args[1]
export_dir = os.path.join(args[2], 'src')
project = kmos.types.Project()
project.import_file(xml_file)
project.shorten_names(max_length=options.variable_length)
kmos.io.export_source(project,
export_dir,
options=options)
if ((os.name == 'posix'
and os.uname()[0] in ['Linux', 'Darwin'])
or os.name == 'nt') \
and not options.source_only:
os.chdir(export_dir)
build(options)
            for out in glob('kmc_*'):
                if os.path.exists('../%s' % out):
                    if options.overwrite:
                        overwrite = 'y'
                    else:
                        overwrite = raw_input(('Should I overwrite existing %s? '
                                               '[y/N] ') % out).lower()
                    if overwrite.startswith('y'):
                        print('Overwriting {out}'.format(**locals()))
                        os.remove('../%s' % out)
                        shutil.move(out, '..')
                    else:
                        print('Skipping {out}'.format(**locals()))
                else:
                    shutil.move(out, '..')
    # NOTE: dead code; 'settings-export' is already consumed by the earlier
    # elif branch above, so this duplicate branch is never reached.
    elif args[0] == 'settings-export':
import kmos.io
pt = kmos.io.import_file(args[1])
if len(args) < 3:
out_dir = os.path.splitext(args[1])[0]
print('No export path provided. Exporting kmc_settings.py to %s'
% out_dir)
args.append(out_dir)
if not os.path.exists(args[2]):
os.mkdir(args[2])
elif not os.path.isdir(args[2]):
raise UserWarning("Cannot overwrite %s; Exiting;" % args[2])
writer = kmos.io.ProcListWriter(pt, args[2])
writer.write_settings()
elif args[0] == 'help':
if len(args) < 2:
parser.error('Which help do you want?')
if args[1] == 'all':
for command in sorted(usage):
print(usage[command])
elif args[1] in usage:
print('Usage: %s\n' % usage[args[1]])
else:
arg = match_keys(args[1], usage, parser)
print('Usage: %s\n' % usage[arg])
elif args[0] == 'import':
import kmos.io
if not len(args) >= 2:
raise UserWarning('XML file name expected.')
pt = kmos.io.import_xml_file(args[1])
if len(args) == 2:
            sh(banner='Note: pt = kmos.io.import_xml_file(\'%s\')' % args[1])
elif len(args) == 3: # if optional 3rd argument is given, store model there and exit
pt.save(args[2])
elif args[0] == 'rebuild':
from time import sleep
print('Will rebuild model from kmc_settings.py in current directory')
        print('Please do not interrupt the'
              ' build process, as you will most likely')
        print('lose the current model files.')
sleep(2.)
from sys import path
path.append(os.path.abspath(os.curdir))
from tempfile import mktemp
if not os.path.exists('kmc_model.so') \
and not os.path.exists('kmc_model.pyd'):
raise Exception('No kmc_model.so found.')
if not os.path.exists('kmc_settings.py'):
raise Exception('No kmc_settings.py found.')
from kmos.run import KMC_Model
model = KMC_Model(print_rates=False, banner=False)
tempfile = mktemp()
f = file(tempfile, 'w')
f.write(model.xml())
f.close()
for kmc_model in glob('kmc_model.*'):
os.remove(kmc_model)
os.remove('kmc_settings.py')
main('export %s -b %s .' % (tempfile, options.backend))
os.remove(tempfile)
model.deallocate()
elif args[0] in ['run', 'shell']:
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos.run import KMC_Model
# useful to have in interactive mode
import numpy as np
try:
from matplotlib import pyplot as plt
except:
plt = None
if options.catmap:
import catmap
import catmap.cli.kmc_runner
seed = catmap.cli.kmc_runner.get_seed_from_path('.')
cm_model = catmap.ReactionModel(setup_file='{seed}.mkm'.format(**locals()))
catmap_message = '\nSide-loaded catmap_model {seed}.mkm into cm_model = ReactionModel(setup_file="{seed}.mkm")'.format(**locals())
else:
catmap_message = ''
try:
model = KMC_Model(print_rates=False)
except:
print("Warning: could not import kmc_model!"
" Please make sure you are in the right directory")
sh(banner='Note: model = KMC_Model(print_rates=False){catmap_message}'.format(**locals()))
try:
model.deallocate()
except:
print("Warning: could not deallocate model. Was is allocated?")
elif args[0] == 'version':
from kmos import VERSION
print(VERSION)
elif args[0] == 'view':
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos import view
view.main(steps_per_frame=options.steps_per_frame)
elif args[0] == 'xml':
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos.run import KMC_Model
model = KMC_Model(banner=False, print_rates=False)
print(model.xml())
else:
parser.error('Command "%s" not understood.' % args[0])
def sh(banner):
"""Wrapper around interactive ipython shell
    that factors out IPython version dependencies.
"""
from distutils.version import LooseVersion
import IPython
if hasattr(IPython, 'release'):
try:
from IPython.terminal.embed import InteractiveShellEmbed
InteractiveShellEmbed(banner1=banner)()
except ImportError:
try:
from IPython.frontend.terminal.embed \
import InteractiveShellEmbed
InteractiveShellEmbed(banner1=banner)()
except ImportError:
from IPython.Shell import IPShellEmbed
IPShellEmbed(banner=banner)()
else:
from IPython.Shell import IPShellEmbed
IPShellEmbed(banner=banner)()
| gpl-3.0 |
cccfran/sympy | sympy/concrete/tests/test_sums_products.py | 4 | 30439 | from sympy import (
Abs, And, binomial, Catalan, cos, Derivative, E, Eq, exp, EulerGamma,
factorial, Function, harmonic, I, Integral, KroneckerDelta, log,
nan, Ne, Or, oo, pi, Piecewise, Product, product, Rational, S, simplify,
sqrt, Sum, summation, Symbol, symbols, sympify, zeta, gamma, Le
)
from sympy.abc import a, b, c, d, f, k, m, x, y, z
from sympy.concrete.summations import telescopic
from sympy.utilities.pytest import XFAIL, raises
from sympy import simplify
n = Symbol('n', integer=True)
def test_karr_convention():
# Test the Karr summation convention that we want to hold.
# See his paper "Summation in Finite Terms" for a detailed
# reasoning why we really want exactly this definition.
# The convention is described on page 309 and essentially
# in section 1.4, definition 3:
#
# \sum_{m <= i < n} f(i) 'has the obvious meaning' for m < n
# \sum_{m <= i < n} f(i) = 0 for m = n
# \sum_{m <= i < n} f(i) = - \sum_{n <= i < m} f(i) for m > n
#
# It is important to note that he defines all sums with
# the upper limit being *exclusive*.
# In contrast, sympy and the usual mathematical notation has:
#
# sum_{i = a}^b f(i) = f(a) + f(a+1) + ... + f(b-1) + f(b)
#
# with the upper limit *inclusive*. So translating between
# the two we find that:
#
# \sum_{m <= i < n} f(i) = \sum_{i = m}^{n-1} f(i)
#
# where we intentionally used two different ways to typeset the
# sum and its limits.
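    # A concrete instance of the convention (illustrative numbers): with
    # m = 2 and n = 5, \sum_{2 <= i < 5} i = 2 + 3 + 4 = 9, which in sympy's
    # inclusive notation is Sum(i, (i, 2, 4)).doit() == 9; swapping the
    # limits flips the sign, so Sum(i, (i, 5, 1)).doit() == -9.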
i = Symbol("i", integer=True)
k = Symbol("k", integer=True)
j = Symbol("j", integer=True)
# A simple example with a concrete summand and symbolic limits.
# The normal sum: m = k and n = k + j and therefore m < n:
m = k
n = k + j
a = m
b = n - 1
S1 = Sum(i**2, (i, a, b)).doit()
# The reversed sum: m = k + j and n = k and therefore m > n:
m = k + j
n = k
a = m
b = n - 1
S2 = Sum(i**2, (i, a, b)).doit()
assert simplify(S1 + S2) == 0
# Test the empty sum: m = k and n = k and therefore m = n:
m = k
n = k
a = m
b = n - 1
Sz = Sum(i**2, (i, a, b)).doit()
assert Sz == 0
# Another example this time with an unspecified summand and
# numeric limits. (We can not do both tests in the same example.)
f = Function("f")
# The normal sum with m < n:
m = 2
n = 11
a = m
b = n - 1
S1 = Sum(f(i), (i, a, b)).doit()
# The reversed sum with m > n:
m = 11
n = 2
a = m
b = n - 1
S2 = Sum(f(i), (i, a, b)).doit()
assert simplify(S1 + S2) == 0
# Test the empty sum with m = n:
m = 5
n = 5
a = m
b = n - 1
Sz = Sum(f(i), (i, a, b)).doit()
assert Sz == 0
def test_karr_proposition_2a():
# Test Karr, page 309, proposition 2, part a
i = Symbol("i", integer=True)
u = Symbol("u", integer=True)
v = Symbol("v", integer=True)
def test_the_sum(m, n):
# g
g = i**3 + 2*i**2 - 3*i
# f = Delta g
f = simplify(g.subs(i, i+1) - g)
# The sum
a = m
b = n - 1
S = Sum(f, (i, a, b)).doit()
# Test if Sum_{m <= i < n} f(i) = g(n) - g(m)
assert simplify(S - (g.subs(i, n) - g.subs(i, m))) == 0
# m < n
test_the_sum(u, u+v)
# m = n
test_the_sum(u, u )
# m > n
test_the_sum(u+v, u )
def test_karr_proposition_2b():
# Test Karr, page 309, proposition 2, part b
i = Symbol("i", integer=True)
u = Symbol("u", integer=True)
v = Symbol("v", integer=True)
w = Symbol("w", integer=True)
def test_the_sum(l, n, m):
# Summand
s = i**3
# First sum
a = l
b = n - 1
S1 = Sum(s, (i, a, b)).doit()
# Second sum
a = l
b = m - 1
S2 = Sum(s, (i, a, b)).doit()
# Third sum
a = m
b = n - 1
S3 = Sum(s, (i, a, b)).doit()
# Test if S1 = S2 + S3 as required
assert S1 - (S2 + S3) == 0
# l < m < n
test_the_sum(u, u+v, u+v+w)
# l < m = n
test_the_sum(u, u+v, u+v )
# l < m > n
test_the_sum(u, u+v+w, v )
# l = m < n
test_the_sum(u, u, u+v )
# l = m = n
test_the_sum(u, u, u )
# l = m > n
test_the_sum(u+v, u+v, u )
# l > m < n
test_the_sum(u+v, u, u+w )
# l > m = n
test_the_sum(u+v, u, u )
# l > m > n
test_the_sum(u+v+w, u+v, u )
def test_arithmetic_sums():
assert summation(1, (n, a, b)) == b - a + 1
assert Sum(S.NaN, (n, a, b)) is S.NaN
assert Sum(x, (n, a, a)).doit() == x
assert Sum(x, (x, a, a)).doit() == a
assert Sum(x, (n, 1, a)).doit() == a*x
lo, hi = 1, 2
s1 = Sum(n, (n, lo, hi))
s2 = Sum(n, (n, hi, lo))
assert s1 != s2
assert s1.doit() == 3 and s2.doit() == 0
lo, hi = x, x + 1
s1 = Sum(n, (n, lo, hi))
s2 = Sum(n, (n, hi, lo))
assert s1 != s2
assert s1.doit() == 2*x + 1 and s2.doit() == 0
assert Sum(Integral(x, (x, 1, y)) + x, (x, 1, 2)).doit() == \
y**2 + 2
assert summation(1, (n, 1, 10)) == 10
assert summation(2*n, (n, 0, 10**10)) == 100000000010000000000
assert summation(4*n*m, (n, a, 1), (m, 1, d)).expand() == \
2*d + 2*d**2 + a*d + a*d**2 - d*a**2 - a**2*d**2
assert summation(cos(n), (n, -2, 1)) == cos(-2) + cos(-1) + cos(0) + cos(1)
assert summation(cos(n), (n, x, x + 2)) == cos(x) + cos(x + 1) + cos(x + 2)
assert isinstance(summation(cos(n), (n, x, x + S.Half)), Sum)
assert summation(k, (k, 0, oo)) == oo
def test_polynomial_sums():
assert summation(n**2, (n, 3, 8)) == 199
assert summation(n, (n, a, b)) == \
((a + b)*(b - a + 1)/2).expand()
assert summation(n**2, (n, 1, b)) == \
((2*b**3 + 3*b**2 + b)/6).expand()
assert summation(n**3, (n, 1, b)) == \
((b**4 + 2*b**3 + b**2)/4).expand()
assert summation(n**6, (n, 1, b)) == \
((6*b**7 + 21*b**6 + 21*b**5 - 7*b**3 + b)/42).expand()
def test_geometric_sums():
assert summation(pi**n, (n, 0, b)) == (1 - pi**(b + 1)) / (1 - pi)
assert summation(2 * 3**n, (n, 0, b)) == 3**(b + 1) - 1
assert summation(Rational(1, 2)**n, (n, 1, oo)) == 1
assert summation(2**n, (n, 0, b)) == 2**(b + 1) - 1
assert summation(2**n, (n, 1, oo)) == oo
assert summation(2**(-n), (n, 1, oo)) == 1
assert summation(3**(-n), (n, 4, oo)) == Rational(1, 54)
assert summation(2**(-4*n + 3), (n, 1, oo)) == Rational(8, 15)
assert summation(2**(n + 1), (n, 1, b)).expand() == 4*(2**b - 1)
# issue 6664:
assert summation(x**n, (n, 0, oo)) == \
Piecewise((1/(-x + 1), Abs(x) < 1), (Sum(x**n, (n, 0, oo)), True))
assert summation(-2**n, (n, 0, oo)) == -oo
assert summation(I**n, (n, 0, oo)) == Sum(I**n, (n, 0, oo))
# issue 6802:
assert summation((-1)**(2*x + 2), (x, 0, n)) == n + 1
assert summation((-2)**(2*x + 2), (x, 0, n)) == 4*4**(n + 1)/S(3) - S(4)/3
assert summation((-1)**x, (x, 0, n)) == -(-1)**(n + 1)/S(2) + S(1)/2
assert summation(y**x, (x, a, b)) == \
Piecewise((-a + b + 1, Eq(y, 1)), ((y**a - y**(b + 1))/(-y + 1), True))
assert summation((-2)**(y*x + 2), (x, 0, n)) == \
4*Piecewise((n + 1, Eq((-2)**y, 1)),
((-(-2)**(y*(n + 1)) + 1)/(-(-2)**y + 1), True))
# issue 8251:
assert summation((1/(n + 1)**2)*n**2, (n, 0, oo)) == oo
def test_harmonic_sums():
assert summation(1/k, (k, 0, n)) == Sum(1/k, (k, 0, n))
assert summation(1/k, (k, 1, n)) == harmonic(n)
assert summation(n/k, (k, 1, n)) == n*harmonic(n)
assert summation(1/k, (k, 5, n)) == harmonic(n) - harmonic(4)
def test_composite_sums():
f = Rational(1, 2)*(7 - 6*n + Rational(1, 7)*n**3)
s = summation(f, (n, a, b))
assert not isinstance(s, Sum)
A = 0
for i in range(-3, 5):
A += f.subs(n, i)
B = s.subs(a, -3).subs(b, 4)
assert A == B
def test_hypergeometric_sums():
assert summation(
binomial(2*k, k)/4**k, (k, 0, n)) == (1 + 2*n)*binomial(2*n, n)/4**n
def test_other_sums():
f = m**2 + m*exp(m)
g = 3*exp(S(3)/2)/2 + exp(S(1)/2)/2 - exp(-S(1)/2)/2 - 3*exp(-S(3)/2)/2 + 5
assert summation(f, (m, -S(3)/2, S(3)/2)).expand() == g
assert summation(f, (m, -1.5, 1.5)).evalf().epsilon_eq(g.evalf(), 1e-10)
fac = factorial
def NS(e, n=15, **options):
return str(sympify(e).evalf(n, **options))
def test_evalf_fast_series():
# Euler transformed series for sqrt(1+x)
assert NS(Sum(
fac(2*n + 1)/fac(n)**2/2**(3*n + 1), (n, 0, oo)), 100) == NS(sqrt(2), 100)
# Some series for exp(1)
estr = NS(E, 100)
assert NS(Sum(1/fac(n), (n, 0, oo)), 100) == estr
assert NS(1/Sum((1 - 2*n)/fac(2*n), (n, 0, oo)), 100) == estr
assert NS(Sum((2*n + 1)/fac(2*n), (n, 0, oo)), 100) == estr
assert NS(Sum((4*n + 3)/2**(2*n + 1)/fac(2*n + 1), (n, 0, oo))**2, 100) == estr
pistr = NS(pi, 100)
# Ramanujan series for pi
assert NS(9801/sqrt(8)/Sum(fac(
4*n)*(1103 + 26390*n)/fac(n)**4/396**(4*n), (n, 0, oo)), 100) == pistr
assert NS(1/Sum(
binomial(2*n, n)**3 * (42*n + 5)/2**(12*n + 4), (n, 0, oo)), 100) == pistr
# Machin's formula for pi
assert NS(16*Sum((-1)**n/(2*n + 1)/5**(2*n + 1), (n, 0, oo)) -
4*Sum((-1)**n/(2*n + 1)/239**(2*n + 1), (n, 0, oo)), 100) == pistr
# Apery's constant
astr = NS(zeta(3), 100)
P = 126392*n**5 + 412708*n**4 + 531578*n**3 + 336367*n**2 + 104000* \
n + 12463
assert NS(Sum((-1)**n * P / 24 * (fac(2*n + 1)*fac(2*n)*fac(
n))**3 / fac(3*n + 2) / fac(4*n + 3)**3, (n, 0, oo)), 100) == astr
assert NS(Sum((-1)**n * (205*n**2 + 250*n + 77)/64 * fac(n)**10 /
fac(2*n + 1)**5, (n, 0, oo)), 100) == astr
def test_evalf_fast_series_issue_4021():
# Catalan's constant
assert NS(Sum((-1)**(n - 1)*2**(8*n)*(40*n**2 - 24*n + 3)*fac(2*n)**3*
fac(n)**2/n**3/(2*n - 1)/fac(4*n)**2, (n, 1, oo))/64, 100) == \
NS(Catalan, 100)
astr = NS(zeta(3), 100)
assert NS(5*Sum(
(-1)**(n - 1)*fac(n)**2 / n**3 / fac(2*n), (n, 1, oo))/2, 100) == astr
assert NS(Sum((-1)**(n - 1)*(56*n**2 - 32*n + 5) / (2*n - 1)**2 * fac(n - 1)
**3 / fac(3*n), (n, 1, oo))/4, 100) == astr
def test_evalf_slow_series():
assert NS(Sum((-1)**n / n, (n, 1, oo)), 15) == NS(-log(2), 15)
assert NS(Sum((-1)**n / n, (n, 1, oo)), 50) == NS(-log(2), 50)
assert NS(Sum(1/n**2, (n, 1, oo)), 15) == NS(pi**2/6, 15)
assert NS(Sum(1/n**2, (n, 1, oo)), 100) == NS(pi**2/6, 100)
assert NS(Sum(1/n**2, (n, 1, oo)), 500) == NS(pi**2/6, 500)
assert NS(Sum((-1)**n / (2*n + 1)**3, (n, 0, oo)), 15) == NS(pi**3/32, 15)
assert NS(Sum((-1)**n / (2*n + 1)**3, (n, 0, oo)), 50) == NS(pi**3/32, 50)
def test_euler_maclaurin():
# Exact polynomial sums with E-M
def check_exact(f, a, b, m, n):
A = Sum(f, (k, a, b))
s, e = A.euler_maclaurin(m, n)
assert (e == 0) and (s.expand() == A.doit())
check_exact(k**4, a, b, 0, 2)
check_exact(k**4 + 2*k, a, b, 1, 2)
check_exact(k**4 + k**2, a, b, 1, 5)
check_exact(k**5, 2, 6, 1, 2)
check_exact(k**5, 2, 6, 1, 3)
# Not exact
assert Sum(k**6, (k, a, b)).euler_maclaurin(0, 2)[1] != 0
# Numerical test
for m, n in [(2, 4), (2, 20), (10, 20), (18, 20)]:
A = Sum(1/k**3, (k, 1, oo))
s, e = A.euler_maclaurin(m, n)
assert abs((s - zeta(3)).evalf()) < e.evalf()
def test_evalf_euler_maclaurin():
assert NS(Sum(1/k**k, (k, 1, oo)), 15) == '1.29128599706266'
assert NS(Sum(1/k**k, (k, 1, oo)),
50) == '1.2912859970626635404072825905956005414986193682745'
assert NS(Sum(1/k - log(1 + 1/k), (k, 1, oo)), 15) == NS(EulerGamma, 15)
assert NS(Sum(1/k - log(1 + 1/k), (k, 1, oo)), 50) == NS(EulerGamma, 50)
assert NS(Sum(log(k)/k**2, (k, 1, oo)), 15) == '0.937548254315844'
assert NS(Sum(log(k)/k**2, (k, 1, oo)),
50) == '0.93754825431584375370257409456786497789786028861483'
assert NS(Sum(1/k, (k, 1000000, 2000000)), 15) == '0.693147930560008'
assert NS(Sum(1/k, (k, 1000000, 2000000)),
50) == '0.69314793056000780941723211364567656807940638436025'
def test_evalf_symbolic():
f, g = symbols('f g', cls=Function)
# issue 6328
expr = Sum(f(x), (x, 1, 3)) + Sum(g(x), (x, 1, 3))
assert expr.evalf() == expr
def test_evalf_issue_3273():
assert Sum(0, (k, 1, oo)).evalf() == 0
def test_simple_products():
assert Product(S.NaN, (x, 1, 3)) is S.NaN
assert product(S.NaN, (x, 1, 3)) is S.NaN
assert Product(x, (n, a, a)).doit() == x
assert Product(x, (x, a, a)).doit() == a
assert Product(x, (y, 1, a)).doit() == x**a
lo, hi = 1, 2
s1 = Product(n, (n, lo, hi))
s2 = Product(n, (n, hi, lo))
assert s1 != s2
# This IS correct according to Karr product convention
assert s1.doit() == 2
assert s2.doit() == 1
lo, hi = x, x + 1
s1 = Product(n, (n, lo, hi))
s2 = Product(n, (n, hi, lo))
s3 = 1 / Product(n, (n, hi + 1, lo - 1))
assert s1 != s2
# This IS correct according to Karr product convention
assert s1.doit() == x*(x + 1)
assert s2.doit() == 1
assert s3.doit() == x*(x + 1)
assert Product(Integral(2*x, (x, 1, y)) + 2*x, (x, 1, 2)).doit() == \
(y**2 + 1)*(y**2 + 3)
assert product(2, (n, a, b)) == 2**(b - a + 1)
assert product(n, (n, 1, b)) == factorial(b)
assert product(n**3, (n, 1, b)) == factorial(b)**3
assert product(3**(2 + n), (n, a, b)) \
== 3**(2*(1 - a + b) + b/2 + (b**2)/2 + a/2 - (a**2)/2)
assert product(cos(n), (n, 3, 5)) == cos(3)*cos(4)*cos(5)
assert product(cos(n), (n, x, x + 2)) == cos(x)*cos(x + 1)*cos(x + 2)
assert isinstance(product(cos(n), (n, x, x + S.Half)), Product)
# If Product managed to evaluate this one, it most likely got it wrong!
assert isinstance(Product(n**n, (n, 1, b)), Product)
def test_rational_products():
assert simplify(product(1 + 1/n, (n, a, b))) == (1 + b)/a
assert simplify(product(n + 1, (n, a, b))) == gamma(2 + b)/gamma(1 + a)
assert simplify(product((n + 1)/(n - 1), (n, a, b))) == b*(1 + b)/(a*(a - 1))
assert simplify(product(n/(n + 1)/(n + 2), (n, a, b))) == \
a*gamma(a + 2)/(b + 1)/gamma(b + 3)
assert simplify(product(n*(n + 1)/(n - 1)/(n - 2), (n, a, b))) == \
b**2*(b - 1)*(1 + b)/(a - 1)**2/(a*(a - 2))
def test_wallis_product():
# Wallis product, given in two different forms to ensure that Product
# can factor simple rational expressions
A = Product(4*n**2 / (4*n**2 - 1), (n, 1, b))
B = Product((2*n)*(2*n)/(2*n - 1)/(2*n + 1), (n, 1, b))
half = Rational(1, 2)
R = pi/2 * factorial(b)**2 / factorial(b - half) / factorial(b + half)
assert simplify(A.doit()) == R
assert simplify(B.doit()) == R
# This one should eventually also be doable (Euler's product formula for sin)
# assert Product(1+x/n**2, (n, 1, b)) == ...
def test_telescopic_sums():
    # also checks input 2 of comment 1 in issue 4127
assert Sum(1/k - 1/(k + 1), (k, 1, n)).doit() == 1 - 1/(1 + n)
f = Function("f")
assert Sum(
f(k) - f(k + 2), (k, m, n)).doit() == -f(1 + n) - f(2 + n) + f(m) + f(1 + m)
assert Sum(cos(k) - cos(k + 3), (k, 1, n)).doit() == -cos(1 + n) - \
cos(2 + n) - cos(3 + n) + cos(1) + cos(2) + cos(3)
# dummy variable shouldn't matter
assert telescopic(1/m, -m/(1 + m), (m, n - 1, n)) == \
telescopic(1/k, -k/(1 + k), (k, n - 1, n))
assert Sum(1/x/(x - 1), (x, a, b)).doit() == -((a - b - 1)/(b*(a - 1)))
def test_sum_reconstruct():
s = Sum(n**2, (n, -1, 1))
assert s == Sum(*s.args)
raises(ValueError, lambda: Sum(x, x))
raises(ValueError, lambda: Sum(x, (x, 1)))
def test_limit_subs():
for F in (Sum, Product, Integral):
assert F(a*exp(a), (a, -2, 2)) == F(a*exp(a), (a, -b, b)).subs(b, 2)
assert F(a, (a, F(b, (b, 1, 2)), 4)).subs(F(b, (b, 1, 2)), c) == \
F(a, (a, c, 4))
assert F(x, (x, 1, x + y)).subs(x, 1) == F(x, (x, 1, y + 1))
def test_function_subs():
f = Function("f")
S = Sum(x*f(y),(x,0,oo),(y,0,oo))
assert S.subs(f(y),y) == Sum(x*y,(x,0,oo),(y,0,oo))
assert S.subs(f(x),x) == S
raises(ValueError, lambda: S.subs(f(y),x+y) )
S = Sum(x*log(y),(x,0,oo),(y,0,oo))
assert S.subs(log(y),y) == S
f = Symbol('f')
S = Sum(x*f(y),(x,0,oo),(y,0,oo))
assert S.subs(f(y),y) == Sum(x*y,(x,0,oo),(y,0,oo))
def test_equality():
# if this fails remove special handling below
raises(ValueError, lambda: Sum(x, x))
r = symbols('x', real=True)
for F in (Sum, Product, Integral):
try:
assert F(x, x) != F(y, y)
assert F(x, (x, 1, 2)) != F(x, x)
assert F(x, (x, x)) != F(x, x) # or else they print the same
assert F(1, x) != F(1, y)
except ValueError:
pass
assert F(a, (x, 1, 2)) != F(a, (x, 1, 3))
assert F(a, (x, 1, 2)) != F(b, (x, 1, 2))
assert F(x, (x, 1, 2)) != F(r, (r, 1, 2))
assert F(1, (x, 1, x)) != F(1, (y, 1, x))
assert F(1, (x, 1, x)) != F(1, (y, 1, y))
# issue 5265
assert Sum(x, (x, 1, x)).subs(x, a) == Sum(x, (x, 1, a))
def test_Sum_doit():
assert Sum(n*Integral(a**2), (n, 0, 2)).doit() == a**3
assert Sum(n*Integral(a**2), (n, 0, 2)).doit(deep=False) == \
3*Integral(a**2)
assert summation(n*Integral(a**2), (n, 0, 2)) == 3*Integral(a**2)
# test nested sum evaluation
s = Sum( Sum( Sum(2,(z,1,n+1)), (y,x+1,n)), (x,1,n))
assert 0 == (s.doit() - n*(n+1)*(n-1)).factor()
assert Sum(Sum(KroneckerDelta(m, n), (m, 1, 3)), (n, 1, 3)).doit() == 3
assert Sum(Sum(KroneckerDelta(k, m), (m, 1, 3)), (n, 1, 3)).doit() == \
3*Piecewise((1, And(S(1) <= k, k <= 3)), (0, True))
assert Sum(f(n)*Sum(KroneckerDelta(m, n), (m, 0, oo)), (n, 1, 3)).doit() == \
f(1) + f(2) + f(3)
assert Sum(f(n)*Sum(KroneckerDelta(m, n), (m, 0, oo)), (n, 1, oo)).doit() == \
Sum(Piecewise((f(n), And(Le(0, n), n < oo)), (0, True)), (n, 1, oo))
l = Symbol('l', integer=True, positive=True)
assert Sum(f(l)*Sum(KroneckerDelta(m, l), (m, 0, oo)), (l, 1, oo)).doit() == \
Sum(f(l), (l, 1, oo))
# issue 2597
nmax = symbols('N', integer=True, positive=True)
pw = Piecewise((1, And(S(1) <= n, n <= nmax)), (0, True))
assert Sum(pw, (n, 1, nmax)).doit() == Sum(pw, (n, 1, nmax))
def test_Product_doit():
assert Product(n*Integral(a**2), (n, 1, 3)).doit() == 2 * a**9 / 9
assert Product(n*Integral(a**2), (n, 1, 3)).doit(deep=False) == \
6*Integral(a**2)**3
assert product(n*Integral(a**2), (n, 1, 3)) == 6*Integral(a**2)**3
def test_Sum_interface():
assert isinstance(Sum(0, (n, 0, 2)), Sum)
assert Sum(nan, (n, 0, 2)) is nan
assert Sum(nan, (n, 0, oo)) is nan
assert Sum(0, (n, 0, 2)).doit() == 0
assert isinstance(Sum(0, (n, 0, oo)), Sum)
assert Sum(0, (n, 0, oo)).doit() == 0
raises(ValueError, lambda: Sum(1))
raises(ValueError, lambda: summation(1))
def test_eval_diff():
assert Sum(x, (x, 1, 2)).diff(x) == 0
assert Sum(x*y, (x, 1, 2)).diff(x) == 0
assert Sum(x*y, (y, 1, 2)).diff(x) == Sum(y, (y, 1, 2))
e = Sum(x*y, (x, 1, a))
assert e.diff(a) == Derivative(e, a)
assert Sum(x*y, (x, 1, 3), (a, 2, 5)).diff(y) == \
Sum(x*y, (x, 1, 3), (a, 2, 5)).doit().diff(y) == 24
def test_hypersum():
from sympy import simplify, sin, hyper
assert simplify(summation(x**n/fac(n), (n, 1, oo))) == -1 + exp(x)
assert summation((-1)**n * x**(2*n) / fac(2*n), (n, 0, oo)) == cos(x)
assert simplify(summation((-1)**n*x**(2*n + 1) /
factorial(2*n + 1), (n, 3, oo))) == -x + sin(x) + x**3/6 - x**5/120
assert summation(1/(n + 2)**3, (n, 1, oo)) == -S(9)/8 + zeta(3)
assert summation(1/n**4, (n, 1, oo)) == pi**4/90
s = summation(x**n*n, (n, -oo, 0))
assert s.is_Piecewise
assert s.args[0].args[0] == -1/(x*(1 - 1/x)**2)
assert s.args[0].args[1] == (abs(1/x) < 1)
m = Symbol('n', integer=True, positive=True)
assert summation(binomial(m, k), (k, 0, m)) == 2**m
def test_issue_4170():
assert summation(1/factorial(k), (k, 0, oo)) == E
def test_is_commutative():
from sympy.physics.secondquant import NO, F, Fd
m = Symbol('m', commutative=False)
for f in (Sum, Product, Integral):
assert f(z, (z, 1, 1)).is_commutative is True
assert f(z*y, (z, 1, 6)).is_commutative is True
assert f(m*x, (x, 1, 2)).is_commutative is False
assert f(NO(Fd(x)*F(y))*z, (z, 1, 2)).is_commutative is False
def test_is_zero():
for func in [Sum, Product]:
assert func(0, (x, 1, 1)).is_zero is True
assert func(x, (x, 1, 1)).is_zero is None
def test_is_number():
# is number should not rely on evaluation or assumptions,
# it should be equivalent to `not foo.free_symbols`
assert Sum(1, (x, 1, 1)).is_number is True
assert Sum(1, (x, 1, x)).is_number is False
assert Sum(0, (x, y, z)).is_number is False
assert Sum(x, (y, 1, 2)).is_number is False
assert Sum(x, (y, 1, 1)).is_number is False
assert Sum(x, (x, 1, 2)).is_number is True
assert Sum(x*y, (x, 1, 2), (y, 1, 3)).is_number is True
assert Product(2, (x, 1, 1)).is_number is True
assert Product(2, (x, 1, y)).is_number is False
assert Product(0, (x, y, z)).is_number is False
assert Product(1, (x, y, z)).is_number is False
assert Product(x, (y, 1, x)).is_number is False
assert Product(x, (y, 1, 2)).is_number is False
assert Product(x, (y, 1, 1)).is_number is False
assert Product(x, (x, 1, 2)).is_number is True
def test_free_symbols():
for func in [Sum, Product]:
assert func(1, (x, 1, 2)).free_symbols == set()
assert func(0, (x, 1, y)).free_symbols == set([y])
assert func(2, (x, 1, y)).free_symbols == set([y])
assert func(x, (x, 1, 2)).free_symbols == set()
assert func(x, (x, 1, y)).free_symbols == set([y])
assert func(x, (y, 1, y)).free_symbols == set([x, y])
assert func(x, (y, 1, 2)).free_symbols == set([x])
assert func(x, (y, 1, 1)).free_symbols == set([x])
assert func(x, (y, 1, z)).free_symbols == set([x, z])
assert func(x, (x, 1, y), (y, 1, 2)).free_symbols == set()
assert func(x, (x, 1, y), (y, 1, z)).free_symbols == set([z])
assert func(x, (x, 1, y), (y, 1, y)).free_symbols == set([y])
assert func(x, (y, 1, y), (y, 1, z)).free_symbols == set([x, z])
assert Sum(1, (x, 1, y)).free_symbols == set([y])
# free_symbols answers whether the object *as written* has free symbols,
# not whether the evaluated expression has free symbols
assert Product(1, (x, 1, y)).free_symbols == set([y])
def test_conjugate_transpose():
A, B = symbols("A B", commutative=False)
p = Sum(A*B**n, (n, 1, 3))
assert p.adjoint().doit() == p.doit().adjoint()
assert p.conjugate().doit() == p.doit().conjugate()
assert p.transpose().doit() == p.doit().transpose()
def test_issue_4171():
assert summation(factorial(2*k + 1)/factorial(2*k), (k, 0, oo)) == oo
assert summation(2*k + 1, (k, 0, oo)) == oo
def test_issue_6273():
assert Sum(x, (x, 1, n)).n(2, subs={n: 1}) == 1
def test_issue_6274():
assert Sum(x, (x, 1, 0)).doit() == 0
assert NS(Sum(x, (x, 1, 0))) == '0'
assert Sum(n, (n, 10, 5)).doit() == -30
assert NS(Sum(n, (n, 10, 5))) == '-30.0000000000000'
def test_simplify():
y, t = symbols('y, t')
assert simplify(Sum(x*y, (x, n, m), (y, a, k)) + \
Sum(y, (x, n, m), (y, a, k))) == Sum(x*y + y, (x, n, m), (y, a, k))
assert simplify(Sum(x, (x, n, m)) + Sum(x, (x, m + 1, a))) == \
Sum(x, (x, n, a))
assert simplify(Sum(x, (x, k + 1, a)) + Sum(x, (x, n, k))) == \
Sum(x, (x, n, a))
assert simplify(Sum(x, (x, k + 1, a)) + Sum(x + 1, (x, n, k))) == \
Sum(x, (x, k + 1, a)) + Sum(x + 1, (x, n, k))
assert simplify(Sum(x, (x, 0, 3)) * 3 + 3 * Sum(x, (x, 4, 6)) + \
4 * Sum(z, (z, 0, 1))) == Sum(4*z, (z, 0, 1)) + Sum(3*x, (x, 0, 6))
assert simplify(3*Sum(x**2, (x, a, b)) + Sum(x, (x, a, b))) == \
Sum(3*x**2 + x, (x, a, b))
assert simplify(Sum(x**3, (x, n, k)) * 3 + 3 * Sum(x, (x, n, k)) + \
4 * y * Sum(z, (z, n, k))) + 1 == \
y*Sum(4*z, (z, n, k)) + Sum(3*x**3 + 3*x, (x, n, k)) + 1
assert simplify(Sum(x, (x, a, b)) + 1 + Sum(x, (x, b + 1, c))) == \
1 + Sum(x, (x, a, c))
assert simplify(Sum(x, (t, a, b)) + Sum(y, (t, a, b)) + \
Sum(x, (t, b+1, c))) == Sum(x + y, (t, a, b)) + Sum(x, (t, b+1, c))
assert simplify(Sum(x, (t, a, b)) + Sum(x, (t, b+1, c)) + \
Sum(y, (t, a, b))) == Sum(x + y, (t, a, b)) + Sum(x, (t, b+1, c))
assert simplify(Sum(x, (t, a, b)) + 2 * Sum(x, (t, b+1, c))) == \
simplify(Sum(x, (t, a, b)) + Sum(x, (t, b+1, c)) + Sum(x, (t, b+1, c)))
assert simplify(Sum(x, (x, a, b))*Sum(x**2, (x, a, b))) == \
Sum(x, (x, a, b)) * Sum(x**2, (x, a, b))
def test_change_index():
b, v = symbols('b, v', integer = True)
assert Sum(x, (x, a, b)).change_index(x, x + 1, y) == \
Sum(y - 1, (y, a + 1, b + 1))
assert Sum(x**2, (x, a, b)).change_index( x, x - 1) == \
Sum((x+1)**2, (x, a - 1, b - 1))
assert Sum(x**2, (x, a, b)).change_index( x, -x, y) == \
Sum((-y)**2, (y, -b, -a))
assert Sum(x, (x, a, b)).change_index( x, -x - 1) == \
Sum(-x - 1, (x, -b - 1, -a - 1))
assert Sum(x*y, (x, a, b), (y, c, d)).change_index( x, x - 1, z) == \
Sum((z + 1)*y, (z, a - 1, b - 1), (y, c, d))
assert Sum(x, (x, a, b)).change_index( x, x + v) == \
Sum(-v + x, (x, a + v, b + v))
assert Sum(x, (x, a, b)).change_index( x, -x - v) == \
Sum(-v - x, (x, -b - v, -a - v))
def test_reorder():
b, y, c, d, z = symbols('b, y, c, d, z', integer = True)
assert Sum(x*y, (x, a, b), (y, c, d)).reorder((0, 1)) == \
Sum(x*y, (y, c, d), (x, a, b))
assert Sum(x, (x, a, b), (x, c, d)).reorder((0, 1)) == \
Sum(x, (x, c, d), (x, a, b))
assert Sum(x*y + z, (x, a, b), (z, m, n), (y, c, d)).reorder(\
(2, 0), (0, 1)) == Sum(x*y + z, (z, m, n), (y, c, d), (x, a, b))
assert Sum(x*y*z, (x, a, b), (y, c, d), (z, m, n)).reorder(\
(0, 1), (1, 2), (0, 2)) == Sum(x*y*z, (x, a, b), (z, m, n), (y, c, d))
assert Sum(x*y*z, (x, a, b), (y, c, d), (z, m, n)).reorder(\
(x, y), (y, z), (x, z)) == Sum(x*y*z, (x, a, b), (z, m, n), (y, c, d))
assert Sum(x*y, (x, a, b), (y, c, d)).reorder((x, 1)) == \
Sum(x*y, (y, c, d), (x, a, b))
assert Sum(x*y, (x, a, b), (y, c, d)).reorder((y, x)) == \
Sum(x*y, (y, c, d), (x, a, b))
def test_reverse_order():
assert Sum(x, (x, 0, 3)).reverse_order(0) == Sum(-x, (x, 4, -1))
assert Sum(x*y, (x, 1, 5), (y, 0, 6)).reverse_order(0, 1) == \
Sum(x*y, (x, 6, 0), (y, 7, -1))
assert Sum(x, (x, 1, 2)).reverse_order(0) == Sum(-x, (x, 3, 0))
assert Sum(x, (x, 1, 3)).reverse_order(0) == Sum(-x, (x, 4, 0))
assert Sum(x, (x, 1, a)).reverse_order(0) == Sum(-x, (x, a + 1, 0))
assert Sum(x, (x, a, 5)).reverse_order(0) == Sum(-x, (x, 6, a - 1))
assert Sum(x, (x, a + 1, a + 5)).reverse_order(0) == \
Sum(-x, (x, a + 6, a))
assert Sum(x, (x, a + 1, a + 2)).reverse_order(0) == \
Sum(-x, (x, a + 3, a))
assert Sum(x, (x, a + 1, a + 1)).reverse_order(0) == \
Sum(-x, (x, a + 2, a))
assert Sum(x, (x, a, b)).reverse_order(0) == Sum(-x, (x, b + 1, a - 1))
assert Sum(x, (x, a, b)).reverse_order(x) == Sum(-x, (x, b + 1, a - 1))
assert Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1) == \
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
assert Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x) == \
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
def test_issue_7097():
assert sum(x**n/n for n in range(1, 401)) == summation(x**n/n, (n, 1, 400))
def test_factor_expand_subs():
# test factoring
assert Sum(4 * x, (x, 1, y)).factor() == 4 * Sum(x, (x, 1, y))
assert Sum(x * a, (x, 1, y)).factor() == a * Sum(x, (x, 1, y))
assert Sum(4 * x * a, (x, 1, y)).factor() == 4 * a * Sum(x, (x, 1, y))
assert Sum(4 * x * y, (x, 1, y)).factor() == 4 * y * Sum(x, (x, 1, y))
# test expand
assert Sum(x+1,(x,1,y)).expand() == Sum(x,(x,1,y)) + Sum(1,(x,1,y))
assert Sum(x+a*x**2,(x,1,y)).expand() == Sum(x,(x,1,y)) + Sum(a*x**2,(x,1,y))
assert Sum(x**(n + 1)*(n + 1), (n, -1, oo)).expand() \
== Sum(x*x**n, (n, -1, oo)) + Sum(n*x*x**n, (n, -1, oo))
assert Sum(x**(n + 1)*(n + 1), (n, -1, oo)).expand(power_exp=False) \
== Sum(n*x**(n+1), (n, -1, oo)) + Sum(x**(n+1), (n, -1, oo))
assert Sum(a*n+a*n**2,(n,0,4)).expand() \
== Sum(a*n,(n,0,4)) + Sum(a*n**2,(n,0,4))
assert Sum(x**a*x**n,(x,0,3)) \
== Sum(x**(a+n),(x,0,3)).expand(power_exp=True)
assert Sum(x**(a+n),(x,0,3)) \
== Sum(x**(a+n),(x,0,3)).expand(power_exp=False)
# test subs
assert Sum(1/(1+a*x**2),(x,0,3)).subs([(a,3)]) == Sum(1/(1+3*x**2),(x,0,3))
assert Sum(x*y,(x,0,y),(y,0,x)).subs([(x,3)]) == Sum(x*y,(x,0,y),(y,0,3))
assert Sum(x,(x,1,10)).subs([(x,y-2)]) == Sum(x,(x,1,10))
assert Sum(1/x,(x,1,10)).subs([(x,(3+n)**3)]) == Sum(1/x,(x,1,10))
assert Sum(1/x,(x,1,10)).subs([(x,3*x-2)]) == Sum(1/x,(x,1,10))
def test_distribution_over_equality():
assert Product(Eq(x*2, f(x)), (x, 1, 3)).doit() == Eq(48, f(1)*f(2)*f(3))
assert Sum(Eq(f(x), x**2), (x, 0, y)) == \
Eq(Sum(f(x), (x, 0, y)), Sum(x**2, (x, 0, y)))
def test_issue_2787():
n, k = symbols('n k', positive=True, integer=True)
p = symbols('p', positive=True)
binomial_dist = binomial(n, k)*p**k*(1 - p)**(n - k)
s = Sum(binomial_dist*k, (k, 0, n))
res = s.doit().simplify()
assert res == Piecewise(
(n*p, And(Or(-n + 1 < 0, Ne(p/(p - 1), 1)), p/Abs(p - 1) <= 1)),
(Sum(k*p**k*(-p + 1)**(-k)*(-p + 1)**n*binomial(n, k), (k, 0, n)),
True))
def test_issue_4668():
assert summation(1/n, (n, 2, oo)) == oo
| bsd-3-clause |
40223110/2015CDAFinal_test2 | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/sprite.py | 603 | 55779 | ## pygame - Python Game Library
## Copyright (C) 2000-2003, 2007 Pete Shinners
## (C) 2004 Joe Wreschnig
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## [email protected]
"""pygame module with basic game object classes
This module contains several simple classes to be used within games. There
are the main Sprite class and several Group classes that contain Sprites.
The use of these classes is entirely optional when using Pygame. The classes
are fairly lightweight and only provide a starting place for the code
that is common to most games.
The Sprite class is intended to be used as a base class for the different
types of objects in the game. There is also a base Group class that simply
stores sprites. A game could create new types of Group classes that operate
on specially customized Sprite instances they contain.
The basic Sprite class can draw the Sprites it contains to a Surface. The
Group.draw() method requires that each Sprite have a Surface.image attribute
and a Surface.rect. The Group.clear() method requires these same attributes
and can be used to erase all the Sprites with background. There are also
more advanced Groups: pygame.sprite.RenderUpdates() and
pygame.sprite.OrderedUpdates().
Lastly, this module contains several collision functions. These help find
sprites inside multiple groups that have intersecting bounding rectangles.
To find the collisions, the Sprites are required to have a Surface.rect
attribute assigned.
The groups are designed for high efficiency in removing and adding Sprites
to them. They also allow cheap testing to see if a Sprite already exists in
a Group. A given Sprite can exist in any number of groups. A game could use
some groups to control object rendering, and a completely separate set of
groups to control interaction or player movement. Instead of adding type
attributes or bools to a derived Sprite class, consider keeping the
Sprites inside organized Groups. This will allow for easier lookup later
in the game.
Sprites and Groups manage their relationships with the add() and remove()
methods. These methods can accept a single or multiple group arguments for
membership. The default initializers for these classes also take a
single group or list of groups as arguments for initial membership. It is safe
to repeatedly add and remove the same Sprite from a Group.
While it is possible to design sprite and group classes that don't derive
from the Sprite and AbstractGroup classes below, it is strongly recommended
that you extend those when you create a new Sprite or Group class.
Sprites are not thread safe, so lock them yourself if using threads.
"""
##todo
## a group that holds only the 'n' most recent elements.
## sort of like the GroupSingle class, but holding more
## than one sprite
##
## drawing groups that can 'automatically' store the area
## underneath so they can "clear" without needing a background
## function. obviously a little slower than normal, but nice
## to use in many situations. (also remember it must "clear"
## in the reverse order that it draws :])
##
## the drawing groups should also be able to take a background
## function, instead of just a background surface. the function
## would take a surface and a rectangle on that surface to erase.
##
## perhaps more types of collision functions? the current two
## should handle just about every need, but perhaps more optimized
## specific ones that aren't quite so general but fit into common
## specialized cases.
import pygame
from pygame.rect import Rect
from pygame.time import get_ticks
from operator import truth
# Python 3 does not have the callable function, but an equivalent can be made
# with the hasattr function.
#if 'callable' not in dir(__builtins__):
callable = lambda obj: hasattr(obj, '__call__')
# Don't depend on pygame.mask if it's not there...
try:
from pygame.mask import from_surface
except:
pass
class Sprite(object):
"""simple base class for visible game objects
pygame.sprite.Sprite(*groups): return Sprite
The base class for visible game objects. Derived classes will want to
override the Sprite.update() method and assign Sprite.image and Sprite.rect
attributes. The initializer can accept any number of Group instances that
the Sprite will become a member of.
When subclassing the Sprite class, be sure to call the base initializer
before adding the Sprite to Groups.
"""
def __init__(self, *groups):
self.__g = {} # The groups the sprite is in
if groups:
self.add(*groups)
def add(self, *groups):
"""add the sprite to groups
Sprite.add(*groups): return None
Any number of Group instances can be passed as arguments. The
Sprite will be added to the Groups it is not already a member of.
"""
has = self.__g.__contains__
for group in groups:
if hasattr(group, '_spritegroup'):
if not has(group):
group.add_internal(self)
self.add_internal(group)
else:
self.add(*group)
def remove(self, *groups):
"""remove the sprite from groups
Sprite.remove(*groups): return None
Any number of Group instances can be passed as arguments. The Sprite
will be removed from the Groups it is currently a member of.
"""
has = self.__g.__contains__
for group in groups:
if hasattr(group, '_spritegroup'):
if has(group):
group.remove_internal(self)
self.remove_internal(group)
else:
self.remove(*group)
def add_internal(self, group):
self.__g[group] = 0
def remove_internal(self, group):
del self.__g[group]
def update(self, *args):
"""method to control sprite behavior
Sprite.update(*args):
The default implementation of this method does nothing; it's just a
convenient "hook" that you can override. This method is called by
Group.update() with whatever arguments you give it.
There is no need to use this method if not using the convenience
method by the same name in the Group class.
"""
pass
def kill(self):
"""remove the Sprite from all Groups
Sprite.kill(): return None
The Sprite is removed from all the Groups that contain it. This won't
change anything about the state of the Sprite. It is possible to
continue to use the Sprite after this method has been called, including
adding it to Groups.
"""
for c in self.__g:
c.remove_internal(self)
self.__g.clear()
def groups(self):
"""list of Groups that contain this Sprite
Sprite.groups(): return group_list
Returns a list of all the Groups that contain this Sprite.
"""
return list(self.__g)
def alive(self):
"""does the sprite belong to any groups
Sprite.alive(): return bool
Returns True when the Sprite belongs to one or more Groups.
"""
return truth(self.__g)
def __repr__(self):
return "<%s sprite(in %d groups)>" % (self.__class__.__name__, len(self.__g))
class DirtySprite(Sprite):
"""a more featureful subclass of Sprite with more attributes
pygame.sprite.DirtySprite(*groups): return DirtySprite
Extra DirtySprite attributes with their default values:
dirty = 1
If set to 1, it is repainted and then set to 0 again.
If set to 2, it is always dirty (repainted each frame;
flag is not reset).
If set to 0, it is not dirty and therefore not repainted again.
blendmode = 0
It's the special_flags argument of Surface.blit; see the blendmodes in
the Surface.blit documentation
source_rect = None
This is the source rect to use. Remember that it is relative to the top
left corner (0, 0) of self.image.
visible = 1
Normally this is 1. If set to 0, it will not be repainted. (If you
change visible to 1, you must set dirty to 1 for it to be erased from
the screen.)
_layer = 0
A READ ONLY value, it is read when adding it to the LayeredUpdates
group. For details see documentation of sprite.LayeredUpdates.
"""
def __init__(self, *groups):
self.dirty = 1
self.blendmode = 0 # pygame 1.8, referred to as special_flags in
# the documentation of Surface.blit
self._visible = 1
self._layer = 0 # READ ONLY by LayeredUpdates or LayeredDirty
self.source_rect = None
Sprite.__init__(self, *groups)
def _set_visible(self, val):
"""set the visible value (0 or 1) and makes the sprite dirty"""
self._visible = val
if self.dirty < 2:
self.dirty = 1
def _get_visible(self):
"""return the visible value of that sprite"""
return self._visible
visible = property(lambda self: self._get_visible(),
lambda self, value: self._set_visible(value),
doc="you can make this sprite disappear without "
"removing it from the group,\n"
"assign 0 for invisible and 1 for visible")
def __repr__(self):
return "<%s DirtySprite(in %d groups)>" % \
(self.__class__.__name__, len(self.groups()))
class AbstractGroup(object):
"""base class for containers of sprites
AbstractGroup does everything needed to behave as a normal group. You can
easily subclass a new group class from this or the other groups below if
you want to add more features.
Any AbstractGroup-derived sprite groups act like sequences and support
iteration, len, and so on.
"""
# dummy val to identify sprite groups, and avoid infinite recursion
_spritegroup = True
def __init__(self):
self.spritedict = {}
self.lostsprites = []
def sprites(self):
"""get a list of sprites in the group
Group.sprite(): return list
Returns an object that can be looped over with a 'for' loop. (For now,
it is always a list, but this could change in a future version of
pygame.) Alternatively, you can get the same information by iterating
directly over the sprite group, e.g. 'for sprite in group'.
"""
return list(self.spritedict)
def add_internal(self, sprite):
self.spritedict[sprite] = 0
def remove_internal(self, sprite):
r = self.spritedict[sprite]
if r:
self.lostsprites.append(r)
del self.spritedict[sprite]
def has_internal(self, sprite):
return sprite in self.spritedict
def copy(self):
"""copy a group with all the same sprites
Group.copy(): return Group
Returns a copy of the group that is an instance of the same class
and has the same sprites in it.
"""
return self.__class__(self.sprites())
def __iter__(self):
return iter(self.sprites())
def __contains__(self, sprite):
return self.has(sprite)
def add(self, *sprites):
"""add sprite(s) to group
Group.add(sprite, list, group, ...): return None
Adds a sprite or sequence of sprites to a group.
"""
for sprite in sprites:
# It's possible that some sprite is also an iterator.
# If this is the case, we should add the sprite itself,
# and not the iterator object.
if isinstance(sprite, Sprite):
if not self.has_internal(sprite):
self.add_internal(sprite)
sprite.add_internal(self)
else:
try:
# See if sprite is an iterator, like a list or sprite
# group.
self.add(*sprite)
except (TypeError, AttributeError):
# Not iterable. This is probably a sprite that is not an
# instance of the Sprite class or is not an instance of a
# subclass of the Sprite class. Alternately, it could be an
# old-style sprite group.
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if not self.has_internal(spr):
self.add_internal(spr)
spr.add_internal(self)
elif not self.has_internal(sprite):
self.add_internal(sprite)
sprite.add_internal(self)
def remove(self, *sprites):
"""remove sprite(s) from group
Group.remove(sprite, list, or group, ...): return None
Removes a sprite or sequence of sprites from a group.
"""
# This function behaves essentially the same as Group.add. It first
# tries to handle each argument as an instance of the Sprite class. If
        # that fails, then it tries to handle the argument as an iterable
        # object. If that fails, then it tries to handle the argument as an
# old-style sprite group. Lastly, if that fails, it assumes that the
# normal Sprite methods should be used.
for sprite in sprites:
if isinstance(sprite, Sprite):
if self.has_internal(sprite):
self.remove_internal(sprite)
sprite.remove_internal(self)
else:
try:
self.remove(*sprite)
except (TypeError, AttributeError):
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if self.has_internal(spr):
self.remove_internal(spr)
spr.remove_internal(self)
elif self.has_internal(sprite):
self.remove_internal(sprite)
sprite.remove_internal(self)
def has(self, *sprites):
"""ask if group has a sprite or sprites
Group.has(sprite or group, ...): return bool
Returns True if the given sprite or sprites are contained in the
group. Alternatively, you can get the same information using the
'in' operator, e.g. 'sprite in group', 'subgroup in group'.
"""
return_value = False
for sprite in sprites:
if isinstance(sprite, Sprite):
# Check for Sprite instance's membership in this group
if self.has_internal(sprite):
return_value = True
else:
return False
else:
try:
if self.has(*sprite):
return_value = True
else:
return False
except (TypeError, AttributeError):
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if self.has_internal(spr):
return_value = True
else:
return False
else:
if self.has_internal(sprite):
return_value = True
else:
return False
return return_value
def update(self, *args):
"""call the update method of every member sprite
Group.update(*args): return None
Calls the update method of every member sprite. All arguments that
were passed to this method are passed to the Sprite update function.
"""
for s in self.sprites():
s.update(*args)
def draw(self, surface):
"""draw all sprites onto the surface
Group.draw(surface): return None
Draws all of the member sprites onto the given surface.
"""
#from javascript import console
sprites = self.sprites()
surface_blit = surface.blit
for spr in sprites:
#console.log(spr.image, spr.rect)
#console.log(spr.image._canvas.width, spr.image._canvas.height)
self.spritedict[spr] = surface_blit(spr.image, spr.rect)
self.lostsprites = []
def clear(self, surface, bgd):
"""erase the previous position of all sprites
Group.clear(surface, bgd): return None
Clears the area under every drawn sprite in the group. The bgd
argument should be Surface which is the same dimensions as the
screen surface. The bgd could also be a function which accepts
the given surface and the area to be cleared as arguments.
"""
if callable(bgd):
for r in self.lostsprites:
bgd(surface, r)
for r in self.spritedict.values():
if r:
bgd(surface, r)
else:
surface_blit = surface.blit
for r in self.lostsprites:
surface_blit(bgd, r, r)
for r in self.spritedict.values():
if r:
surface_blit(bgd, r, r)
def empty(self):
"""remove all sprites
Group.empty(): return None
Removes all the sprites from the group.
"""
for s in self.sprites():
self.remove_internal(s)
s.remove_internal(self)
def __nonzero__(self):
return truth(self.sprites())
def __len__(self):
"""return number of sprites in group
Group.len(group): return int
Returns the number of sprites contained in the group.
"""
return len(self.sprites())
def __repr__(self):
return "<%s(%d sprites)>" % (self.__class__.__name__, len(self))
class Group(AbstractGroup):
"""container class for many Sprites
pygame.sprite.Group(*sprites): return Group
A simple container for Sprite objects. This class can be subclassed to
create containers with more specific behaviors. The constructor takes any
number of Sprite arguments to add to the Group. The group supports the
following standard Python operations:
in test if a Sprite is contained
len the number of Sprites contained
bool test if any Sprites are contained
iter iterate through all the Sprites
The Sprites in the Group are not ordered, so the Sprites are drawn and
iterated over in no particular order.
"""
def __init__(self, *sprites):
AbstractGroup.__init__(self)
self.add(*sprites)
RenderPlain = Group
RenderClear = Group
class RenderUpdates(Group):
"""Group class that tracks dirty updates
pygame.sprite.RenderUpdates(*sprites): return RenderUpdates
This class is derived from pygame.sprite.Group(). It has an enhanced draw
method that tracks the changed areas of the screen.
"""
def draw(self, surface):
spritedict = self.spritedict
surface_blit = surface.blit
dirty = self.lostsprites
self.lostsprites = []
dirty_append = dirty.append
for s in self.sprites():
r = spritedict[s]
newrect = surface_blit(s.image, s.rect)
if r:
if newrect.colliderect(r):
dirty_append(newrect.union(r))
else:
dirty_append(newrect)
dirty_append(r)
else:
dirty_append(newrect)
spritedict[s] = newrect
return dirty
class OrderedUpdates(RenderUpdates):
"""RenderUpdates class that draws Sprites in order of addition
pygame.sprite.OrderedUpdates(*spites): return OrderedUpdates
This class derives from pygame.sprite.RenderUpdates(). It maintains
the order in which the Sprites were added to the Group for rendering.
This makes adding and removing Sprites from the Group a little
slower than regular Groups.
"""
def __init__(self, *sprites):
self._spritelist = []
RenderUpdates.__init__(self, *sprites)
def sprites(self):
return list(self._spritelist)
def add_internal(self, sprite):
RenderUpdates.add_internal(self, sprite)
self._spritelist.append(sprite)
def remove_internal(self, sprite):
RenderUpdates.remove_internal(self, sprite)
self._spritelist.remove(sprite)
class LayeredUpdates(AbstractGroup):
"""LayeredUpdates Group handles layers, which are drawn like OrderedUpdates
pygame.sprite.LayeredUpdates(*spites, **kwargs): return LayeredUpdates
This group is fully compatible with pygame.sprite.Sprite.
New in pygame 1.8.0
"""
_init_rect = Rect(0, 0, 0, 0)
def __init__(self, *sprites, **kwargs):
"""initialize an instance of LayeredUpdates with the given attributes
You can set the default layer through kwargs using 'default_layer'
and an integer for the layer. The default layer is 0.
If the sprite you add has an attribute _layer, then that layer will be
used. If **kwargs contains 'layer', then the passed sprites will be
added to that layer (overriding the sprite._layer attribute). If
neither the sprite nor **kwargs has a 'layer', then the default layer is
used to add the sprites.
"""
self._spritelayers = {}
self._spritelist = []
AbstractGroup.__init__(self)
self._default_layer = kwargs.get('default_layer', 0)
self.add(*sprites, **kwargs)
def add_internal(self, sprite, layer=None):
"""Do not use this method directly.
It is used by the group to add a sprite internally.
"""
self.spritedict[sprite] = self._init_rect
if layer is None:
try:
layer = sprite._layer
except AttributeError:
layer = sprite._layer = self._default_layer
elif hasattr(sprite, '_layer'):
sprite._layer = layer
sprites = self._spritelist # speedup
sprites_layers = self._spritelayers
sprites_layers[sprite] = layer
# add the sprite at the right position
# bisect algorithm (binary search for the insertion point)
leng = len(sprites)
low = mid = 0
high = leng - 1
while low <= high:
mid = low + (high - low) // 2
if sprites_layers[sprites[mid]] <= layer:
low = mid + 1
else:
high = mid - 1
# linear search to find final position
while mid < leng and sprites_layers[sprites[mid]] <= layer:
mid += 1
sprites.insert(mid, sprite)
def add(self, *sprites, **kwargs):
"""add a sprite or sequence of sprites to a group
LayeredUpdates.add(*sprites, **kwargs): return None
If the sprite you add has an attribute _layer, then that layer will be
used. If **kwargs contains 'layer', then the passed sprites will be
added to that layer (overriding the sprite._layer attribute). If
neither the sprite nor **kwargs has a 'layer', then the default layer is
used to add the sprites.
"""
if not sprites:
return
if 'layer' in kwargs:
layer = kwargs['layer']
else:
layer = None
for sprite in sprites:
# It's possible that some sprite is also an iterator.
# If this is the case, we should add the sprite itself,
# and not the iterator object.
if isinstance(sprite, Sprite):
if not self.has_internal(sprite):
self.add_internal(sprite, layer)
sprite.add_internal(self)
else:
try:
# See if sprite is an iterator, like a list or sprite
# group.
self.add(*sprite, **kwargs)
except (TypeError, AttributeError):
# Not iterable. This is probably a sprite that is not an
# instance of the Sprite class or is not an instance of a
# subclass of the Sprite class. Alternately, it could be an
# old-style sprite group.
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if not self.has_internal(spr):
self.add_internal(spr, layer)
spr.add_internal(self)
elif not self.has_internal(sprite):
self.add_internal(sprite, layer)
sprite.add_internal(self)
def remove_internal(self, sprite):
"""Do not use this method directly.
The group uses it to add a sprite.
"""
self._spritelist.remove(sprite)
# these dirty rects are suboptimal for one frame
r = self.spritedict[sprite]
if r is not self._init_rect:
self.lostsprites.append(r) # dirty rect
if hasattr(sprite, 'rect'):
self.lostsprites.append(sprite.rect) # dirty rect
del self.spritedict[sprite]
del self._spritelayers[sprite]
def sprites(self):
"""return a ordered list of sprites (first back, last top).
LayeredUpdates.sprites(): return sprites
"""
return list(self._spritelist)
def draw(self, surface):
"""draw all sprites in the right order onto the passed surface
LayeredUpdates.draw(surface): return Rect_list
"""
spritedict = self.spritedict
surface_blit = surface.blit
dirty = self.lostsprites
self.lostsprites = []
dirty_append = dirty.append
init_rect = self._init_rect
for spr in self.sprites():
rec = spritedict[spr]
newrect = surface_blit(spr.image, spr.rect)
if rec is init_rect:
dirty_append(newrect)
else:
if newrect.colliderect(rec):
dirty_append(newrect.union(rec))
else:
dirty_append(newrect)
dirty_append(rec)
spritedict[spr] = newrect
return dirty
def get_sprites_at(self, pos):
"""return a list with all sprites at that position
LayeredUpdates.get_sprites_at(pos): return colliding_sprites
Bottom sprites are listed first; the top ones are listed last.
"""
_sprites = self._spritelist
rect = Rect(pos, (0, 0))
colliding_idx = rect.collidelistall(_sprites)
colliding = [_sprites[i] for i in colliding_idx]
return colliding
def get_sprite(self, idx):
"""return the sprite at the index idx from the groups sprites
LayeredUpdates.get_sprite(idx): return sprite
Raises IndexOutOfBounds if the idx is not within range.
"""
return self._spritelist[idx]
def remove_sprites_of_layer(self, layer_nr):
"""remove all sprites from a layer and return them as a list
LayeredUpdates.remove_sprites_of_layer(layer_nr): return sprites
"""
sprites = self.get_sprites_from_layer(layer_nr)
self.remove(*sprites)
return sprites
#---# layer methods
def layers(self):
"""return a list of unique defined layers defined.
LayeredUpdates.layers(): return layers
"""
return sorted(set(self._spritelayers.values()))
def change_layer(self, sprite, new_layer):
"""change the layer of the sprite
LayeredUpdates.change_layer(sprite, new_layer): return None
The sprite must have been added to the renderer already. This is not
checked.
"""
sprites = self._spritelist # speedup
sprites_layers = self._spritelayers # speedup
sprites.remove(sprite)
sprites_layers.pop(sprite)
# add the sprite at the right position
# bisect algorithm (binary search for the insertion point)
leng = len(sprites)
low = mid = 0
high = leng - 1
while low <= high:
mid = low + (high - low) // 2
if sprites_layers[sprites[mid]] <= new_layer:
low = mid + 1
else:
high = mid - 1
# linear search to find final position
while mid < leng and sprites_layers[sprites[mid]] <= new_layer:
mid += 1
sprites.insert(mid, sprite)
if hasattr(sprite, 'layer'):
sprite.layer = new_layer
# add layer info
sprites_layers[sprite] = new_layer
def get_layer_of_sprite(self, sprite):
"""return the layer that sprite is currently in
If the sprite is not found, then it will return the default layer.
"""
return self._spritelayers.get(sprite, self._default_layer)
def get_top_layer(self):
"""return the top layer
LayeredUpdates.get_top_layer(): return layer
"""
return self._spritelayers[self._spritelist[-1]]
def get_bottom_layer(self):
"""return the bottom layer
LayeredUpdates.get_bottom_layer(): return layer
"""
return self._spritelayers[self._spritelist[0]]
def move_to_front(self, sprite):
"""bring the sprite to front layer
LayeredUpdates.move_to_front(sprite): return None
Brings the sprite to front by changing the sprite layer to the top-most
layer. The sprite is added at the end of the list of sprites in that
top-most layer.
"""
self.change_layer(sprite, self.get_top_layer())
def move_to_back(self, sprite):
"""move the sprite to the bottom layer
LayeredUpdates.move_to_back(sprite): return None
Moves the sprite to the bottom layer by moving it to a new layer below
the current bottom layer.
"""
self.change_layer(sprite, self.get_bottom_layer() - 1)
def get_top_sprite(self):
"""return the topmost sprite
LayeredUpdates.get_top_sprite(): return Sprite
"""
return self._spritelist[-1]
def get_sprites_from_layer(self, layer):
"""return all sprites from a layer ordered as they where added
LayeredUpdates.get_sprites_from_layer(layer): return sprites
Returns all sprites from a layer. The sprites are ordered in the
sequence that they where added. (The sprites are not removed from the
layer.
"""
sprites = []
sprites_append = sprites.append
sprite_layers = self._spritelayers
for spr in self._spritelist:
if sprite_layers[spr] == layer:
sprites_append(spr)
elif sprite_layers[spr] > layer:  # the list is sorted by layer, so
# no later sprite can be in this layer
break
return sprites
def switch_layer(self, layer1_nr, layer2_nr):
"""switch the sprites from layer1_nr to layer2_nr
LayeredUpdates.switch_layer(layer1_nr, layer2_nr): return None
The layer numbers must exist. This method does not check for the
existence of the given layers.
"""
sprites1 = self.remove_sprites_of_layer(layer1_nr)
for spr in self.get_sprites_from_layer(layer2_nr):
self.change_layer(spr, layer1_nr)
self.add(layer=layer2_nr, *sprites1)
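# Illustrative sketch (editor's addition): explicit layer control with
# LayeredUpdates. The three sprites and the screen are hypothetical stand-ins.
def _demo_layered_updates(ground, player, hud, screen):
    group = LayeredUpdates()
    group.add(ground, layer=0)
    group.add(player, layer=1)
    group.add(hud, layer=2)
    group.move_to_front(player)  # moves player into the top-most layer
    assert group.get_layer_of_sprite(player) == group.get_top_layer()
    assert group.get_top_sprite() is player
    group.draw(screen)  # drawn bottom layer first, top layer last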
class LayeredDirty(LayeredUpdates):
"""LayeredDirty Group is for DirtySprites; subclasses LayeredUpdates
pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty
This group requires pygame.sprite.DirtySprite or any sprite that
has the following attributes:
image, rect, dirty, visible, blendmode (see doc of DirtySprite).
It uses the dirty flag technique and is therefore faster than
pygame.sprite.RenderUpdates if you have many static sprites. It
also switches automatically between dirty rect updating and full
screen drawing, so you do not have to worry about which would be faster.
As with the pygame.sprite.Group, you can specify some additional attributes
through kwargs:
_use_update: True/False (default is False)
_default_layer: default layer where the sprites without a layer are
added
_time_threshold: threshold time for switching between dirty rect mode
and fullscreen mode; defaults to updating at 80 frames per second,
which is equal to 1000.0 / 80.0
New in pygame 1.8.0
"""
def __init__(self, *sprites, **kwargs):
"""initialize group.
pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty
You can specify some additional attributes through kwargs:
_use_update: True/False (default is False)
_default_layer: default layer where the sprites without a layer are
added
_time_threshold: threshold time for switching between dirty rect
mode and fullscreen mode; defaults to updating at 80 frames per
second, which is equal to 1000.0 / 80.0
"""
LayeredUpdates.__init__(self, *sprites, **kwargs)
self._clip = None
self._use_update = False
self._time_threshold = 1000.0 / 80.0 # 1000.0 / fps
self._bgd = None
for key, val in kwargs.items():
if key in ['_use_update', '_time_threshold', '_default_layer']:
if hasattr(self, key):
setattr(self, key, val)
def add_internal(self, sprite, layer=None):
"""Do not use this method directly.
It is used by the group to add a sprite internally.
"""
# check if all needed attributes are set
if not hasattr(sprite, 'dirty'):
raise AttributeError("sprite lacks a 'dirty' attribute")
if not hasattr(sprite, 'visible'):
raise AttributeError("sprite lacks a 'visible' attribute")
if not hasattr(sprite, 'blendmode'):
raise AttributeError("sprite lacks a 'blendmode' attribute")
if not isinstance(sprite, DirtySprite):
raise TypeError("sprite must be a DirtySprite instance")
if sprite.dirty == 0: # set it dirty if it is not
sprite.dirty = 1
LayeredUpdates.add_internal(self, sprite, layer)
def draw(self, surface, bgd=None):
"""draw all sprites in the right order onto the given surface
LayeredDirty.draw(surface, bgd=None): return Rect_list
You can pass the background too. If a self.bgd is already set to some
value that is not None, then the bgd argument has no effect.
"""
# speedups
_orig_clip = surface.get_clip()
_clip = self._clip
if _clip is None:
_clip = _orig_clip
_surf = surface
_sprites = self._spritelist
_old_rect = self.spritedict
_update = self.lostsprites
_update_append = _update.append
_ret = None
_surf_blit = _surf.blit
_rect = Rect
if bgd is not None:
self._bgd = bgd
_bgd = self._bgd
init_rect = self._init_rect
_surf.set_clip(_clip)
# -------
# 0. decide whether to render with update or flip
start_time = get_ticks()
if self._use_update: # dirty rects mode
# 1. find dirty area on screen and put the rects into _update
# still not happy with that part
for spr in _sprites:
if 0 < spr.dirty:
# choose the right rect
if spr.source_rect:
_union_rect = _rect(spr.rect.topleft,
spr.source_rect.size)
else:
_union_rect = _rect(spr.rect)
_union_rect_collidelist = _union_rect.collidelist
_union_rect_union_ip = _union_rect.union_ip
i = _union_rect_collidelist(_update)
while -1 < i:
_union_rect_union_ip(_update[i])
del _update[i]
i = _union_rect_collidelist(_update)
_update_append(_union_rect.clip(_clip))
if _old_rect[spr] is not init_rect:
_union_rect = _rect(_old_rect[spr])
_union_rect_collidelist = _union_rect.collidelist
_union_rect_union_ip = _union_rect.union_ip
i = _union_rect_collidelist(_update)
while -1 < i:
_union_rect_union_ip(_update[i])
del _update[i]
i = _union_rect_collidelist(_update)
_update_append(_union_rect.clip(_clip))
# Could this be done better? It is an O(n**2) algorithm in the
# worst case.
# clear using background
if _bgd is not None:
for rec in _update:
_surf_blit(_bgd, rec, rec)
# 2. draw
for spr in _sprites:
if 1 > spr.dirty:
if spr._visible:
# sprite not dirty; blit only the intersecting part
_spr_rect = spr.rect
if spr.source_rect is not None:
_spr_rect = Rect(spr.rect.topleft,
spr.source_rect.size)
_spr_rect_clip = _spr_rect.clip
for idx in _spr_rect.collidelistall(_update):
# clip
clip = _spr_rect_clip(_update[idx])
_surf_blit(spr.image,
clip,
(clip[0] - _spr_rect[0],
clip[1] - _spr_rect[1],
clip[2],
clip[3]),
spr.blendmode)
else: # dirty sprite
if spr._visible:
_old_rect[spr] = _surf_blit(spr.image,
spr.rect,
spr.source_rect,
spr.blendmode)
if spr.dirty == 1:
spr.dirty = 0
_ret = list(_update)
else: # flip, full screen mode
if _bgd is not None:
_surf_blit(_bgd, (0, 0))
for spr in _sprites:
if spr._visible:
_old_rect[spr] = _surf_blit(spr.image,
spr.rect,
spr.source_rect,
spr.blendmode)
_ret = [_rect(_clip)] # return only the part of the screen changed
# timing for switching modes
# How may a good threshold be found? It depends on the hardware.
end_time = get_ticks()
if end_time-start_time > self._time_threshold:
self._use_update = False
else:
self._use_update = True
## # debug
## print " check: using dirty rects:", self._use_update
# empty the dirty rects list
_update[:] = []
# -------
# restore original clip
_surf.set_clip(_orig_clip)
return _ret
def clear(self, surface, bgd):
"""use to set background
Group.clear(surface, bgd): return None
"""
self._bgd = bgd
def repaint_rect(self, screen_rect):
"""repaint the given area
LayeredDirty.repaint_rect(screen_rect): return None
screen_rect is in screen coordinates.
"""
if self._clip:
self.lostsprites.append(screen_rect.clip(self._clip))
else:
self.lostsprites.append(Rect(screen_rect))
def set_clip(self, screen_rect=None):
"""clip the area where to draw; pass None (default) to reset the clip
LayeredDirty.set_clip(screen_rect=None): return None
"""
if screen_rect is None:
self._clip = pygame.display.get_surface().get_rect()
else:
self._clip = screen_rect
self._use_update = False
def get_clip(self):
"""get the area where drawing will occur
LayeredDirty.get_clip(): return Rect
"""
return self._clip
def change_layer(self, sprite, new_layer):
"""change the layer of the sprite
LayeredDirty.change_layer(sprite, new_layer): return None
The sprite must have been added to the renderer already. This is not
checked.
"""
LayeredUpdates.change_layer(self, sprite, new_layer)
if sprite.dirty == 0:
sprite.dirty = 1
def set_timing_treshold(self, time_ms):
"""set the treshold in milliseconds
set_timing_treshold(time_ms): return None
Defaults to 1000.0 / 80.0. This means that the screen will be painted
using the flip method rather than the update method if the update
method is taking so long to update the screen that the frame rate falls
below 80 frames per second.
"""
self._time_threshold = time_ms
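# Illustrative sketch (editor's addition): a LayeredDirty frame for
# DirtySprite instances. `screen` and `background` are assumed to be
# same-sized Surfaces.
def _demo_layered_dirty(dirty_sprites, screen, background):
    group = LayeredDirty(*dirty_sprites)
    group.clear(screen, background)  # register the background once
    group.update()
    rects = group.draw(screen)  # only the changed areas are returned
    pygame.display.update(rects)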
class GroupSingle(AbstractGroup):
"""A group container that holds a single most recent item.
This class works just like a regular group, but it only keeps a single
sprite in the group. Whatever sprite has been added to the group last will
be the only sprite in the group.
You can access its one sprite as the .sprite attribute. Assigning to this
attribute will properly remove the old sprite and then add the new one.
"""
def __init__(self, sprite=None):
AbstractGroup.__init__(self)
self.__sprite = None
if sprite is not None:
self.add(sprite)
def copy(self):
return GroupSingle(self.__sprite)
def sprites(self):
if self.__sprite is not None:
return [self.__sprite]
else:
return []
def add_internal(self, sprite):
if self.__sprite is not None:
self.__sprite.remove_internal(self)
self.remove_internal(self.__sprite)
self.__sprite = sprite
def __nonzero__(self):
return self.__sprite is not None
def _get_sprite(self):
return self.__sprite
def _set_sprite(self, sprite):
self.add_internal(sprite)
sprite.add_internal(self)
return sprite
sprite = property(_get_sprite,
_set_sprite,
None,
"The sprite contained in this group")
def remove_internal(self, sprite):
if sprite is self.__sprite:
self.__sprite = None
if sprite in self.spritedict:
AbstractGroup.remove_internal(self, sprite)
def has_internal(self, sprite):
return self.__sprite is sprite
# Optimizations...
def __contains__(self, sprite):
return self.__sprite is sprite
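# Illustrative sketch (editor's addition): GroupSingle keeps only the most
# recently added sprite; assigning to .sprite swaps cleanly. Both arguments
# are hypothetical sprites.
def _demo_group_single(first, second):
    single = GroupSingle(first)
    single.sprite = second  # removes `first`, then adds `second`
    assert single.sprite is second and len(single) == 1
    assert first not in single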
# Some different collision detection functions that could be used.
def collide_rect(left, right):
"""collision detection between two sprites, using rects.
pygame.sprite.collide_rect(left, right): return bool
Tests for collision between two sprites. Uses the pygame.Rect colliderect
function to calculate the collision. It is intended to be passed as a
collided callback function to the *collide functions. Sprites must have
"rect" attributes.
New in pygame 1.8.0
"""
return left.rect.colliderect(right.rect)
class collide_rect_ratio:
"""A callable class that checks for collisions using scaled rects
The class checks for collisions between two sprites using a scaled version
of the sprites' rects. It is created with a ratio; the instance is then
intended to be passed as a collided callback function to the *collide
functions.
New in pygame 1.8.1
"""
def __init__(self, ratio):
"""create a new collide_rect_ratio callable
Ratio is expected to be a floating point value used to scale
the underlying sprite rect before checking for collisions.
"""
self.ratio = ratio
def __call__(self, left, right):
"""detect collision between two sprites using scaled rects
pygame.sprite.collide_rect_ratio(ratio)(left, right): return bool
Tests for collision between two sprites. Uses the pygame.Rect
colliderect function to calculate the collision after scaling the rects
by the stored ratio. Sprites must have "rect" attributes.
"""
ratio = self.ratio
leftrect = left.rect
width = leftrect.width
height = leftrect.height
leftrect = leftrect.inflate(width * ratio - width,
height * ratio - height)
rightrect = right.rect
width = rightrect.width
height = rightrect.height
rightrect = rightrect.inflate(width * ratio - width,
height * ratio - height)
return leftrect.colliderect(rightrect)
def collide_circle(left, right):
"""detect collision between two sprites using circles
pygame.sprite.collide_circle(left, right): return bool
Tests for collision between two sprites by testing whether two circles
centered on the sprites overlap. If the sprites have a "radius" attribute,
then that radius is used to create the circle; otherwise, a circle is
created that is big enough to completely enclose the sprite's rect as
given by the "rect" attribute. This function is intended to be passed as
a collided callback function to the *collide functions. Sprites must have a
"rect" and an optional "radius" attribute.
New in pygame 1.8.0
"""
xdistance = left.rect.centerx - right.rect.centerx
ydistance = left.rect.centery - right.rect.centery
distancesquared = xdistance ** 2 + ydistance ** 2
if hasattr(left, 'radius'):
leftradius = left.radius
else:
leftrect = left.rect
# approximate the radius by half of the rect's diagonal; this may
# give false positives (especially for a long, thin rect)
leftradius = 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(left, 'radius', leftradius)
if hasattr(right, 'radius'):
rightradius = right.radius
else:
rightrect = right.rect
# approximate the radius by half of the rect's diagonal; this may
# give false positives (especially for a long, thin rect)
rightradius = 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(right, 'radius', rightradius)
return distancesquared <= (leftradius + rightradius) ** 2
class collide_circle_ratio(object):
"""detect collision between two sprites using scaled circles
This callable class checks for collisions between two sprites using a
scaled version of a sprite's radius. It is created with a ratio as the
argument to the constructor. The instance is then intended to be passed as
a collided callback function to the *collide functions.
New in pygame 1.8.1
"""
def __init__(self, ratio):
"""creates a new collide_circle_ratio callable instance
The given ratio is expected to be a floating point value used to scale
the underlying sprite radius before checking for collisions.
When the ratio is ratio=1.0, then it behaves exactly like the
collide_circle method.
"""
self.ratio = ratio
def __call__(self, left, right):
"""detect collision between two sprites using scaled circles
pygame.sprite.collide_circle_ratio(ratio)(left, right): return bool
Tests for collision between two sprites by testing whether two circles
centered on the sprites overlap after scaling the circle's radius by
the stored ratio. If the sprites have a "radius" attribute, that is
used to create the circle; otherwise, a circle is created that is big
enough to completely enclose the sprite's rect as given by the "rect"
attribute. Intended to be passed as a collided callback function to the
*collide functions. Sprites must have a "rect" and an optional "radius"
attribute.
"""
ratio = self.ratio
xdistance = left.rect.centerx - right.rect.centerx
ydistance = left.rect.centery - right.rect.centery
distancesquared = xdistance ** 2 + ydistance ** 2
if hasattr(left, "radius"):
leftradius = left.radius * ratio
else:
leftrect = left.rect
leftradius = ratio * 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(left, 'radius', leftradius)
if hasattr(right, "radius"):
rightradius = right.radius * ratio
else:
rightrect = right.rect
rightradius = ratio * 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(right, 'radius', rightradius)
return distancesquared <= (leftradius + rightradius) ** 2
def collide_mask(left, right):
"""collision detection between two sprites, using masks.
pygame.sprite.collide_mask(SpriteLeft, SpriteRight): return bool
Tests for collision between two sprites by testing if their bitmasks
overlap. If the sprites have a "mask" attribute, that is used as the mask;
otherwise, a mask is created from the sprite image. Intended to be passed
as a collided callback function to the *collide functions. Sprites must
have a "rect" and an optional "mask" attribute.
New in pygame 1.8.0
"""
xoffset = right.rect[0] - left.rect[0]
yoffset = right.rect[1] - left.rect[1]
try:
leftmask = left.mask
except AttributeError:
leftmask = from_surface(left.image)
try:
rightmask = right.mask
except AttributeError:
rightmask = from_surface(right.image)
return leftmask.overlap(rightmask, (xoffset, yoffset))
def spritecollide(sprite, group, dokill, collided=None):
"""find Sprites in a Group that intersect another Sprite
pygame.sprite.spritecollide(sprite, group, dokill, collided=None):
return Sprite_list
Return a list containing all Sprites in a Group that intersect with another
Sprite. Intersection is determined by comparing the Sprite.rect attribute
of each Sprite.
The dokill argument is a bool. If set to True, all Sprites that collide
will be removed from the Group.
The collided argument is a callback function used to calculate if two
sprites are colliding. It should take two sprites as arguments and return a
bool value indicating if they are colliding. If collided is not passed, all
sprites must have a "rect" value, which is a rectangle of the sprite area,
which will be used to calculate the collision.
"""
if dokill:
crashed = []
append = crashed.append
if collided:
for s in group.sprites():
if collided(sprite, s):
s.kill()
append(s)
else:
collide = sprite.rect.colliderect  # local alias; avoids shadowing this function
for s in group.sprites():
if collide(s.rect):
s.kill()
append(s)
return crashed
elif collided:
return [s for s in group if collided(sprite, s)]
else:
collide = sprite.rect.colliderect
return [s for s in group if collide(s.rect)]
def groupcollide(groupa, groupb, dokilla, dokillb, collided=None):
"""detect collision between a group and another group
pygame.sprite.groupcollide(groupa, groupb, dokilla, dokillb):
return dict
Given two groups, this will find the intersections between all sprites in
each group. It returns a dictionary of all sprites in the first group that
collide. The value for each item in the dictionary is a list of the sprites
in the second group it collides with. The two dokill arguments control if
the sprites from either group will be automatically removed from all
groups. Collided is a callback function used to calculate if two sprites
are colliding. It should take two sprites as arguments and return a bool
value indicating if they are colliding. If collided is not passed, all
sprites must have a "rect" value, which is a rectangle of the sprite area
that will be used to calculate the collision.
"""
crashed = {}
SC = spritecollide
if dokilla:
for s in groupa.sprites():
c = SC(s, groupb, dokillb, collided)
if c:
crashed[s] = c
s.kill()
else:
for s in groupa:
c = SC(s, groupb, dokillb, collided)
if c:
crashed[s] = c
return crashed
def spritecollideany(sprite, group, collided=None):
"""finds any sprites in a group that collide with the given sprite
pygame.sprite.spritecollideany(sprite, group): return sprite
Given a sprite and a group of sprites, this will return any single
sprite that collides with the given sprite. If there are no
collisions, then this returns None.
If you don't need all the features of the spritecollide function, this
function will be a bit quicker.
Collided is a callback function used to calculate if two sprites are
colliding. It should take two sprites as values and return a bool value
indicating if they are colliding. If collided is not passed, then all
sprites must have a "rect" value, which is a rectangle of the sprite area,
which will be used to calculate the collision.
"""
if collided:
for s in group:
if collided(sprite, s):
return s
else:
# Special case old behaviour for speed.
collide = sprite.rect.colliderect  # avoid shadowing the module-level spritecollide
for s in group:
if collide(s.rect):
return s
return None
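# Illustrative sketch (editor's addition): wiring the collided callbacks into
# the group collision helpers. `player`, `enemies`, and `bullets` are
# hypothetical sprites/groups whose members have `rect` attributes (and
# optionally `radius`/`mask` for the circle and mask variants).
def _demo_collisions(player, enemies, bullets):
    hits = spritecollide(player, enemies, False)  # plain rect overlap
    near = spritecollide(player, enemies, False, collide_circle)
    grazes = spritecollide(player, enemies, False, collide_rect_ratio(0.75))
    precise = spritecollideany(player, enemies, collide_mask)
    shot_down = groupcollide(bullets, enemies, True, True)  # kill both sides
    return hits, near, grazes, precise, shot_down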
| gpl-3.0 |
gperciva/artifastring | interactive/tuning_pitch.py | 1 | 1587 | #!/usr/bin/env python
##
# Copyright 2010--2013 Graham Percival
# This file is part of Artifastring.
#
# Artifastring is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# Artifastring is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with Artifastring. If not, see
# <http://www.gnu.org/licenses/>.
##
def expected_pitch(instrument_number, which_string):
"""Return the expected open-string frequency in Hz, tuned to equal
temperament. The frequency tables correspond to standard violin
(instrument 0), viola (1), and cello (2) tuning; which_string runs from
0 (lowest string) to 3 (highest)."""
if instrument_number == 0:
if which_string == 3:
return 659.3
if which_string == 2:
return 440.0
if which_string == 1:
return 293.7
if which_string == 0:
return 196.0
if instrument_number == 1:
if which_string == 3:
return 440.0
if which_string == 2:
return 293.7
if which_string == 1:
return 196.0
if which_string == 0:
return 130.8
if instrument_number == 2:
if which_string == 3:
return 220.0
if which_string == 2:
return 146.8
if which_string == 1:
return 98.0
if which_string == 0:
return 65.4
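# Illustrative usage sketch (editor's addition): print the expected
# open-string frequencies for instrument 0 (standard violin tuning).
if __name__ == "__main__":
    for string in range(4):
        print("string %d: %.1f Hz" % (string, expected_pitch(0, string)))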
| gpl-3.0 |
bcl/pykickstart | pykickstart/handlers/f22.py | 3 | 5158 | #
# Chris Lumens <[email protected]>
#
# Copyright 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
__all__ = ["F22Handler"]
from pykickstart import commands
from pykickstart.base import BaseHandler
from pykickstart.version import F22
class F22Handler(BaseHandler):
version = F22
commandMap = {
"auth": commands.authconfig.FC3_Authconfig,
"authconfig": commands.authconfig.FC3_Authconfig,
"autopart": commands.autopart.F21_AutoPart,
"autostep": commands.autostep.FC3_AutoStep,
"bootloader": commands.bootloader.F21_Bootloader,
"btrfs": commands.btrfs.F17_BTRFS,
"cdrom": commands.cdrom.FC3_Cdrom,
"clearpart": commands.clearpart.F21_ClearPart,
"cmdline": commands.displaymode.FC3_DisplayMode,
"device": commands.device.F8_Device,
"deviceprobe": commands.deviceprobe.FC3_DeviceProbe,
"dmraid": commands.dmraid.FC6_DmRaid,
"driverdisk": commands.driverdisk.F14_DriverDisk,
"eula": commands.eula.F20_Eula,
"fcoe": commands.fcoe.F13_Fcoe,
"firewall": commands.firewall.F20_Firewall,
"firstboot": commands.firstboot.FC3_Firstboot,
"graphical": commands.displaymode.FC3_DisplayMode,
"group": commands.group.F12_Group,
"halt": commands.reboot.F18_Reboot,
"harddrive": commands.harddrive.FC3_HardDrive,
"ignoredisk": commands.ignoredisk.F14_IgnoreDisk,
"install": commands.install.F20_Install,
"iscsi": commands.iscsi.F17_Iscsi,
"iscsiname": commands.iscsiname.FC6_IscsiName,
"keyboard": commands.keyboard.F18_Keyboard,
"lang": commands.lang.F19_Lang,
"liveimg": commands.liveimg.F19_Liveimg,
"logging": commands.logging.FC6_Logging,
"logvol": commands.logvol.F21_LogVol,
"mediacheck": commands.mediacheck.FC4_MediaCheck,
"method": commands.method.F19_Method,
"multipath": commands.multipath.FC6_MultiPath,
"network": commands.network.F22_Network,
"nfs": commands.nfs.FC6_NFS,
"ostreesetup": commands.ostreesetup.F21_OSTreeSetup,
"part": commands.partition.F20_Partition,
"partition": commands.partition.F20_Partition,
"poweroff": commands.reboot.F18_Reboot,
"raid": commands.raid.F20_Raid,
"realm": commands.realm.F19_Realm,
"reboot": commands.reboot.F18_Reboot,
"repo": commands.repo.F21_Repo,
"rescue": commands.rescue.F10_Rescue,
"rootpw": commands.rootpw.F18_RootPw,
"selinux": commands.selinux.FC3_SELinux,
"services": commands.services.FC6_Services,
"shutdown": commands.reboot.F18_Reboot,
"skipx": commands.skipx.FC3_SkipX,
"sshpw": commands.sshpw.F13_SshPw,
"sshkey": commands.sshkey.F22_SshKey,
"text": commands.displaymode.FC3_DisplayMode,
"timezone": commands.timezone.F18_Timezone,
"updates": commands.updates.F7_Updates,
"upgrade": commands.upgrade.F20_Upgrade,
"url": commands.url.F18_Url,
"user": commands.user.F19_User,
"vnc": commands.vnc.F9_Vnc,
"volgroup": commands.volgroup.F21_VolGroup,
"xconfig": commands.xconfig.F14_XConfig,
"zerombr": commands.zerombr.F9_ZeroMbr,
"zfcp": commands.zfcp.F14_ZFCP,
}
dataMap = {
"BTRFSData": commands.btrfs.F17_BTRFSData,
"DriverDiskData": commands.driverdisk.F14_DriverDiskData,
"DeviceData": commands.device.F8_DeviceData,
"DmRaidData": commands.dmraid.FC6_DmRaidData,
"FcoeData": commands.fcoe.F13_FcoeData,
"GroupData": commands.group.F12_GroupData,
"IscsiData": commands.iscsi.F17_IscsiData,
"LogVolData": commands.logvol.F21_LogVolData,
"MultiPathData": commands.multipath.FC6_MultiPathData,
"NetworkData": commands.network.F22_NetworkData,
"PartData": commands.partition.F18_PartData,
"RaidData": commands.raid.F18_RaidData,
"RepoData": commands.repo.F21_RepoData,
"SshPwData": commands.sshpw.F13_SshPwData,
"SshKeyData": commands.sshkey.F22_SshKeyData,
"UserData": commands.user.F19_UserData,
"VolGroupData": commands.volgroup.F21_VolGroupData,
"ZFCPData": commands.zfcp.F14_ZFCPData,
}
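# Illustrative usage sketch (editor's addition), assuming the standard
# pykickstart parser API (KickstartParser / readKickstartFromString); the
# kickstart snippet is hypothetical.
def _demo_f22_handler():
    from pykickstart.parser import KickstartParser
    handler = F22Handler()
    KickstartParser(handler).readKickstartFromString("autopart\nreboot\n")
    return handler.autopart.seen  # each mapped command is exposed as an attribute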
| gpl-2.0 |
aelarabawy/hostap | tests/hwsim/test_peerkey.py | 2 | 2575 | # PeerKey tests
# Copyright (c) 2013, Jouni Malinen <[email protected]>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import time
import hwsim_utils
import hostapd
from wlantest import Wlantest
def test_peerkey(dev, apdev):
"""RSN AP and PeerKey between two STAs"""
ssid = "test-peerkey"
passphrase = "12345678"
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
params['peerkey'] = "1"
hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].connect(ssid, psk=passphrase, scan_freq="2412", peerkey=True)
dev[1].connect(ssid, psk=passphrase, scan_freq="2412", peerkey=True)
hwsim_utils.test_connectivity_sta(dev[0], dev[1])
dev[0].request("STKSTART " + dev[1].p2p_interface_addr())
time.sleep(0.5)
# NOTE: Actual use of the direct link (DLS) is not supported in
# mac80211_hwsim, so this operation fails at setting the keys after
# successfully completed 4-way handshake. This test case does allow the
# key negotiation part to be tested for coverage, though.
def test_peerkey_unknown_peer(dev, apdev):
"""RSN AP and PeerKey attempt with unknown peer"""
ssid = "test-peerkey"
passphrase = "12345678"
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
params['peerkey'] = "1"
hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].connect(ssid, psk=passphrase, scan_freq="2412", peerkey=True)
dev[1].connect(ssid, psk=passphrase, scan_freq="2412", peerkey=True)
hwsim_utils.test_connectivity_sta(dev[0], dev[1])
dev[0].request("STKSTART " + dev[2].p2p_interface_addr())
time.sleep(0.5)
def test_peerkey_pairwise_mismatch(dev, apdev):
"""RSN TKIP+CCMP AP and PeerKey between two STAs using different ciphers"""
wt = Wlantest()
wt.flush()
wt.add_passphrase("12345678")
ssid = "test-peerkey"
passphrase = "12345678"
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
params['peerkey'] = "1"
params['rsn_pairwise'] = "TKIP CCMP"
hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].connect(ssid, psk=passphrase, scan_freq="2412", peerkey=True,
pairwise="CCMP")
dev[1].connect(ssid, psk=passphrase, scan_freq="2412", peerkey=True,
pairwise="TKIP")
hwsim_utils.test_connectivity_sta(dev[0], dev[1])
dev[0].request("STKSTART " + dev[1].p2p_interface_addr())
time.sleep(0.5)
dev[1].request("STKSTART " + dev[0].p2p_interface_addr())
time.sleep(0.5)
| gpl-2.0 |
ondrejch/FSM | scripts/mk1/surfaces.py | 1 | 2678 | #!/usr/bin/env python3
#
# Generate surfaces, pins, and lattice for FastDrum Serpent deck
# Ondrej Chvala, [email protected]
# 2016-08-02
import math
def write_surfaces(N, r, refl):
'''Function to write material cards for Serpent input deck.
Inputs:
N: size of the N x N checkerboard lattice
r: radius of the fuel in the fuel pin [cm]
refl: reflector thickness [cm]
Outputs:
surfaces: String containing the surface cards'''
rclad = r + 0.1 # Cladding outer radius
pitch = 2.0 * rclad + 0.01 # Lattice pitch
l10 = N * pitch / 2.0 # Radius of the cylinder bounding the lattice
l20 = l10 + pitch # Radius of the cylinder bounding the lead block
l21 = l20 + 0.1 # Radius of the air gap cylinder
l22 = l21 + refl # Radius of the steel reflector cylinder
fuel_rod_weight = 19.1 * math.pi * r * r * l10 * 2.0 # Uranium mass per rod [g]; 19.1 g/cm^3 is the density of uranium metal
surfaces = '''
%______________pins_________________________________________________
pin 1 % fuel pin
fuel {r}
ssteel {rclad}
air
pin 9 % lead block
lead
%______________surface definitions__________________________________
surf 10 cyl 0 0 {l10} -{l10} {l10} % Inner cylinder with the lattice
surf 20 cyl 0 0 {l20} -{l20} {l20} % Lead cylinder around the core
surf 21 cyl 0 0 {l21} -{l21} {l21} % Air gap - likely useless
surf 22 cyl 0 0 {l22} -{l22} {l22} % Radial reflector
%______________lattice definitions_____________
lat 50 1 0 0 {N} {N} {pitch}
'''
n_fuel_rods = 0;
for i in range(N): # Generates checkerboard lattice
iodd = i % 2 # offset alternate rows to form the checkerboard
for j in range(N):
r_lat = math.sqrt((i-N/2.0 + 0.5)**2 + (j-N/2.0 + 0.5)**2) # cell radius in the lattice in units of N
if ((j+iodd) % 2) and (r_lat < (N-1)/2.0) :
surfaces += "1 "
n_fuel_rods = n_fuel_rods + 1
else:
surfaces += "9 "
surfaces += "\n"
surfaces += "\n"
surfaces += "% Number of fuel rods = {}\n".format(n_fuel_rods)
surfaces += "% Uranium weight per rod [kg] = {:8.4f}\n".format(fuel_rod_weight/ 1e3)
surfaces += "% Total uranium weight [kg] = {:8.4f}\n".format( n_fuel_rods * fuel_rod_weight/ 1e3)
surfaces = surfaces.format(**locals())
return surfaces
if __name__ == '__main__':
print("This module writes surfaces, pins, and lattice for FastDrum Serpent deck.")
input("Press Ctrl+C to quit, or enter else to test it.")
print(write_surfaces(11, 1.25, 50))
| gpl-3.0 |
halberom/ansible | lib/ansible/modules/storage/netapp/na_cdot_user_role.py | 18 | 7307 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
module: na_cdot_user_role
short_description: useradmin configuration and management
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create or destroy user roles
options:
state:
description:
- Whether the specified user should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the role to manage.
required: true
command_directory_name:
description:
- The command or command directory to which the role has an access.
required: true
access_level:
description:
- The name of the role to manage.
choices: ['none', 'readonly', 'all']
default: 'all'
vserver:
description:
- The name of the vserver to use.
required: true
'''
EXAMPLES = """
- name: Create User Role
na_cdot_user_role:
state: present
name: ansibleRole
command_directory_name: DEFAULT
access_level: none
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTUserRole(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
command_directory_name=dict(required=True, type='str'),
access_level=dict(required=False, type='str', default='all',
choices=['none', 'readonly', 'all']),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.command_directory_name = p['command_directory_name']
self.access_level = p['access_level']
self.vserver = p['vserver']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module)
def get_role(self):
"""
Checks if the role exists for specific command-directory-name.
:return:
True if role found
False if role is not found
:rtype: bool
"""
security_login_role_get_iter = netapp_utils.zapi.NaElement(
'security-login-role-get-iter')
query_details = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-role-info', **{'vserver': self.vserver,
'role-name': self.name,
'command-directory-name':
self.command_directory_name})
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
security_login_role_get_iter.add_child_elem(query)
try:
result = self.server.invoke_successfully(
security_login_role_get_iter, enable_tunneling=False)
except netapp_utils.zapi.NaApiError:
e = get_exception()
# Error 16031 denotes a role not being found.
if str(e.code) == "16031":
return False
else:
self.module.fail_json(msg='Error getting role %s' % self.name, exception=str(e))
if (result.get_child_by_name('num-records') and
int(result.get_child_content('num-records')) >= 1):
return True
else:
return False
def create_role(self):
role_create = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-role-create', **{'vserver': self.vserver,
'role-name': self.name,
'command-directory-name':
self.command_directory_name,
'access-level':
self.access_level})
try:
self.server.invoke_successfully(role_create,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError:
err = get_exception()
self.module.fail_json(msg='Error creating role %s' % self.name, exception=str(err))
def delete_role(self):
role_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'security-login-role-delete', **{'vserver': self.vserver,
'role-name': self.name,
'command-directory-name':
self.command_directory_name})
try:
self.server.invoke_successfully(role_delete,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError:
err = get_exception()
self.module.fail_json(msg='Error removing role %s' % self.name, exception=str(err))
def apply(self):
changed = False
role_exists = self.get_role()
if role_exists:
if self.state == 'absent':
changed = True
# Check if properties need to be updated
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not role_exists:
self.create_role()
# Update properties
elif self.state == 'absent':
self.delete_role()
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTUserRole()
v.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
miminus/youtube-dl | youtube_dl/extractor/francetv.py | 44 | 12637 | # encoding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
clean_html,
ExtractorError,
int_or_none,
parse_duration,
determine_ext,
)
from .dailymotion import DailymotionCloudIE
class FranceTVBaseInfoExtractor(InfoExtractor):
def _extract_video(self, video_id, catalogue):
info = self._download_json(
'http://webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=%s&catalogue=%s'
% (video_id, catalogue),
video_id, 'Downloading video JSON')
if info.get('status') == 'NOK':
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, info['message']), expected=True)
allowed_countries = info['videos'][0].get('geoblocage')
if allowed_countries:
georestricted = True
geo_info = self._download_json(
'http://geo.francetv.fr/ws/edgescape.json', video_id,
'Downloading geo restriction info')
country = geo_info['reponse']['geo_info']['country_code']
if country not in allowed_countries:
raise ExtractorError(
'The video is not available from your location',
expected=True)
else:
georestricted = False
formats = []
for video in info['videos']:
if video['statut'] != 'ONLINE':
continue
video_url = video['url']
if not video_url:
continue
format_id = video['format']
ext = determine_ext(video_url)
if ext == 'f4m':
if georestricted:
# See https://github.com/rg3/youtube-dl/issues/3963
# m3u8 urls work fine
continue
f4m_url = self._download_webpage(
'http://hdfauth.francetv.fr/esi/TA?url=%s' % video_url,
video_id, 'Downloading f4m manifest token', fatal=False)
if f4m_url:
formats.extend(self._extract_f4m_formats(
f4m_url + '&hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, 1, format_id))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(video_url, video_id, 'mp4', m3u8_id=format_id))
elif video_url.startswith('rtmp'):
formats.append({
'url': video_url,
'format_id': 'rtmp-%s' % format_id,
'ext': 'flv',
'preference': 1,
})
else:
formats.append({
'url': video_url,
'format_id': format_id,
'preference': -1,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': info['titre'],
'description': clean_html(info['synopsis']),
'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']),
'timestamp': int_or_none(info['diffusion']['timestamp']),
'formats': formats,
}
class PluzzIE(FranceTVBaseInfoExtractor):
IE_NAME = 'pluzz.francetv.fr'
_VALID_URL = r'https?://pluzz\.francetv\.fr/videos/(.*?)\.html'
# Can't use tests, videos expire in 7 days
def _real_extract(self, url):
title = re.match(self._VALID_URL, url).group(1)
webpage = self._download_webpage(url, title)
video_id = self._search_regex(
r'data-diffusion="(\d+)"', webpage, 'ID')
return self._extract_video(video_id, 'Pluzz')
class FranceTvInfoIE(FranceTVBaseInfoExtractor):
IE_NAME = 'francetvinfo.fr'
_VALID_URL = r'https?://(?:www|mobile)\.francetvinfo\.fr/.*/(?P<title>.+)\.html'
_TESTS = [{
'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
'info_dict': {
'id': '84981923',
'ext': 'flv',
'title': 'Soir 3',
'upload_date': '20130826',
'timestamp': 1377548400,
},
}, {
'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',
'info_dict': {
'id': 'EV_20019',
'ext': 'mp4',
'title': 'Débat des candidats à la Commission européenne',
'description': 'Débat des candidats à la Commission européenne',
},
'params': {
'skip_download': 'HLS (requires ffmpeg)'
},
'skip': 'Ce direct est terminé et sera disponible en rattrapage dans quelques minutes.',
}, {
'url': 'http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html',
'md5': 'f485bda6e185e7d15dbc69b72bae993e',
'info_dict': {
'id': '556e03339473995ee145930c',
'ext': 'mp4',
'title': 'Les entreprises familiales : le secret de la réussite',
'thumbnail': 're:^https?://.*\.jpe?g$',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
page_title = mobj.group('title')
webpage = self._download_webpage(url, page_title)
dmcloud_url = DailymotionCloudIE._extract_dmcloud_url(webpage)
if dmcloud_url:
return self.url_result(dmcloud_url, 'DailymotionCloud')
video_id, catalogue = self._search_regex(
r'id-video=([^@]+@[^"]+)', webpage, 'video id').split('@')
return self._extract_video(video_id, catalogue)
class FranceTVIE(FranceTVBaseInfoExtractor):
IE_NAME = 'francetv'
IE_DESC = 'France 2, 3, 4, 5 and Ô'
_VALID_URL = r'''(?x)
https?://
(?:
(?:www\.)?france[2345o]\.fr/
(?:
emissions/[^/]+/(?:videos|diffusions)|
emission/[^/]+|
videos|
jt
)
/|
embed\.francetv\.fr/\?ue=
)
(?P<id>[^/?]+)
'''
_TESTS = [
# france2
{
'url': 'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104',
'md5': 'c03fc87cb85429ffd55df32b9fc05523',
'info_dict': {
'id': '109169362',
'ext': 'flv',
'title': '13h15, le dimanche...',
'description': 'md5:9a0932bb465f22d377a449be9d1a0ff7',
'upload_date': '20140914',
'timestamp': 1410693600,
},
},
# france3
{
'url': 'http://www.france3.fr/emissions/pieces-a-conviction/diffusions/13-11-2013_145575',
'md5': '679bb8f8921f8623bd658fa2f8364da0',
'info_dict': {
'id': '000702326_CAPP_PicesconvictionExtrait313022013_120220131722_Au',
'ext': 'mp4',
'title': 'Le scandale du prix des médicaments',
'description': 'md5:1384089fbee2f04fc6c9de025ee2e9ce',
'upload_date': '20131113',
'timestamp': 1384380000,
},
},
# france4
{
'url': 'http://www.france4.fr/emissions/hero-corp/videos/rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
'md5': 'a182bf8d2c43d88d46ec48fbdd260c1c',
'info_dict': {
'id': 'rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
'ext': 'mp4',
'title': 'Hero Corp Making of - Extrait 1',
'description': 'md5:c87d54871b1790679aec1197e73d650a',
'upload_date': '20131106',
'timestamp': 1383766500,
},
},
# france5
{
'url': 'http://www.france5.fr/emissions/c-a-dire/videos/92837968',
'md5': '78f0f4064f9074438e660785bbf2c5d9',
'info_dict': {
'id': '108961659',
'ext': 'flv',
'title': 'C à dire ?!',
'description': 'md5:1a4aeab476eb657bf57c4ff122129f81',
'upload_date': '20140915',
'timestamp': 1410795000,
},
},
# franceo
{
'url': 'http://www.franceo.fr/jt/info-soir/18-07-2015',
'md5': '47d5816d3b24351cdce512ad7ab31da8',
'info_dict': {
'id': '125377621',
'ext': 'flv',
'title': 'Infô soir',
'description': 'md5:01b8c6915a3d93d8bbbd692651714309',
'upload_date': '20150718',
'timestamp': 1437241200,
'duration': 414,
},
},
{
# francetv embed
'url': 'http://embed.francetv.fr/?ue=8d7d3da1e3047c42ade5a5d7dfd3fc87',
'info_dict': {
'id': 'EV_30231',
'ext': 'flv',
'title': 'Alcaline, le concert avec Calogero',
'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
'upload_date': '20150226',
'timestamp': 1424989860,
'duration': 5400,
},
},
{
'url': 'http://www.france4.fr/emission/highlander/diffusion-du-17-07-2015-04h05',
'only_matching': True,
},
{
'url': 'http://www.franceo.fr/videos/125377617',
'only_matching': True,
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_id, catalogue = self._html_search_regex(
r'href="http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
webpage, 'video ID').split('@')
return self._extract_video(video_id, catalogue)
class GenerationQuoiIE(InfoExtractor):
IE_NAME = 'france2.fr:generation-quoi'
_VALID_URL = r'https?://generation-quoi\.france2\.fr/portrait/(?P<id>[^/?#]+)'
_TEST = {
'url': 'http://generation-quoi.france2.fr/portrait/garde-a-vous',
'info_dict': {
'id': 'k7FJX8VBcvvLmX4wA5Q',
'ext': 'mp4',
'title': 'Génération Quoi - Garde à Vous',
'uploader': 'Génération Quoi',
},
'params': {
# It uses Dailymotion
'skip_download': True,
},
}
def _real_extract(self, url):
display_id = self._match_id(url)
info_url = compat_urlparse.urljoin(url, '/medias/video/%s.json' % display_id)
info_json = self._download_webpage(info_url, display_id)
info = json.loads(info_json)
return self.url_result('http://www.dailymotion.com/video/%s' % info['id'],
ie='Dailymotion')
class CultureboxIE(FranceTVBaseInfoExtractor):
IE_NAME = 'culturebox.francetvinfo.fr'
_VALID_URL = r'https?://(?:m\.)?culturebox\.francetvinfo\.fr/(?P<name>.*?)(\?|$)'
_TEST = {
'url': 'http://culturebox.francetvinfo.fr/live/musique/musique-classique/le-livre-vermeil-de-montserrat-a-la-cathedrale-delne-214511',
'md5': '9b88dc156781c4dbebd4c3e066e0b1d6',
'info_dict': {
'id': 'EV_50111',
'ext': 'flv',
'title': "Le Livre Vermeil de Montserrat à la Cathédrale d'Elne",
'description': 'md5:f8a4ad202e8fe533e2c493cc12e739d9',
'upload_date': '20150320',
'timestamp': 1426892400,
'duration': 2760.9,
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
name = mobj.group('name')
webpage = self._download_webpage(url, name)
if ">Ce live n'est plus disponible en replay<" in webpage:
raise ExtractorError('Video %s is not available' % name, expected=True)
video_id, catalogue = self._search_regex(
r'"http://videos\.francetv\.fr/video/([^@]+@[^"]+)"', webpage, 'video id').split('@')
return self._extract_video(video_id, catalogue)
| unlicense |
zhanghenry/stocks | tests/test_client_regress/views.py | 13 | 5231 | import json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.test import Client
from django.test.client import CONTENT_TYPE_RE
from django.test.utils import setup_test_environment
from django.utils.six.moves.urllib.parse import urlencode
class CustomTestException(Exception):
pass
def no_template_view(request):
"A simple view that expects a GET request, and returns a rendered template"
return HttpResponse("No template used. Sample content: twice once twice. Content ends.")
def staff_only_view(request):
"A view that can only be visited by staff. Non staff members get an exception"
if request.user.is_staff:
return HttpResponse('')
else:
raise CustomTestException()
def get_view(request):
"A simple login protected view"
return HttpResponse("Hello world")
get_view = login_required(get_view)
def request_data(request, template='base.html', data='sausage'):
"A simple view that returns the request data in the context"
request_foo = request.REQUEST.get('foo')
request_bar = request.REQUEST.get('bar')
return render_to_response(template, {
'get-foo': request.GET.get('foo'),
'get-bar': request.GET.get('bar'),
'post-foo': request.POST.get('foo'),
'post-bar': request.POST.get('bar'),
'request-foo': request_foo,
'request-bar': request_bar,
'data': data,
})
def view_with_argument(request, name):
"""A view that takes a string argument
The purpose of this view is to check that if a space is provided in
the argument, the test framework unescapes the %20 before passing
the value to the view.
"""
if name == 'Arthur Dent':
return HttpResponse('Hi, Arthur')
else:
return HttpResponse('Howdy, %s' % name)
def nested_view(request):
"""
A view that uses test client to call another view.
"""
setup_test_environment()
c = Client()
c.get("/no_template_view")
return render_to_response('base.html', {'nested': 'yes'})
def login_protected_redirect_view(request):
"A view that redirects all requests to the GET view"
return HttpResponseRedirect('/get_view/')
login_protected_redirect_view = login_required(login_protected_redirect_view)
def redirect_to_self_with_changing_query_view(request):
query = request.GET.copy()
query['counter'] += '0'
return HttpResponseRedirect('/redirect_to_self_with_changing_query_view/?%s' % urlencode(query))
def set_session_view(request):
"A view that sets a session variable"
request.session['session_var'] = 'YES'
return HttpResponse('set_session')
def check_session_view(request):
"A view that reads a session variable"
return HttpResponse(request.session.get('session_var', 'NO'))
def request_methods_view(request):
"A view that responds with the request method"
return HttpResponse('request method: %s' % request.method)
def return_unicode(request):
return render_to_response('unicode.html')
def return_undecodable_binary(request):
return HttpResponse(
b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e ReportLab Generated PDF document http://www.reportlab.com'
)
def return_json_file(request):
"A view that parses and returns a JSON string as a file."
match = CONTENT_TYPE_RE.match(request.META['CONTENT_TYPE'])
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
# This just checks that the uploaded data is JSON
obj_dict = json.loads(request.body.decode(charset))
obj_json = json.dumps(obj_dict, cls=DjangoJSONEncoder, ensure_ascii=False)
response = HttpResponse(obj_json.encode(charset), status=200,
content_type='application/json; charset=%s' % charset)
response['Content-Disposition'] = 'attachment; filename=testfile.json'
return response
def check_headers(request):
"A view that responds with value of the X-ARG-CHECK header"
return HttpResponse('HTTP_X_ARG_CHECK: %s' % request.META.get('HTTP_X_ARG_CHECK', 'Undefined'))
def body(request):
"A view that is requested with GET and accesses request.body. Refs #14753."
return HttpResponse(request.body)
def read_all(request):
"A view that is requested with accesses request.read()."
return HttpResponse(request.read())
def read_buffer(request):
"A view that is requested with accesses request.read(LARGE_BUFFER)."
return HttpResponse(request.read(99999))
def request_context_view(request):
# Special attribute that won't be present on a plain HttpRequest
request.special_path = request.path
return render_to_response('request_context.html', context_instance=RequestContext(request, {}))
def render_template_multiple_times(request):
"""A view that renders a template multiple times."""
return HttpResponse(
render_to_string('base.html') + render_to_string('base.html'))
| bsd-3-clause |
JRepoInd/plugin.video.tvondesizonexl | xoze/utils/system.py | 3 | 1966 | '''
Created on Oct 11, 2013
@author: 'ajdeveloped'
This file is part of XOZE.
XOZE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
XOZE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with XOZE. If not, see <http://www.gnu.org/licenses/>.
'''
# XBMC
import logging
try:
import xbmc # @UnresolvedImport
except ImportError:
from xoze.alternatives import xbmcdummy as xbmc
try:
import xbmcaddon # @UnresolvedImport
except ImportError:
from xoze.alternatives import xbmcdummy as xbmcaddon
def get_translated_path(filepath):
return xbmc.translatePath(filepath)
def get_addon(addon_id):
return xbmcaddon.Addon(id=addon_id)
def show_busy_dialog():
xbmc.executebuiltin('ActivateWindow(busydialog)')
def hide_busy_dialog():
xbmc.executebuiltin('Dialog.Close(busydialog)')
exit_signal = xbmc.abortRequested
def exit_addon():
global exit_signal
exit_signal = True
_trans_table = {
logging.DEBUG: xbmc.LOGDEBUG,
logging.INFO: xbmc.LOGINFO,
logging.WARNING: xbmc.LOGWARNING,
logging.ERROR: xbmc.LOGERROR,
logging.CRITICAL: xbmc.LOGSEVERE,
}
class LoggingHandler(logging.Handler):
def emit(self, record):
        if isinstance(record.msg, Exception):
logging.exception(record.msg)
import traceback
traceback.print_exc()
else:
message = record.msg
if type(message) is not str:
message = str(message)
xbmc_level = _trans_table[record.levelno]
xbmc.log(message, xbmc_level)
| gpl-3.0 |
pegasus-isi/pegasus | packages/pegasus-python/src/Pegasus/service/monitoring/errors.py | 1 | 1894 | import logging
from flask import make_response
from sqlalchemy.orm.exc import NoResultFound
from Pegasus.service._query import InvalidQueryError
from Pegasus.service._serialize import jsonify
from Pegasus.service._sort import InvalidSortError
from Pegasus.service.base import ErrorResponse, InvalidJSONError
from Pegasus.service.monitoring import monitoring
log = logging.getLogger(__name__)
JSON_HEADER = {"Content-Type": "application/json"}
"""
Error
{
"code" : <string:code>,
"message" : <string:message>,
"errors" : [
{
"field" : <string:field>,
"errors" : [
<string:errors>,
..
]
},
..
]
}
"""
@monitoring.errorhandler(NoResultFound)
def no_result_found(error):
e = ErrorResponse("NOT_FOUND", str(error))
response_json = jsonify(e)
return make_response(response_json, 404, JSON_HEADER)
@monitoring.errorhandler(InvalidQueryError)
def invalid_query_error(error):
e = ErrorResponse("INVALID_QUERY", str(error))
response_json = jsonify(e)
return make_response(response_json, 400, JSON_HEADER)
@monitoring.errorhandler(InvalidSortError)
def invalid_order_error(error):
e = ErrorResponse("INVALID_ORDER", str(error))
response_json = jsonify(e)
return make_response(response_json, 400, JSON_HEADER)
@monitoring.errorhandler(InvalidJSONError)
def invalid_json_error(error):
e = ErrorResponse("INVALID_JSON", str(error))
response_json = jsonify(e)
return make_response(response_json, 400, JSON_HEADER)
@monitoring.errorhandler(Exception)
def catch_all(error):
log.exception(error)
app_code, http_code = error.codes if hasattr(error, "codes") else ("UNKNOWN", 500)
e = ErrorResponse(app_code, str(error))
response_json = jsonify(e)
return make_response(response_json, http_code, JSON_HEADER)
| apache-2.0 |
NEricN/RobotCSimulator | Python/App/Lib/bsddb/dbtables.py | 98 | 30866 | #-----------------------------------------------------------------------
#
# Copyright (C) 2000, 2001 by Autonomous Zone Industries
# Copyright (C) 2002 Gregory P. Smith
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
# this header remains intact and that you do not claim any
# rights of ownership or authorship of this software. This
# software has been tested, but no warranty is expressed or
# implied.
#
# -- Gregory P. Smith <[email protected]>
# This provides a simple database table interface built on top of
# the Python Berkeley DB 3 interface.
#
_cvsid = '$Id$'
import re
import sys
import copy
import random
import struct
if sys.version_info[0] >= 3 :
import pickle
else :
if sys.version_info < (2, 6) :
import cPickle as pickle
else :
# When we drop support for python 2.4
# we could use: (in 2.5 we need a __future__ statement)
#
# with warnings.catch_warnings():
# warnings.filterwarnings(...)
# ...
#
# We can not use "with" as is, because it would be invalid syntax
# in python 2.4 and (with no __future__) 2.5.
# Here we simulate "with" following PEP 343 :
import warnings
w = warnings.catch_warnings()
w.__enter__()
try :
warnings.filterwarnings('ignore',
message='the cPickle module has been removed in Python 3.0',
category=DeprecationWarning)
import cPickle as pickle
finally :
w.__exit__()
del w
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db
except ImportError:
# For Python 2.3
from bsddb import db
class TableDBError(StandardError):
pass
class TableAlreadyExists(TableDBError):
pass
class Cond:
"""This condition matches everything"""
def __call__(self, s):
return 1
class ExactCond(Cond):
"""Acts as an exact match condition function"""
def __init__(self, strtomatch):
self.strtomatch = strtomatch
def __call__(self, s):
return s == self.strtomatch
class PrefixCond(Cond):
"""Acts as a condition function for matching a string prefix"""
def __init__(self, prefix):
self.prefix = prefix
def __call__(self, s):
return s[:len(self.prefix)] == self.prefix
class PostfixCond(Cond):
"""Acts as a condition function for matching a string postfix"""
def __init__(self, postfix):
self.postfix = postfix
def __call__(self, s):
return s[-len(self.postfix):] == self.postfix
class LikeCond(Cond):
"""
Acts as a function that will match using an SQL 'LIKE' style
string. Case insensitive and % signs are wild cards.
This isn't perfect but it should work for the simple common cases.
"""
def __init__(self, likestr, re_flags=re.IGNORECASE):
# escape python re characters
chars_to_escape = '.*+()[]?'
for char in chars_to_escape :
likestr = likestr.replace(char, '\\'+char)
# convert %s to wildcards
self.likestr = likestr.replace('%', '.*')
self.re = re.compile('^'+self.likestr+'$', re_flags)
def __call__(self, s):
return self.re.match(s)
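# Example usage of the condition classes (a minimal sketch; the strings are
# illustrative only):
#
#     assert ExactCond('spam')('spam')
#     assert PrefixCond('sp')('spam')
#     assert PostfixCond('am')('spam')
#     assert LikeCond('%PA%')('spam')    # case insensitive, % is a wildcard
#     assert not LikeCond('sp%')('eggs')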
#
# keys used to store database metadata
#
_table_names_key = '__TABLE_NAMES__' # list of the tables in this db
_columns = '._COLUMNS__' # table_name+this key contains a list of columns
def _columns_key(table):
return table + _columns
#
# these keys are found within table sub databases
#
_data = '._DATA_.' # this+column+this+rowid key contains table data
_rowid = '._ROWID_.' # this+rowid+this key contains a unique entry for each
# row in the table. (no data is stored)
_rowid_str_len = 8 # length in bytes of the unique rowid strings
def _data_key(table, col, rowid):
return table + _data + col + _data + rowid
def _search_col_data_key(table, col):
return table + _data + col + _data
def _search_all_data_key(table):
return table + _data
def _rowid_key(table, rowid):
return table + _rowid + rowid + _rowid
def _search_rowid_key(table):
return table + _rowid
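# For illustration, the composed keys look like this ('<rowid>' stands in for
# the 8 random bytes of a real row id):
#
#     _columns_key('tbl')                -> 'tbl._COLUMNS__'
#     _data_key('tbl', 'col', '<rowid>') -> 'tbl._DATA_.col._DATA_.<rowid>'
#     _rowid_key('tbl', '<rowid>')       -> 'tbl._ROWID_.<rowid>._ROWID_.'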
def contains_metastrings(s) :
"""Verify that the given string does not contain any
metadata strings that might interfere with dbtables database operation.
"""
if (s.find(_table_names_key) >= 0 or
s.find(_columns) >= 0 or
s.find(_data) >= 0 or
s.find(_rowid) >= 0):
# Then
return 1
else:
return 0
class bsdTableDB :
def __init__(self, filename, dbhome, create=0, truncate=0, mode=0600,
recover=0, dbflags=0):
"""bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0600)
Open database name in the dbhome Berkeley DB directory.
Use keyword arguments when calling this constructor.
"""
self.db = None
myflags = db.DB_THREAD
if create:
myflags |= db.DB_CREATE
flagsforenv = (db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_INIT_LOG |
db.DB_INIT_TXN | dbflags)
# DB_AUTO_COMMIT isn't a valid flag for env.open()
try:
dbflags |= db.DB_AUTO_COMMIT
except AttributeError:
pass
if recover:
flagsforenv = flagsforenv | db.DB_RECOVER
self.env = db.DBEnv()
# enable auto deadlock avoidance
self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
self.env.open(dbhome, myflags | flagsforenv)
if truncate:
myflags |= db.DB_TRUNCATE
self.db = db.DB(self.env)
# this code relies on DBCursor.set* methods to raise exceptions
# rather than returning None
self.db.set_get_returns_none(1)
# allow duplicate entries [warning: be careful w/ metadata]
self.db.set_flags(db.DB_DUP)
self.db.open(filename, db.DB_BTREE, dbflags | myflags, mode)
self.dbfilename = filename
if sys.version_info[0] >= 3 :
class cursor_py3k(object) :
def __init__(self, dbcursor) :
self._dbcursor = dbcursor
def close(self) :
return self._dbcursor.close()
def set_range(self, search) :
v = self._dbcursor.set_range(bytes(search, "iso8859-1"))
if v is not None :
v = (v[0].decode("iso8859-1"),
v[1].decode("iso8859-1"))
return v
def __next__(self) :
v = getattr(self._dbcursor, "next")()
if v is not None :
v = (v[0].decode("iso8859-1"),
v[1].decode("iso8859-1"))
return v
class db_py3k(object) :
def __init__(self, db) :
self._db = db
def cursor(self, txn=None) :
return cursor_py3k(self._db.cursor(txn=txn))
def has_key(self, key, txn=None) :
return getattr(self._db,"has_key")(bytes(key, "iso8859-1"),
txn=txn)
def put(self, key, value, flags=0, txn=None) :
key = bytes(key, "iso8859-1")
if value is not None :
value = bytes(value, "iso8859-1")
return self._db.put(key, value, flags=flags, txn=txn)
def put_bytes(self, key, value, txn=None) :
key = bytes(key, "iso8859-1")
return self._db.put(key, value, txn=txn)
def get(self, key, txn=None, flags=0) :
key = bytes(key, "iso8859-1")
v = self._db.get(key, txn=txn, flags=flags)
if v is not None :
v = v.decode("iso8859-1")
return v
def get_bytes(self, key, txn=None, flags=0) :
key = bytes(key, "iso8859-1")
return self._db.get(key, txn=txn, flags=flags)
def delete(self, key, txn=None) :
key = bytes(key, "iso8859-1")
return self._db.delete(key, txn=txn)
def close (self) :
return self._db.close()
self.db = db_py3k(self.db)
else : # Python 2.x
pass
# Initialize the table names list if this is a new database
txn = self.env.txn_begin()
try:
if not getattr(self.db, "has_key")(_table_names_key, txn):
getattr(self.db, "put_bytes", self.db.put) \
(_table_names_key, pickle.dumps([], 1), txn=txn)
# Yes, bare except
except:
txn.abort()
raise
else:
txn.commit()
# TODO verify more of the database's metadata?
self.__tablecolumns = {}
def __del__(self):
self.close()
def close(self):
if self.db is not None:
self.db.close()
self.db = None
if self.env is not None:
self.env.close()
self.env = None
def checkpoint(self, mins=0):
self.env.txn_checkpoint(mins)
def sync(self):
self.db.sync()
def _db_print(self) :
"""Print the database to stdout for debugging"""
print "******** Printing raw database for debugging ********"
cur = self.db.cursor()
try:
key, data = cur.first()
while 1:
print repr({key: data})
next = cur.next()
if next:
key, data = next
else:
cur.close()
return
except db.DBNotFoundError:
cur.close()
def CreateTable(self, table, columns):
"""CreateTable(table, columns) - Create a new table in the database.
raises TableDBError if it already exists or for other DB errors.
"""
assert isinstance(columns, list)
txn = None
try:
# checking sanity of the table and column names here on
# table creation will prevent problems elsewhere.
if contains_metastrings(table):
raise ValueError(
"bad table name: contains reserved metastrings")
for column in columns :
if contains_metastrings(column):
raise ValueError(
"bad column name: contains reserved metastrings")
columnlist_key = _columns_key(table)
if getattr(self.db, "has_key")(columnlist_key):
raise TableAlreadyExists, "table already exists"
txn = self.env.txn_begin()
# store the table's column info
getattr(self.db, "put_bytes", self.db.put)(columnlist_key,
pickle.dumps(columns, 1), txn=txn)
# add the table name to the tablelist
tablelist = pickle.loads(getattr(self.db, "get_bytes",
self.db.get) (_table_names_key, txn=txn, flags=db.DB_RMW))
tablelist.append(table)
# delete 1st, in case we opened with DB_DUP
self.db.delete(_table_names_key, txn=txn)
getattr(self.db, "put_bytes", self.db.put)(_table_names_key,
pickle.dumps(tablelist, 1), txn=txn)
txn.commit()
txn = None
except db.DBError, dberror:
if txn:
txn.abort()
if sys.version_info < (2, 6) :
raise TableDBError, dberror[1]
else :
raise TableDBError, dberror.args[1]
def ListTableColumns(self, table):
"""Return a list of columns in the given table.
[] if the table doesn't exist.
"""
assert isinstance(table, str)
if contains_metastrings(table):
raise ValueError, "bad table name: contains reserved metastrings"
columnlist_key = _columns_key(table)
if not getattr(self.db, "has_key")(columnlist_key):
return []
pickledcolumnlist = getattr(self.db, "get_bytes",
self.db.get)(columnlist_key)
if pickledcolumnlist:
return pickle.loads(pickledcolumnlist)
else:
return []
def ListTables(self):
"""Return a list of tables in this database."""
        pickledtablelist = getattr(self.db, "get_bytes",
                self.db.get)(_table_names_key)
if pickledtablelist:
return pickle.loads(pickledtablelist)
else:
return []
def CreateOrExtendTable(self, table, columns):
"""CreateOrExtendTable(table, columns)
Create a new table in the database.
If a table of this name already exists, extend it to have any
additional columns present in the given list as well as
all of its current columns.
"""
assert isinstance(columns, list)
try:
self.CreateTable(table, columns)
except TableAlreadyExists:
# the table already existed, add any new columns
txn = None
try:
columnlist_key = _columns_key(table)
txn = self.env.txn_begin()
# load the current column list
oldcolumnlist = pickle.loads(
getattr(self.db, "get_bytes",
self.db.get)(columnlist_key, txn=txn, flags=db.DB_RMW))
# create a hash table for fast lookups of column names in the
# loop below
oldcolumnhash = {}
for c in oldcolumnlist:
oldcolumnhash[c] = c
# create a new column list containing both the old and new
# column names
newcolumnlist = copy.copy(oldcolumnlist)
for c in columns:
if not c in oldcolumnhash:
newcolumnlist.append(c)
# store the table's new extended column list
if newcolumnlist != oldcolumnlist :
# delete the old one first since we opened with DB_DUP
self.db.delete(columnlist_key, txn=txn)
getattr(self.db, "put_bytes", self.db.put)(columnlist_key,
pickle.dumps(newcolumnlist, 1),
txn=txn)
txn.commit()
txn = None
self.__load_column_info(table)
except db.DBError, dberror:
if txn:
txn.abort()
if sys.version_info < (2, 6) :
raise TableDBError, dberror[1]
else :
raise TableDBError, dberror.args[1]
def __load_column_info(self, table) :
"""initialize the self.__tablecolumns dict"""
# check the column names
try:
tcolpickles = getattr(self.db, "get_bytes",
self.db.get)(_columns_key(table))
except db.DBNotFoundError:
raise TableDBError, "unknown table: %r" % (table,)
if not tcolpickles:
raise TableDBError, "unknown table: %r" % (table,)
self.__tablecolumns[table] = pickle.loads(tcolpickles)
def __new_rowid(self, table, txn) :
"""Create a new unique row identifier"""
unique = 0
while not unique:
# Generate a random 64-bit row ID string
# (note: might have <64 bits of true randomness
# but it's plenty for our database id needs!)
blist = []
for x in xrange(_rowid_str_len):
blist.append(random.randint(0,255))
newid = struct.pack('B'*_rowid_str_len, *blist)
if sys.version_info[0] >= 3 :
newid = newid.decode("iso8859-1") # 8 bits
# Guarantee uniqueness by adding this key to the database
try:
self.db.put(_rowid_key(table, newid), None, txn=txn,
flags=db.DB_NOOVERWRITE)
except db.DBKeyExistError:
pass
else:
unique = 1
return newid
def Insert(self, table, rowdict) :
"""Insert(table, datadict) - Insert a new row into the table
using the keys+values from rowdict as the column values.
"""
txn = None
try:
if not getattr(self.db, "has_key")(_columns_key(table)):
raise TableDBError, "unknown table"
# check the validity of each column name
if not table in self.__tablecolumns:
self.__load_column_info(table)
for column in rowdict.keys() :
if not self.__tablecolumns[table].count(column):
raise TableDBError, "unknown column: %r" % (column,)
# get a unique row identifier for this row
txn = self.env.txn_begin()
rowid = self.__new_rowid(table, txn=txn)
# insert the row values into the table database
for column, dataitem in rowdict.items():
# store the value
self.db.put(_data_key(table, column, rowid), dataitem, txn=txn)
txn.commit()
txn = None
except db.DBError, dberror:
# WIBNI we could just abort the txn and re-raise the exception?
# But no, because TableDBError is not related to DBError via
# inheritance, so it would be backwards incompatible. Do the next
# best thing.
info = sys.exc_info()
if txn:
txn.abort()
self.db.delete(_rowid_key(table, rowid))
if sys.version_info < (2, 6) :
raise TableDBError, dberror[1], info[2]
else :
raise TableDBError, dberror.args[1], info[2]
def Modify(self, table, conditions={}, mappings={}):
"""Modify(table, conditions={}, mappings={}) - Modify items in rows matching 'conditions' using mapping functions in 'mappings'
* table - the table name
* conditions - a dictionary keyed on column names containing
a condition callable expecting the data string as an
argument and returning a boolean.
* mappings - a dictionary keyed on column names containing a
condition callable expecting the data string as an argument and
returning the new string for that column.
"""
try:
matching_rowids = self.__Select(table, [], conditions)
# modify only requested columns
columns = mappings.keys()
for rowid in matching_rowids.keys():
txn = None
try:
for column in columns:
txn = self.env.txn_begin()
# modify the requested column
try:
dataitem = self.db.get(
_data_key(table, column, rowid),
txn=txn)
self.db.delete(
_data_key(table, column, rowid),
txn=txn)
except db.DBNotFoundError:
# XXXXXXX row key somehow didn't exist, assume no
# error
dataitem = None
dataitem = mappings[column](dataitem)
if dataitem is not None:
self.db.put(
_data_key(table, column, rowid),
dataitem, txn=txn)
txn.commit()
txn = None
# catch all exceptions here since we call unknown callables
except:
if txn:
txn.abort()
raise
except db.DBError, dberror:
if sys.version_info < (2, 6) :
raise TableDBError, dberror[1]
else :
raise TableDBError, dberror.args[1]
def Delete(self, table, conditions={}):
"""Delete(table, conditions) - Delete items matching the given
conditions from the table.
* conditions - a dictionary keyed on column names containing
condition functions expecting the data string as an
argument and returning a boolean.
"""
try:
matching_rowids = self.__Select(table, [], conditions)
# delete row data from all columns
columns = self.__tablecolumns[table]
for rowid in matching_rowids.keys():
txn = None
try:
txn = self.env.txn_begin()
for column in columns:
# delete the data key
try:
self.db.delete(_data_key(table, column, rowid),
txn=txn)
except db.DBNotFoundError:
# XXXXXXX column may not exist, assume no error
pass
try:
self.db.delete(_rowid_key(table, rowid), txn=txn)
except db.DBNotFoundError:
# XXXXXXX row key somehow didn't exist, assume no error
pass
txn.commit()
txn = None
except db.DBError, dberror:
if txn:
txn.abort()
raise
except db.DBError, dberror:
if sys.version_info < (2, 6) :
raise TableDBError, dberror[1]
else :
raise TableDBError, dberror.args[1]
def Select(self, table, columns, conditions={}):
"""Select(table, columns, conditions) - retrieve specific row data
Returns a list of row column->value mapping dictionaries.
* columns - a list of which column data to return. If
columns is None, all columns will be returned.
* conditions - a dictionary keyed on column names
containing callable conditions expecting the data string as an
argument and returning a boolean.
"""
try:
if not table in self.__tablecolumns:
self.__load_column_info(table)
if columns is None:
columns = self.__tablecolumns[table]
matching_rowids = self.__Select(table, columns, conditions)
except db.DBError, dberror:
if sys.version_info < (2, 6) :
raise TableDBError, dberror[1]
else :
raise TableDBError, dberror.args[1]
# return the matches as a list of dictionaries
return matching_rowids.values()
def __Select(self, table, columns, conditions):
"""__Select() - Used to implement Select and Delete (above)
Returns a dictionary keyed on rowids containing dicts
holding the row data for columns listed in the columns param
that match the given conditions.
* conditions is a dictionary keyed on column names
containing callable conditions expecting the data string as an
argument and returning a boolean.
"""
# check the validity of each column name
if not table in self.__tablecolumns:
self.__load_column_info(table)
if columns is None:
            columns = self.__tablecolumns[table]
for column in (columns + conditions.keys()):
if not self.__tablecolumns[table].count(column):
raise TableDBError, "unknown column: %r" % (column,)
        # keyed on rows that match so far, containing dicts keyed on
# column names containing the data for that row and column.
matching_rowids = {}
# keys are rowids that do not match
rejected_rowids = {}
# attempt to sort the conditions in such a way as to minimize full
# column lookups
def cmp_conditions(atuple, btuple):
a = atuple[1]
b = btuple[1]
if type(a) is type(b):
# Needed for python 3. "cmp" vanished in 3.0.1
def cmp(a, b) :
if a==b : return 0
if a<b : return -1
return 1
if isinstance(a, PrefixCond) and isinstance(b, PrefixCond):
# longest prefix first
return cmp(len(b.prefix), len(a.prefix))
if isinstance(a, LikeCond) and isinstance(b, LikeCond):
# longest likestr first
return cmp(len(b.likestr), len(a.likestr))
return 0
if isinstance(a, ExactCond):
return -1
if isinstance(b, ExactCond):
return 1
if isinstance(a, PrefixCond):
return -1
if isinstance(b, PrefixCond):
return 1
# leave all unknown condition callables alone as equals
return 0
if sys.version_info < (2, 6) :
conditionlist = conditions.items()
conditionlist.sort(cmp_conditions)
else : # Insertion Sort. Please, improve
conditionlist = []
for i in conditions.items() :
for j, k in enumerate(conditionlist) :
r = cmp_conditions(k, i)
if r == 1 :
conditionlist.insert(j, i)
break
else :
conditionlist.append(i)
# Apply conditions to column data to find what we want
cur = self.db.cursor()
column_num = -1
for column, condition in conditionlist:
column_num = column_num + 1
searchkey = _search_col_data_key(table, column)
# speedup: don't linear search columns within loop
if column in columns:
savethiscolumndata = 1 # save the data for return
else:
savethiscolumndata = 0 # data only used for selection
try:
key, data = cur.set_range(searchkey)
while key[:len(searchkey)] == searchkey:
# extract the rowid from the key
rowid = key[-_rowid_str_len:]
if not rowid in rejected_rowids:
# if no condition was specified or the condition
# succeeds, add row to our match list.
if not condition or condition(data):
if not rowid in matching_rowids:
matching_rowids[rowid] = {}
if savethiscolumndata:
matching_rowids[rowid][column] = data
else:
if rowid in matching_rowids:
del matching_rowids[rowid]
rejected_rowids[rowid] = rowid
key, data = cur.next()
except db.DBError, dberror:
if dberror.args[0] != db.DB_NOTFOUND:
raise
continue
cur.close()
# we're done selecting rows, garbage collect the reject list
del rejected_rowids
# extract any remaining desired column data from the
# database for the matching rows.
if len(columns) > 0:
for rowid, rowdata in matching_rowids.items():
for column in columns:
if column in rowdata:
continue
try:
rowdata[column] = self.db.get(
_data_key(table, column, rowid))
except db.DBError, dberror:
if sys.version_info < (2, 6) :
if dberror[0] != db.DB_NOTFOUND:
raise
else :
if dberror.args[0] != db.DB_NOTFOUND:
raise
rowdata[column] = None
# return the matches
return matching_rowids
def Drop(self, table):
"""Remove an entire table from the database"""
txn = None
try:
txn = self.env.txn_begin()
# delete the column list
self.db.delete(_columns_key(table), txn=txn)
cur = self.db.cursor(txn)
# delete all keys containing this tables column and row info
table_key = _search_all_data_key(table)
while 1:
try:
key, data = cur.set_range(table_key)
except db.DBNotFoundError:
break
# only delete items in this table
if key[:len(table_key)] != table_key:
break
cur.delete()
# delete all rowids used by this table
table_key = _search_rowid_key(table)
while 1:
try:
key, data = cur.set_range(table_key)
except db.DBNotFoundError:
break
# only delete items in this table
if key[:len(table_key)] != table_key:
break
cur.delete()
cur.close()
# delete the tablename from the table name list
tablelist = pickle.loads(
getattr(self.db, "get_bytes", self.db.get)(_table_names_key,
txn=txn, flags=db.DB_RMW))
try:
tablelist.remove(table)
except ValueError:
# hmm, it wasn't there, oh well, that's what we want.
pass
            # delete 1st, in case we opened with DB_DUP
self.db.delete(_table_names_key, txn=txn)
getattr(self.db, "put_bytes", self.db.put)(_table_names_key,
pickle.dumps(tablelist, 1), txn=txn)
txn.commit()
txn = None
if table in self.__tablecolumns:
del self.__tablecolumns[table]
except db.DBError, dberror:
if txn:
txn.abort()
raise TableDBError(dberror.args[1])
| apache-2.0 |
samuelbustamante/dirinfo | tickets/models.py | 1 | 2476 | # coding=utf-8
from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
STATUS_OPEN = 0
STATUS_ON_HOLD = 1
STATUS_BELATED = 2
STATUS_RESOLVED = 3
STATUS_CLOSED = 4
PRIORITY_LOW = 0
PRIORITY_NORMAL = 1
PRIORITY_HIGH = 2
PERMISSION_CAN_ASSIGNED = "can_assigned"
PERMISSION_CAN_CHANGE_STATUS = "can_change_status"
PERMISSION_CAN_VIEW_PRIORITY = "can_view_priority"
PERMISSION_CAN_CHANGE_PRIORITY = "can_change_priority"
class Ticket(models.Model):
"""
Ticket
"""
STATUS_CHOICES = (
(STATUS_OPEN, u'Abierto'),
(STATUS_ON_HOLD, u'En proceso'),
        (STATUS_BELATED, u'Atrasado'),
(STATUS_RESOLVED, u'Resuelto'),
(STATUS_CLOSED, u'Cerrado'),
)
PRIORITY_CHOICES = (
(PRIORITY_LOW, u"Baja"),
(PRIORITY_NORMAL, u"Normal"),
(PRIORITY_HIGHT, u"Alta"),
)
title = models.CharField(max_length=64)
description = models.TextField()
created_on = models.DateTimeField(auto_now_add=True)
created_by = models.ForeignKey(User, related_name="tickets_created")
status = models.PositiveSmallIntegerField(default=STATUS_OPEN,
choices=STATUS_CHOICES)
priority = models.PositiveSmallIntegerField(default=PRIORITY_NORMAL,
choices=PRIORITY_CHOICES)
    # null has no effect on ManyToManyField, so only blank is set
    assigned = models.ManyToManyField(User, blank=True,
                                      related_name="tickets_assigned")
def get_absolute_url(self):
return reverse("tickets:detail", args=[str(self.id)])
def __unicode__(self):
return u"%s" % self.title
class Meta:
permissions = (
(PERMISSION_CAN_ASSIGNED, u"Can assigned"),
(PERMISSION_CAN_CHANGE_STATUS, u"Can change status"),
(PERMISSION_CAN_VIEW_PRIORITY, u"Can view priority"),
(PERMISSION_CAN_CHANGE_PRIORITY, u"Can change priority"),
)
ordering = ["-created_on"]
class Comment(models.Model):
"""
Comment
"""
comment = models.TextField()
created_by = models.ForeignKey(User)
created_on = models.DateTimeField(auto_now_add=True)
ticket = models.ForeignKey(Ticket, related_name="comments")
def __unicode__(self):
return u"%s" % self.comment
class Meta:
ordering = ["-created_on"]
verbose_name = u"comment"
verbose_name_plural = u"comments"
| apache-2.0 |
Salat-Cx65/python-for-android | python-build/python-libs/gdata/build/lib/gdata/sample_util.py | 133 | 7858 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides utility functions used with command line samples."""
# This module is used for version 2 of the Google Data APIs.
import sys
import getpass
import urllib
import gdata.gauth
__author__ = '[email protected] (Jeff Scudder)'
CLIENT_LOGIN = 1
AUTHSUB = 2
OAUTH = 3
HMAC = 1
RSA = 2
def get_param(name, prompt='', secret=False, ask=True):
# First, check for a command line parameter.
for i in xrange(len(sys.argv)):
if sys.argv[i].startswith('--%s=' % name):
return sys.argv[i].split('=')[1]
elif sys.argv[i] == '--%s' % name:
return sys.argv[i + 1]
if ask:
# If it was not on the command line, ask the user to input the value.
prompt = '%s: ' % prompt
if secret:
return getpass.getpass(prompt)
else:
return raw_input(prompt)
else:
return None
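# Example invocations (a minimal sketch; the parameter names are the ones
# used below). A value can come from the command line:
#
#     python sample.py --email=user@example.com
#
# or be requested interactively:
#
#     email = get_param('email', 'Please enter your username')
#     password = get_param('password', 'Password', secret=True)
#     source = get_param('source', ask=False)  # None if not on command line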
def authorize_client(client, auth_type=None, service=None, source=None,
scopes=None, oauth_type=None, consumer_key=None,
consumer_secret=None):
"""Uses command line arguments, or prompts user for token values."""
if auth_type is None:
auth_type = int(get_param(
'auth_type', 'Please choose the authorization mechanism you want'
' to use.\n'
'1. to use your email address and password (ClientLogin)\n'
'2. to use a web browser to visit an auth web page (AuthSub)\n'
        '3. if you have registered to use OAuth\n'))
# Get the scopes for the services we want to access.
if auth_type == AUTHSUB or auth_type == OAUTH:
if scopes is None:
scopes = get_param(
'scopes', 'Enter the URL prefixes (scopes) for the resources you '
'would like to access.\nFor multiple scope URLs, place a comma '
'between each URL.\n'
'Example: http://www.google.com/calendar/feeds/,'
'http://www.google.com/m8/feeds/\n').split(',')
elif isinstance(scopes, (str, unicode)):
scopes = scopes.split(',')
if auth_type == CLIENT_LOGIN:
email = get_param('email', 'Please enter your username')
password = get_param('password', 'Password', True)
if service is None:
service = get_param(
'service', 'What is the name of the service you wish to access?'
'\n(See list:'
' http://code.google.com/apis/gdata/faq.html#clientlogin)')
if source is None:
source = get_param('source', ask=False)
client.client_login(email, password, source=source, service=service)
elif auth_type == AUTHSUB:
auth_sub_token = get_param('auth_sub_token', ask=False)
session_token = get_param('session_token', ask=False)
private_key = None
auth_url = None
single_use_token = None
rsa_private_key = get_param(
'rsa_private_key',
'If you want to use secure mode AuthSub, please provide the\n'
' location of your RSA private key which corresponds to the\n'
' certificate you have uploaded for your domain. If you do not\n'
' have an RSA key, simply press enter')
if rsa_private_key:
try:
private_key_file = open(rsa_private_key, 'rb')
private_key = private_key_file.read()
private_key_file.close()
except IOError:
print 'Unable to read private key from file'
if private_key is not None:
if client.auth_token is None:
if session_token:
client.auth_token = gdata.gauth.SecureAuthSubToken(
session_token, private_key, scopes)
return
elif auth_sub_token:
client.auth_token = gdata.gauth.SecureAuthSubToken(
auth_sub_token, private_key, scopes)
client.upgrade_token()
return
auth_url = gdata.gauth.generate_auth_sub_url(
'http://gauthmachine.appspot.com/authsub', scopes, True)
print 'with a private key, get ready for this URL', auth_url
else:
if client.auth_token is None:
if session_token:
client.auth_token = gdata.gauth.AuthSubToken(session_token, scopes)
return
elif auth_sub_token:
client.auth_token = gdata.gauth.AuthSubToken(auth_sub_token, scopes)
client.upgrade_token()
return
auth_url = gdata.gauth.generate_auth_sub_url(
'http://gauthmachine.appspot.com/authsub', scopes)
print 'Visit the following URL in your browser to authorize this app:'
print str(auth_url)
print 'After agreeing to authorize the app, copy the token value from the'
print ' URL. Example: "www.google.com/?token=ab12" token value is ab12'
token_value = raw_input('Please enter the token value: ')
if private_key is not None:
single_use_token = gdata.gauth.SecureAuthSubToken(
token_value, private_key, scopes)
else:
single_use_token = gdata.gauth.AuthSubToken(token_value, scopes)
client.auth_token = single_use_token
client.upgrade_token()
elif auth_type == OAUTH:
if oauth_type is None:
oauth_type = int(get_param(
'oauth_type', 'Please choose the authorization mechanism you want'
' to use.\n'
'1. use an HMAC signature using your consumer key and secret\n'
'2. use RSA with your private key to sign requests\n'))
consumer_key = get_param(
            'consumer_key', 'Please enter your OAuth consumer key '
            'which identifies your app')
if oauth_type == HMAC:
consumer_secret = get_param(
                'consumer_secret', 'Please enter your OAuth consumer secret '
                'which you share with the OAuth provider', True)
# Swap out this code once the client supports requesting an oauth token.
# Get a request token.
request_token = client.get_oauth_token(
scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
consumer_secret=consumer_secret)
elif oauth_type == RSA:
rsa_private_key = get_param(
'rsa_private_key',
'Please provide the location of your RSA private key which\n'
' corresponds to the certificate you have uploaded for your domain.')
try:
private_key_file = open(rsa_private_key, 'rb')
private_key = private_key_file.read()
private_key_file.close()
except IOError:
print 'Unable to read private key from file'
request_token = client.get_oauth_token(
scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
rsa_private_key=private_key)
else:
print 'Invalid OAuth signature type'
return None
# Authorize the request token in the browser.
print 'Visit the following URL in your browser to authorize this app:'
print str(request_token.generate_authorization_url())
print 'After agreeing to authorize the app, copy URL from the browser\'s'
print ' address bar.'
url = raw_input('Please enter the url: ')
gdata.gauth.authorize_request_token(request_token, url)
# Exchange for an access token.
client.auth_token = client.get_access_token(request_token)
else:
print 'Invalid authorization type.'
return None
def print_options():
"""Displays usage information, available command line params."""
# TODO: fill in the usage description for authorizing the client.
print ''
| apache-2.0 |
nhmc/LAE | python_modules/cloudy/utils.py | 1 | 11349 | from barak.utilities import between
from barak.io import parse_config, readtxt
from scipy.integrate import simps
import numpy as np
from barak.constants import Ryd_Ang, pi, hplanck
import os
def get_data_path():
""" Return the path to the data directory for this package.
"""
return os.path.abspath(__file__).rsplit('/', 1)[0] + '/data/'
def lya_gamma_faucher(z):
""" The photoionisation rate of HI / 10^-12
Units are photons/s
from Faucher-Giguere et al. 2009 Table 2,
http://adsabs.harvard.edu/abs/2009ApJ...703.1416F
"""
gamma = (0.0384, 0.0728, 0.1295, 0.2082, 0.3048, 0.4074, 0.4975,
0.5630, 0.6013, 0.6142, 0.6053, 0.5823, 0.5503, 0.5168,
0.4849, 0.4560, 0.4320, 0.4105, 0.3917, 0.3743, 0.3555,
0.3362, 0.3169, 0.3001, 0.2824, 0.2633, 0.2447, 0.2271,
0.2099)
zvals = 0.25 * np.arange(len(gamma))
return np.interp(z, zvals, gamma)
def hcross_section(energy):
""" The photoionization cross section of HI in units of cm^2
at the given energies.
Energy must have units of Rydbergs. From Ferland & Osterbrock page
20. """
energy = np.atleast_1d(energy)
a_nu = np.zeros(len(energy), float)
A0 = 6.304e-18
c0 = energy > 1
if c0.any():
eps = np.sqrt(energy[c0] - 1.)
num = np.exp(4. - ((4 * np.arctan(eps)) / eps))
den = 1. - np.exp(-2. * pi/eps)
a_nu[c0] = A0 * (1. / energy[c0])**4 * num / den
a_nu[energy == 1] = A0
return a_nu
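# Example usage (a minimal sketch): the cross section is largest at the
# ionisation edge (1 Ryd) and falls off steeply at higher energies.
#
#     a_nu = hcross_section([1., 2., 4.])
#     # a_nu[0] is 6.304e-18 cm^2, the cross section at the Lyman limit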
def find_gamma(energy, jnu):
""" Find the photoionization rate / 1e12 given the spectrum as a
function of energy in Rydbergs.
in units of photons / s.
This is copying JFH's code in cldy_cuba_jfh.pro.
"""
sigma_nu = hcross_section(energy)
# output is in units of 1e-12
integrand = 4. * pi * jnu * sigma_nu / hplanck * 1e12
log_energy = np.log10(energy)
isort = np.argsort(log_energy)
    # integrate over log10(energy): dE/E = ln(10) d(log10 E), so the ln(10)
    # factor supplies the 1/E needed in the Gamma integral
gamma = np.log(10) * simps(integrand[isort], x=log_energy[isort])
return gamma
def read_observed(filename):
""" Read a config-style file with ions and column densities.
    Each ion has one line with two or three values: the first is log10
    of N (cm^-2), the second and third are the lower and upper errors.
    Blank lines and lines starting with '#' are ignored.
An example file:
# column densities for the component showing D, to be compared to
# cloudy models.
    # logN, logN sigma down, logN sigma up (same as down if not given).
    # If sigma low is 0, then the first value is a lower limit. If sigma
    # high is 0, then the first value is an upper limit.
# Errors are 1 sigma from vpfit
HI = 14.88 0.01
AlIII = 10.79 0.38
AlII = 10.97 0.16
CII = 12.04 0.15
MgII = 11.33 0.17
SiIV = 12.495 0.012
CIV = 13.369 0.006
# lower limit (saturated). We assume the upper limit is equal
# to NHI
CIII = 13.0 0 5
# upper limits, either blends or non-detections
SiII = 11.77 5 0
SiIII = 12.665 5 0
OI = 12.407 5 0
NII = 13.283 5 0
"""
obs = parse_config(filename)
for k in obs:
# skip entries that are used for the priors when fitting
# with emcee
if k.startswith('min ') or k.startswith('max '):
continue
vals = map(float, obs[k].split())
if len(vals) == 2:
obs[k] = vals[0], vals[1], vals[1]
elif len(vals) == 3:
obs[k] = tuple(vals)
else:
raise ValueError('Error parsing entry %s' % obs[k])
return obs
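# Example usage (a minimal sketch; the filename is illustrative):
#
#     obs = read_observed('observed_logN.cfg')
#     logN, sig_lo, sig_hi = obs['HI']   # e.g. (14.88, 0.01, 0.01)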
def get_ratio(a, b):
""" Measure minimum and maximum ratio for a/b"""
return (a[0]-a[1]) - (b[0]+b[2]), a[0]-b[0], (a[0]+a[2]) - (b[0]-b[1])
def calc_uvb(redshift, cuba_name, match_fg=False):
""" Calculate the UV background for a Haardt-Madau model.
Returns a dictionary with information about the UV model:
======= ===========================================================
energy energy in Rydbergs
logjnu log10 of the Intensity at each energy (erg/s/cm^2/Hz/ster)
    mult    multiplier that was applied to jnu to match Lya forest Gamma
measurements
======= ===========================================================
"""
if cuba_name.endswith('UVB.out'):
redshifts, wave, jnu_z = read_cuba(cuba_name)
else:
redshifts, wave, jnu_z = read_XIDL_cuba(cuba_name)
jnu0 = interp_cuba_to_z(redshift, redshifts, jnu_z)
energy0 = Ryd_Ang / wave # ergs
isort = energy0.argsort()
energy = energy0[isort]
jnu = jnu0[isort]
# scale to match faucher-giguere background
mult = 1
if match_fg:
gamma_fg = lya_gamma_faucher(redshift)
mult = gamma_fg / find_gamma(energy, jnu)
print 'Scaling Jnu by %.3g to match FG gamma' % mult
jnu *= mult
logjnu = np.where(jnu > 1e-30, np.log10(jnu), -30)
return dict(energy=energy, logjnu=logjnu, mult=mult)
def calc_local_jnu(wa, logFtot, distkpc, f_esc=1, NHI_fesc=1e20):
"""
Parameters
----------
wa : array_like, shape(N,)
Wavelengths for the input spectrum in Angstroms.
logFtot : array_like, shape (N,)
log10 of the total flux from the galaxy in erg/s/Ang.
distkpc : float
Distance at which to calculate Jnu in kpc.
f_esc : float
Escape fraction (<= 1). Default 1.
    NHI_fesc : float
        See Cantalupo 2010, MNRAS, 403, L16. Default 1e20.

    Returns
    -------
nu, logJnu : arrays of shape (N,)
The frequency (Hz) and log10 of the intensity of the output spectrum
(erg/s/cm^2/Hz/ster) at the given distance.
"""
from barak.constants import kpc, c, hplanck, Ryd
wa_cm = wa * 1e-8
# erg/s/cm
fwatot_cm = 10**logFtot * 1e8
# total area in cm^2 over which this energy is spread
area = 4 * np.pi * (distkpc * kpc)**2
fwa_local = fwatot_cm / area
# fnu = lambda^2 / c * f_lambda
Fnu = wa_cm**2 / c * fwa_local
# erg/s/cm^2/Hz/ster
Jnu = Fnu / (4 * np.pi)
nu = c / wa_cm
nu = nu[::-1]
Jnu = Jnu[::-1]
if f_esc < 1:
sigma_nu = hcross_section(nu * hplanck / Ryd)
cond = (nu * hplanck / Ryd) > 1.
Jnu[cond] = Jnu[cond] * (
f_esc + (1 - f_esc) * np.exp(-1 * sigma_nu[cond] * NHI_fesc) )
return nu, np.log10(Jnu)
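# Example usage (a minimal sketch with made-up numbers): a flat spectrum
# evaluated 10 kpc from the galaxy with a 50% escape fraction.
#
#     wa = np.linspace(100., 2000., 500)       # Angstroms
#     logFtot = 42. * np.ones_like(wa)         # log10(erg/s/Ang)
#     nu, logJnu = calc_local_jnu(wa, logFtot, distkpc=10., f_esc=0.5)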
def tilt_spec(k, energy, log10jnu, emin=1., emax=10.):
""" Tilt the input spectrum between emin and emax by k.
The spectrum < emin is unaffected, but is given a constant shift
above emax to avoid a discontinuity.
Parameters
----------
k : float
Tilt parameter. 0 means no tilt, a positive value makes the
spectrum shallower, and a negative value makes it steeper.
energy : array of shape(n,)
Energy in same units as emin, emax (default is Rydbergs)
log10jnu : array of shape(n,)
log10 of the intensity.
emin, emax : floats
The start and end energies in Rydbergs of the region to which
the tilt is applied.
Returns
-------
log10jnu_tilted : array of shape (n,)
The input spectrum with a tilt applied.
"""
new = np.array(log10jnu, copy=True)
c0 = between(energy, emin, emax)
new[c0] = log10jnu[c0] + k * (np.log10(energy[c0]) - np.log10(emin))
c1 = energy > emax
new[c1] = log10jnu[c1] + (new[c0][-1] - log10jnu[c0][-1])
return new
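# Example usage (a minimal sketch; the CUBA filename is illustrative): make
# the ionising part of the UV background slightly shallower.
#
#     uvb = calc_uvb(2.5, 'UVB.out')
#     logjnu_tilted = tilt_spec(0.3, uvb['energy'], uvb['logjnu'])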
def read_starburst99(filename):
""" Read a spectrum generated by starburst99.
Takes the spectrum with the latest time.
Returns
-------
wa, F: ndarrays, shape (N,)
wavelength in Angstroms and log10(Flux in erg/s/A)
"""
T = np.genfromtxt(filename, skiprows=6, names='time,wa,tot,star,neb')
times = np.unique(T['time'])
cond = T['time'] == times[-1]
wa = T['wa'][cond]
F = T['tot'][cond]
return wa, F
def read_XIDL_cuba(filename):
""" Parse a Haart & Madau CUBA file as given in XIDL.
return jnu as a function of wavelength and redshift. Removes
duplicate wavelength points.
Returns
-------
redshifts : array of floats with shape (N,)
wa : array of floats with shape (M,)
Wavelengths in Angstroms.
jnu : array of floats with shape (N, M)
Flux in erg/s/Hz/cm^2/sr.
"""
fh = open(filename)
rows = fh.readlines()
fh.close()
redshifts, wa, jnu = [], [], []
# this is the row index
i = 0
# another index keep track of which set of redshifts we're at
indz = 0
while i < len(rows):
# Read the line of redshifts
zvals = [float(val) for val in rows[i].split()[2:]]
redshifts.extend(zvals)
# need a jnu array for each redshift
jnu.extend([] for z in zvals)
i += 1
# Read jnu values and wavelengths until we hit another row of
# redshifts or the end of the file
while i < len(rows) and not rows[i].lstrip().startswith('#'):
#print i, rows[i]
items = []
for val in rows[i].split():
try:
items.append(float(val))
except ValueError:
if not(val.endswith('-310') or
val.endswith('-320')):
print 'replacing jnu value of ', val, 'with 0'
items.append(0.0)
if indz == 0:
wa.append(items[0])
for j,jnu_val in enumerate(items[1:]):
jnu[indz+j].append(jnu_val)
i += 1
indz += len(zvals)
redshifts, wa, jnu = (np.array(a) for a in (redshifts,wa,jnu))
# remove duplicates
_, iuniq = np.unique(wa, return_index=True)
wa1 = wa[iuniq]
jnu1 = jnu[:, iuniq]
return redshifts, wa1, jnu1
def read_cuba(filename):
""" Parse a Haart & Madau CUBA file.
return jnu as a function of wavelength and redshift. Removes
duplicate wavelength points.
Returns
-------
redshifts : array of floats with shape (N,)
wa : array of floats with shape (M,)
Wavelengths in Angstroms.
jnu : array of floats with shape (N, M)
Flux in erg/s/Hz/cm^2/sr.
"""
# first read the redshifts
fh = open(filename)
for row in fh:
r = row.strip()
if not r or r.startswith('#'):
continue
redshifts = np.array(map(float, r.split()))
break
    # then the wavelengths and jnu values
cols = readtxt(filename, skip=1)
assert len(cols) == len(redshifts) + 1
wa = cols[0]
jnu = np.array(cols[1:])
# remove duplicates
_, iuniq = np.unique(wa, return_index=True)
wa1 = wa[iuniq]
jnu1 = jnu[:, iuniq]
return redshifts, wa1, jnu1
def interp_cuba_to_z(ztarget, redshifts, jnu):
""" Find jnu as a function of wavelength at the target redshift.
Linearly interpolates between redshifts for the 2d array jnu.
    Parameters
    ----------
ztarget : float
redshifts : array of floats, shape (N,)
jnu : array of floats, shape (N, M)
Returns
-------
jnu_at_ztarget : array of floats shape (M,)
"""
Nwa = jnu.shape[1]
jnu_out = [np.interp(ztarget, redshifts, jnu.T[i]) for i in xrange(Nwa)]
return np.array(jnu_out)
| mit |