diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000..648632ce
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+* text=auto
+*.py text diff=python
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..4f2b0741
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,19 @@
+*\.py[co]
+*~
+*\.orig
+*\#.*
+*@.*
+.coverage
+htmlcov
+.DS_Store
+venv
+pyvenv
+distribute_setup.py
+distribute-*.tar.gz
+build
+dist
+*.egg-info
+.tox
+.idea/
+*.iml
+trollius/_overlapped.pyd
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 00000000..5a2c7d7b
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,18 @@
+language: python
+
+os:
+  - linux
+  - osx
+
+python:
+  - 3.3
+  - 3.4
+  - "nightly"
+
+install:
+  - pip install asyncio
+  - python setup.py install
+
+script:
+  - python runtests.py
+  - PYTHONASYNCIODEBUG=1 python runtests.py
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 00000000..3c3966b9
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,14 @@
+Trollius authors
+================
+
+Ian Wienand
+Marc Schlaich
+Victor Stinner - creator of the Trollius project
+
+The photo of the Trollius flower was taken by Imartin6 and distributed under
+the CC BY-SA 3.0 license. It comes from:
+http://commons.wikimedia.org/wiki/File:Trollius_altaicus.jpg
+
+Trollius is a port of the asyncio project to Python 2; see also the authors
+of the asyncio project (AUTHORS file).
+
diff --git a/CHANGES.rst b/CHANGES.rst
new file mode 100644
index 00000000..f2e31294
--- /dev/null
+++ b/CHANGES.rst
@@ -0,0 +1,735 @@
+=========
+ Changes
+=========
+
+.. warning::
+   The Trollius project is now deprecated!
+
+
+2.2.2 (unreleased)
+==================
+
+- Nothing changed yet.
+
+
+2.2.1 (2021-04-28)
+==================
+
+- Properly reraise socket.error with an errno of EBADF as an OSError.
+
+2.2.post1 (2019-07-29)
+======================
+
+This is a packaging-only release. It is intended to be the last
+release.
+
+- Release Windows wheels for CPython 2.7.
+- Use ``python_requires`` to restrict installation to Python 2.7.
+
+Version 2.2 (2018-03-09)
+========================
+
+Changes:
+
+* ``run_aiotest.py`` has been removed since the ``aiotest`` project has been
+  removed
+* Add the "No Maintenance Intended" badge to README
+* The Trollius documentation is no longer online:
+  http://trollius.readthedocs.io/ has been removed
+* Update the GitHub URL to: https://github.com/vstinner/trollius
+
+Version 2.1 (2016-02-05)
+========================
+
+Changes:
+
+* The Trollius project is now deprecated.
+* Ugly hack to support Python 3.5 with PEP 479. asyncio coroutines are
+  not supported on Python 3.5.
+* Better exception tracebacks. Patch written by Dhawal Yogesh Bhanushali.
+* Drop support for Python 2.6 and 3.2.
+* Fix tests on Windows with Python 2. Patch written by Gabi Davar.
+
+
+Version 2.0 (2015-07-13)
+========================
+
+Summary:
+
+* SSL support on Windows for the proactor event loop with Python 3.5 and
+  newer
+* Many race conditions were fixed in the proactor event loop
+* Trollius moved to GitHub and the fork was recreated on top of the asyncio
+  git repository
+* Many resource leaks (ex: unclosed sockets) were fixed
+* Optimization of socket connections: avoid calling the slow getaddrinfo()
+  function to check that the address is already resolved. The check is now
+  only done in debug mode.
+
+The Trollius project moved from Bitbucket to GitHub.
+The project is now a fork of the Git repository of the asyncio project
+(previously called the "tulip" project); the Trollius source code lives in
+the trollius branch.
+
+The new Trollius home page is now: https://github.com/haypo/trollius
+
+The asyncio project moved to: https://github.com/python/asyncio
+
+Note: PEP 492 is not supported by Trollius yet.
+
+API changes:
+
+* Issue #234: Drop JoinableQueue on Python 3.5+
+* Add the asyncio.ensure_future() function, previously called async().
+  The async() function is now deprecated.
+* New event loop methods: set_task_factory() and get_task_factory().
+* Python issue #23347: Make BaseSubprocessTransport.wait() private.
+* Python issue #23347: send_signal(), kill() and terminate() methods of
+  BaseSubprocessTransport now check if the transport was closed and if the
+  process exited.
+* Python issue #23209, #23225: selectors.BaseSelector.get_key() now raises a
+  RuntimeError if the selector is closed. And selectors.BaseSelector.close()
+  now clears its internal reference to the selector mapping to break a
+  reference cycle. Initial patch written by Martin Richard.
+* PipeHandle.fileno() of asyncio.windows_utils now raises an exception if the
+  pipe is closed.
+* Remove Overlapped.WaitNamedPipeAndConnect() from the _overlapped module;
+  it is no longer used and it had issues.
+* Python issue #23537: Remove 2 unused private methods of
+  BaseSubprocessTransport: _make_write_subprocess_pipe_proto,
+  _make_read_subprocess_pipe_proto. These methods only raised
+  NotImplementedError and were never used.
+* Remove the unused SSLProtocol._closing attribute
+
+New SSL implementation:
+
+* Python issue #22560: On Python 3.5 and newer, use the new SSL
+  implementation based on ssl.MemoryBIO instead of the legacy SSL
+  implementation. Patch written by Antoine Pitrou, based on the work of
+  Geert Jansen.
+* If available, the new SSL implementation can be used by ProactorEventLoop
+  to support SSL.
+
+Enhance, fix and clean up the IocpProactor:
+
+* Python issue #23293: Rewrite IocpProactor.connect_pipe(). Add
+  _overlapped.ConnectPipe() which tries to connect to the pipe for
+  asynchronous I/O (overlapped): call CreateFile() in a loop until it doesn't
+  fail with ERROR_PIPE_BUSY. Use an increasing delay between 1 ms and 100 ms.
+* Tulip issue #204: Fix IocpProactor.accept_pipe().
+  Overlapped.ConnectNamedPipe() now returns a boolean: True if the pipe is
+  connected (if ConnectNamedPipe() failed with ERROR_PIPE_CONNECTED), False
+  if the connection is in progress.
+* Tulip issue #204: Fix IocpProactor.recv(). If ReadFile() fails with
+  ERROR_BROKEN_PIPE, the operation is not pending: don't register the
+  overlapped.
+* Python issue #23095: Rewrite _WaitHandleFuture.cancel().
+  _WaitHandleFuture.cancel() now waits until the wait is cancelled to clear
+  its reference to the overlapped object. To wait until the cancellation is
+  done, UnregisterWaitEx() is used with an event instead of UnregisterWait().
+* Python issue #23293: Rewrite IocpProactor.connect_pipe() as a coroutine.
+  Use a coroutine with asyncio.sleep() instead of call_later() to ensure that
+  the scheduled call is cancelled.
+* Fix ProactorEventLoop.start_serving_pipe(). If a client connected before
+  the server was closed: drop the client (close the pipe) and exit.
+* Python issue #23293: Clean up IocpProactor.close(). The special case for
+  connect_pipe() is no longer needed: connect_pipe() doesn't use overlapped
+  operations anymore.
+* IocpProactor.close(): don't cancel futures which are already cancelled
+* Enhance (fix) BaseProactorEventLoop._loop_self_reading(). Handle
+  CancelledError correctly: just exit. On error, log the exception and exit;
+  don't try to close the event loop (it doesn't work).
+
+Bug fixes:
+
+* Fix LifoQueue's and PriorityQueue's put() and task_done().
+* Issue #222: Fix the @coroutine decorator for functions without a __name__
+  attribute like functools.partial(). Also enhance the representation of a
+  CoroWrapper if the coroutine function is a functools.partial().
+* Python issue #23879: SelectorEventLoop.sock_connect() must not call
+  connect() again if the first call to connect() raises an InterruptedError.
+  When the C function connect() fails with EINTR, the connection runs in the
+  background. We have to wait until the socket becomes writable to be
+  notified whether the connection succeeds or fails.
+* Fix _SelectorTransport.__repr__() if the event loop is closed
+* Fix repr(BaseSubprocessTransport) if it didn't start yet
+* Work around CPython bug #23353. Don't use yield/yield-from in an except
+  block of a generator. Store the exception and handle it outside the except
+  block.
+* Fix BaseSelectorEventLoop._accept_connection(). Close the transport on
+  error. In debug mode, log errors using call_exception_handler().
+* Fix _UnixReadPipeTransport and _UnixWritePipeTransport. Only start reading
+  when connection_made() has been called.
+* Fix _SelectorSslTransport.close(). Don't call protocol.connection_lost() if
+  protocol.connection_made() was not called yet: if the SSL handshake failed
+  or is still in progress. The close() method can be called if the creation
+  of the connection is cancelled, by a timeout for example.
+* Fix the _SelectorDatagramTransport constructor. Only start reading after
+  connection_made() has been called.
+* Fix the _SelectorSocketTransport constructor. Only start reading when
+  connection_made() has been called: protocol.data_received() must not be
+  called before protocol.connection_made().
+* Fix SSLProtocol.eof_received(). Wake up the waiter if it is not done yet.
+* Close transports on error. Fix create_datagram_endpoint(),
+  connect_read_pipe() and connect_write_pipe(): close the transport if the
+  task is cancelled or on error.
+* Close the transport on subprocess creation failure
+* Fix _ProactorBasePipeTransport.close(). Set the _read_fut attribute to None
+  after cancelling it.
+* Python issue #23243: Fix _UnixWritePipeTransport.close(). Do nothing if the
+  transport is already closed. Before, it was not possible to close the
+  transport twice.
+* Python issue #23242: SubprocessStreamProtocol now closes the subprocess
+  transport at subprocess exit. Also clear its reference to the transport.
+* Fix BaseEventLoop._create_connection_transport(). Close the transport if
+  the creation of the transport (the waiter) gets an exception.
+* Python issue #23197: On SSL handshake failure, check if the waiter is
+  cancelled before setting its exception.
+* Python issue #23173: Fix SubprocessStreamProtocol.connection_made() to
+  handle a cancelled waiter.
+* Python issue #23173: If an exception is raised during the creation of a
+  subprocess, kill the subprocess (close pipes, kill and read the return
+  status). Log an error in such a case.
+* Python issue #23209: Break some reference cycles in asyncio. Patch written
+  by Martin Richard.
+
+Optimization:
+
+* Only call _check_resolved_address() in debug mode.
+  _check_resolved_address() is implemented with getaddrinfo(), which is
+  slow. If available, use socket.inet_pton() instead of socket.getaddrinfo(),
+  because it is much faster.
+
+Other changes:
+
+* Python issue #23456: Add missing @coroutine decorators
+* Python issue #23475: Fix test_close_kill_running(). Really kill the child
+  process, don't mock the Popen.kill() method completely. This change fixes
+  memory leaks and reference leaks.
+* BaseSubprocessTransport: repr() mentions when the child process is running
+* BaseSubprocessTransport.close() doesn't try to kill the process if it
+  already finished.
+* Tulip issue #221: Fix the docstrings of QueueEmpty and QueueFull
+* Fix the subprocess_attach_write_pipe example. Close the transport, not the
+  pipe directly.
+* Python issue #23347: send_signal(), terminate() and kill() no longer check
+  if the transport was closed. The check broke a Tulip example and the
+  limitation was arbitrary; checking if _proc is None should be enough. Also
+  enhance close(): do nothing when called a second time.
+* Python issue #23347: Refactor the creation of subprocess transports.
+* Python issue #23243: On Python 3.4 and newer, emit a ResourceWarning when
+  an event loop or a transport is not explicitly closed
+* tox.ini: enable ResourceWarning warnings
+* Python issue #23243: test_sslproto: Close transports explicitly
+* SSL transports now clear their reference to the waiter.
+* Python issue #23208: Add BaseEventLoop._current_handle. In debug mode,
+  BaseEventLoop._run_once() now sets the BaseEventLoop._current_handle
+  attribute to the handle currently being executed.
+* Replace test_selectors.py with the file of Python 3.5 adapted for asyncio
+  and Python 3.3.
+* Tulip issue #184: The FlowControlMixin constructor now gets the event loop
+  if the loop parameter is not set.
+* _ProactorBasePipeTransport now sets the _sock attribute to None when the
+  transport is closed.
+* Python issue #23219: cancelling wait_for() now cancels the task
+* Python issue #23243: Close event loops and transports explicitly in tests
+* Python issue #23140: Fix cancellation of Process.wait(). Check the state of
+  the waiter future before setting its result.
+* Python issue #23046: Expose the BaseEventLoop class in the asyncio
+  namespace
+* Python issue #22926: In debug mode, the call_soon(), call_at() and
+  call_later() methods of BaseEventLoop now use the identifier of the current
+  thread to ensure that they are called from the thread running the event
+  loop. Before, the get_event_loop() method was used to check the thread, and
+  no exception was raised when the thread had no event loop. Now the methods
+  always raise an exception in debug mode when called from the wrong thread.
+  It should help to notice misuse of the API.
+
+2014-12-19: Version 1.0.4
+=========================
+
+Changes:
+
+* Python issue #22922: create_task(), call_at(), call_soon(),
+  call_soon_threadsafe() and run_in_executor() now raise an error if the
+  event loop is closed. Initial patch written by Torsten Landschoff.
+* Python issue #22921: Don't require OpenSSL SNI to pass a hostname to ssl
+  functions. Patch by Donald Stufft.
+* Add run_aiotest.py: run the aiotest test suite.
+* tox now also runs the aiotest test suite
+* Python issue #23074: get_event_loop() now raises an exception if the thread
+  has no event loop even if assertions are disabled.
+
+Bugfixes:
+
+* Fix a race condition in BaseSubprocessTransport._try_finish(): ensure that
+  connection_made() is called before connection_lost().
+* Python issue #23009: selectors: make sure EpollSelector.select() works
+  when no file descriptor is registered.
+* Python issue #22922: Fix ProactorEventLoop.close(). Call
+  _stop_accept_futures() before setting the _closed attribute, otherwise
+  call_soon() raises an error.
+* Python issue #22429: Fix EventLoop.run_until_complete(): don't stop the
+  event loop if a BaseException is raised, because the event loop is already
+  stopped.
+* Initialize more Future and Task attributes in the class definition to avoid
+  attribute errors in destructors.
+* Python issue #22685: Set the transport of the stdout and stderr
+  StreamReader objects in the SubprocessStreamProtocol. This allows pausing
+  the transport to avoid buffering too much stdout or stderr data.
+* BaseSelectorEventLoop.close() now closes the self-pipe before calling the
+  parent close() method. If the event loop is already closed, the self-pipe
+  is not unregistered from the selector.
+
+
+2014-10-20: Version 1.0.3
+=========================
+
+Changes:
+
+* On Python 2 in debug mode, Future.set_exception() now stores the traceback
+  object of the exception in addition to the exception object. When a task
+  is waiting for another task and the other task raises an exception, the
+  traceback object is now copied with the exception. Be careful: storing the
+  traceback object may create reference leaks.
+* Use ssl.create_default_context() if available to create the default SSL
+  context: Python 2.7.9 and newer, or Python 3.4 and newer.
+* On Python 3.5 and newer, reuse socket.socketpair() in the windows_utils
+  submodule.
+* On Python 3.4 and newer, use os.set_inheritable().
+* Enhance protocol representations: add "closed" or "closing" info.
+* run_forever() now consumes the BaseException of the temporary task. If the
+  coroutine raised a BaseException, consume the exception to not log a
+  warning. The caller doesn't have access to the local task.
+* Python issue 22448: clean up _run_once(): only iterate once to remove
+  delayed calls that were cancelled.
+* The destructor of the Return class now shows where the Return object was
+  created.
+* run_tests.py doesn't catch any exceptions anymore when loading tests; it
+  only catches SkipTest.
+* Fix (SSL) tests for the future Python 2.7.9 which includes a "new" ssl
+  module: the module backported from Python 3.5.
+* BaseEventLoop.add_signal_handler() now raises an exception if the parameter
+  is a coroutine function.
+* Coroutine functions and objects are now rejected with a TypeError by the
+  following functions: add_signal_handler(), call_at(), call_later(),
+  call_soon(), call_soon_threadsafe(), run_in_executor().
+
+
+2014-10-02: Version 1.0.2
+=========================
+
+This release fixes bugs. It also provides more information in debug mode on
+error.
+
+Major changes:
+
+* Tulip issue #203: Add the _FlowControlMixin.get_write_buffer_limits()
+  method.
+* Python issue #22063: socket operations (sock_recv, sock_sendall,
+  sock_connect, sock_accept) of SelectorEventLoop now raise an exception in
+  debug mode if sockets are in blocking mode.
+
+Major bugfixes:
+
+* Tulip issue #205: Fix a race condition in
+  BaseSelectorEventLoop.sock_connect().
+* Tulip issue #201: Fix a race condition in wait_for(). Don't raise a
+  TimeoutError if we reached the timeout and the future completed in the same
+  iteration of the event loop. A side effect of the bug was that Queue.get()
+  lost items.
+* PipeServer.close() now cancels the "accept pipe" future, which cancels the
+  overlapped operation.
+
+Other changes:
+
+* Python issue #22448: Improve the cleanup of cancelled timer callback
+  handles. Patch by Joshua Moore-Oliva.
+* Python issue #22369: Change "context manager protocol" to "context
+  management protocol". Patch written by Serhiy Storchaka.
+* Tulip issue #206: In debug mode, keep the callback in the representation of
+  Handle and TimerHandle after cancel().
+* Tulip issue #207: Fix test_tasks.test_env_var_debug() to use the correct
+  asyncio module.
+* runtests.py: display a message to mention if tests are run in debug or
+  release mode
+* Tulip issue #200: Log errors in debug mode instead of simply ignoring them.
+* Tulip issue #200: _WaitHandleFuture._unregister_wait() now catches and logs
+  exceptions.
+* The _fatal_error() methods of _UnixReadPipeTransport and
+  _UnixWritePipeTransport now log all exceptions in debug mode
+* Fix the debug log in BaseEventLoop.create_connection(): get the socket
+  object from the transport, because the SSL transport closes the old socket
+  and creates a new SSL socket object.
+* Remove the _SelectorSslTransport._rawsock attribute: it contained the
+  closed socket (not very useful) and it was not used.
+* Fix _SelectorTransport.__repr__() if the transport was closed
+* Use the new os.set_blocking() function of Python 3.5 if available
+
+
+2014-07-30: Version 1.0.1
+=========================
+
+This release supports PyPy and has better support for asyncio coroutines,
+especially in debug mode.
+
+Changes:
+
+* Tulip issue #198: asyncio.Condition now accepts an optional lock object.
+* Enhance the representation of Future and Future subclasses: add
+  "created at".
+
+Bugfixes:
+
+* Fix Trollius issue #9: @trollius.coroutine now works on callable objects
+  (without a ``__name__`` attribute), not only on functions.
+* Fix Trollius issue #13: asyncio futures are now accepted by all functions:
+  as_completed(), async(), @coroutine, gather(), run_until_complete(),
+  wrap_future().
+* Fix support of asyncio coroutines in debug mode. If the last instruction
+  of the coroutine is "yield from", it's an asyncio coroutine and it does not
+  need to use From().
+* Fix and enhance _WaitHandleFuture.cancel():
+
+  - Tulip issue #195: Fix a crash on Windows: don't call UnregisterWait()
+    twice if a _WaitHandleFuture is cancelled twice.
+  - Fix _WaitHandleFuture.cancel(): return the result of the parent cancel()
+    method (True or False).
+  - _WaitHandleFuture.cancel() now notifies IocpProactor through the
+    overlapped object that the wait was cancelled.
+
+* Tulip issue #196: _OverlappedFuture now clears its reference to the
+  overlapped object. IocpProactor keeps a reference to the overlapped object
+  until it is notified of its completion. Also log an error in debug mode if
+  it gets unexpected notifications.
+* Fix runtests.py to be able to log at level DEBUG.
+
+Other changes:
+
+* BaseSelectorEventLoop._write_to_self() now logs errors in debug mode.
+* Fix as_completed(): it's not a coroutine, don't use ``yield From(...)`` but
+  ``yield ...``
+* Tulip issue #193: Convert StreamWriter.drain() to a classic coroutine.
+* Tulip issue #194: Don't use sys.getrefcount() in unit tests: the full test
+  suite now passes on PyPy.
+
+
+2014-07-21: Version 1.0
+=======================
+
+Major Changes
+-------------
+
+* Event loops have a new ``create_task()`` method, which is now the
+  recommended way to create a task object. This method can be overridden by
+  third-party event loops to use their own task class.
+* The debug mode has been improved a lot.
+  Set the ``TROLLIUSDEBUG`` environment
+  variable to ``1`` and configure logging to log at level ``logging.DEBUG``
+  (ex: ``logging.basicConfig(level=logging.DEBUG)``). Changes:
+
+  - much better representation of Trollius objects (ex: ``repr(task)``):
+    unified ``<Class arg1 arg2 ...>`` format, use qualified names when
+    available
+  - show the traceback where objects were created
+  - show the current filename and line number for coroutines
+  - show the filename and line number where objects were created
+  - log most important socket events
+  - log most important subprocess events
+
+* ``Handle.cancel()`` now clears references to the callback and args
+* Log an error if a Task is destroyed while it is still pending, but only on
+  Python 3.4 and newer.
+* Fix for asyncio coroutines when passing a tuple value in debug mode.
+  ``CoroWrapper.send()`` now checks if it is called from a "yield from"
+  generator to decide if the parameter should be unpacked or not.
+* ``Process.communicate()`` now ignores ``BrokenPipeError`` and
+  ``ConnectionResetError`` exceptions.
+* Rewrite signal handling on Python 3.3 and newer to fix a race condition:
+  use the "self-pipe" to get signal numbers.
+
+
+Other Changes
+-------------
+
+* Fix ``ProactorEventLoop()`` in debug mode
+* Fix a race condition when setting the result of a Future with
+  ``call_soon()``. Add a helper, a private method, to set the result only if
+  the future was not cancelled.
+* Fix ``asyncio.__all__``: also export the ``unix_events`` and
+  ``windows_events`` symbols. For example, on Windows, it was not possible to
+  get ``ProactorEventLoop`` or ``DefaultEventLoopPolicy`` using ``from
+  asyncio import *``.
+* ``Handle.cancel()`` now clears references to the callback and args
+* Make Server attributes and methods private; the sockets attribute remains
+  public.
+* BaseEventLoop.create_datagram_endpoint() now waits until
+  protocol.connection_made() has been called. Also document why transport
+  constructors use a waiter.
+* _UnixSubprocessTransport: fix the file mode of stdin: open stdin in write
+  mode, not in read mode.
+
+
+2014-06-23: version 0.4
+=======================
+
+Changes between Trollius 0.3 and 0.4:
+
+* The Trollius event loop now supports asyncio coroutines:
+
+  - Trollius coroutines can yield asyncio coroutines,
+  - asyncio coroutines can yield Trollius coroutines,
+  - asyncio.set_event_loop() accepts a Trollius event loop,
+  - asyncio.set_event_loop_policy() accepts a Trollius event loop policy.
+
+* The ``PYTHONASYNCIODEBUG`` environment variable has been renamed to
+  ``TROLLIUSDEBUG``. The environment variable is now used even if the Python
+  command line option ``-E`` is used.
+* Synchronize with Tulip.
+* Support PyPy (fix subprocess support, fix unit tests).
+
+Tulip changes:
+
+* Tulip issue #171: BaseEventLoop.close() now raises an exception if the
+  event loop is running. You must first stop the event loop and then wait
+  until it has stopped, before closing it.
+* Tulip issue #172: only log selector timing in debug mode
+* Enable the debug mode of event loops when the ``TROLLIUSDEBUG`` environment
+  variable is set
+* BaseEventLoop._assert_is_current_event_loop() now only raises an exception
+  if the current loop is set.
+* Tulip issue #105: in debug mode, log callbacks taking more than 100 ms to
+  be executed.
+* Python issue 21595: ``BaseSelectorEventLoop._read_from_self()`` reads all
+  available bytes from the "self pipe", not only a single byte.
+  This change
+  reduces the risk of having the pipe full and so getting the
+  "BlockingIOError: [Errno 11] Resource temporarily unavailable" message.
+* Python issue 21723: asyncio.Queue: support any type of number (ex: float)
+  for the maximum size. Patch written by Vajrasky Kok.
+* Issue #173: Enhance repr(Handle) and repr(Task): add the filename and line
+  number, when available. For a task, the current line number of the
+  coroutine is used.
+* Add a BaseEventLoop.is_closed() method. The run_forever() and
+  run_until_complete() methods now raise an exception if the event loop was
+  closed.
+* Make sure that socketpair() closes sockets on error. Close the listening
+  socket if sock.bind() raises an exception.
+* Fix ResourceWarning: close sockets on errors.
+  BaseEventLoop.create_connection(), BaseEventLoop.create_datagram_endpoint()
+  and _UnixSelectorEventLoop.create_unix_server() now close the newly created
+  socket on error.
+* Rephrase and fix docstrings.
+* Fix tests on Windows: wait for the subprocess exit. Before, regrtest failed
+  to remove the temporary test directory because the process was still
+  running in this directory.
+* Refactor unit tests.
+
+On Python 3.5, generators now get their name from the function, no longer
+from the code. So the ``@coroutine`` decorator doesn't lose the original
+name of the function anymore.
+
+
+2014-05-26: version 0.3
+=======================
+
+Rename the Python module ``asyncio`` to ``trollius`` to support Python 3.4.
+On Python 3.4, there is already a module called ``asyncio`` in the standard
+library, which conflicted with the ``asyncio`` module of Trollius 0.2. To
+write asyncio code working on Trollius and Tulip, use ``import trollius as
+asyncio``.
+
+Changes between Trollius 0.2 and 0.3:
+
+* Synchronize with Tulip 3.4.1.
+* Enhance the Trollius documentation.
+* Trollius issue #7: Fix ``asyncio.time_monotonic`` on Windows older than
+  Vista (ex: Windows 2000 and Windows XP).
+* Fedora packages have been accepted.
+
+Changes between Tulip 3.4.0 and 3.4.1:
+
+* Pull in Solaris ``devpoll`` support by Giampaolo Rodola
+  (``trollius.selectors`` module).
+* Add options ``-r`` and ``--randomize`` to runtests.py to randomize the
+  test order.
+* Add a simple echo client/server example.
+* Tulip issue #166: Add ``__weakref__`` slots to ``Handle`` and
+  ``CoroWrapper``.
+* ``EventLoop.create_unix_server()`` now raises a ``ValueError`` if path and
+  sock are specified at the same time.
+* Ensure ``call_soon()``, ``call_later()`` and ``call_at()`` are invoked on
+  the current loop in debug mode. Raise a ``RuntimeError`` if the event loop
+  of the current thread is different. The check should help to debug
+  thread-safety issues. Patch written by David Foster.
+* Tulip issue #157: Improve test_events.py, avoid ``run_briefly()`` which is
+  not reliable.
+* Reject add/remove reader/writer when the event loop is closed.
+
+Bugfixes of Tulip 3.4.1:
+
+* Tulip issue #168: ``StreamReader.read(-1)`` from a pipe may hang if
+  data exceeds the buffer limit.
+* CPython issue #21447: Fix a race condition in
+  ``BaseEventLoop._write_to_self()``.
+* Different bugfixes in the ``CoroWrapper`` class of ``trollius.coroutines``,
+  the class used when running Trollius in debug mode:
+
+  - Fix ``CoroWrapper`` to work around a yield-from bug in CPython 3.4.0.
+    The CPython bug is now fixed in CPython 3.4.1 and 3.5.
+  - Make sure ``CoroWrapper.send`` proxies one argument correctly.
+  - CPython issue #21340: Be careful accessing instance variables in
+    ``__del__``.
+  - Tulip issue #163: Add ``gi_{frame,running,code}`` properties to
+    ``CoroWrapper``.
+
+* Fix ``ResourceWarning`` warnings
+* Tulip issue #159: Fix ``windows_utils.socketpair()``. Use ``"127.0.0.1"``
+  (IPv4) or ``"::1"`` (IPv6) host instead of ``"localhost"``, because
+  ``"localhost"`` may be a different IP address. Also reject invalid
+  arguments: only ``AF_INET`` and ``AF_INET6`` with ``SOCK_STREAM`` (and
+  ``proto=0``) are supported.
+* Tulip issue #158: ``Task._step()`` now also sets ``self`` to ``None`` if an
+  exception is raised. ``self`` is set to ``None`` to break a reference
+  cycle.
+
+
+2014-03-04: version 0.2
+=======================
+
+Trollius now uses the ``yield From(...)`` syntax, which looks close to the
+Tulip ``yield from ...`` syntax and makes it easier to port Trollius code to
+Tulip. The usage of ``From()`` is not mandatory yet, but it may become
+mandatory in a future version. However, if ``yield`` is used without
+``From``, an exception is raised if the event loop is running in debug mode.
+
+Major changes:
+
+* Replace the ``yield ...`` syntax with ``yield From(...)``
+* On Python 2, Future.set_exception() now only saves the traceback if the
+  debug mode of the event loop is enabled, for better performance in
+  production mode. Use ``loop.set_debug(True)`` to save the traceback.
+
+Bugfixes:
+
+* Fix ``BaseEventLoop.default_exception_handler()`` on Python 2: get the
+  traceback from ``sys.exc_info()``
+* Fix unit tests on SSL sockets on Python older than 2.6.6. Example:
+  Mac OS 10.6 with Python 2.6.1 or OpenIndiana 148 with Python 2.6.4.
+* Fix error handling in the asyncio.time_monotonic module
+* Fix the acquire() method of Lock, Condition and Semaphore: don't return a
+  context manager but True, as in Tulip. Task._step() now does the trick.
+
+Other changes:
+
+* tox.ini: set PYTHONASYNCIODEBUG to 1 to run tests
+
+2014-02-25: version 0.1.6
+=========================
+
+Trollius changes:
+
+* Add a new Sphinx documentation:
+  https://trollius.readthedocs.io/
+* tox: pass posargs to nosetests. Patch contributed by Ian Wienand.
+* Fix support of Python 3.2 and add py32 to tox.ini
+* Merge with Tulip 0.4.1
+
+Major changes of Tulip 0.4.1:
+
+* Issue #81: Add support for UNIX Domain Sockets. New APIs:
+
+  - loop.create_unix_connection()
+  - loop.create_unix_server()
+  - streams.open_unix_connection()
+  - streams.start_unix_server()
+
+* Issue #80: Add a new event loop exception handling API. New APIs:
+
+  - loop.set_exception_handler()
+  - loop.call_exception_handler()
+  - loop.default_exception_handler()
+
+* Issue #136: Add get_debug() and set_debug() methods to BaseEventLoop.
+  Also add a ``PYTHONASYNCIODEBUG`` environment variable to debug coroutines
+  from Python startup, to be able to debug coroutines defined directly in
+  the asyncio module.
+
+Other changes of Tulip 0.4.1:
+
+* asyncio.subprocess: Fix a race condition in communicate()
+* Fix _ProactorWritePipeTransport._pipe_closed()
+* Issue #139: Improve error messages on "fatal errors".
+* Issue #140: WriteTransport.set_write_buffer_size() now calls
+  _maybe_pause_protocol()
+* Issue #129: BaseEventLoop.sock_connect() now raises an error if the address
+  is not resolved (hostname instead of an IP address) for AF_INET and
+  AF_INET6 address families.
+* Issue #131: as_completed() and wait() now raise a TypeError if the list of
+  futures is not a list but a Future, Task or coroutine object
+* Python issue #20495: Skip test_read_pty_output() of test_asyncio on FreeBSD
+  older than FreeBSD 8
+* Issue #130: Add more checks on subprocess_exec/subprocess_shell parameters
+* Issue #126: call_soon(), call_soon_threadsafe(), call_later(), call_at()
+  and run_in_executor() now raise a TypeError if the callback is a coroutine
+  function.
+* Python issue #20505: BaseEventLoop again uses the resolution of the clock
+  to decide if scheduled tasks should be executed or not.
+
+
+2014-02-10: version 0.1.5
+=========================
+
+- Merge with Tulip 0.3.1:
+
+  * New asyncio.subprocess module
+  * _UnixWritePipeTransport now also supports character devices, like
+    _UnixReadPipeTransport. Patch written by Jonathan Slenders.
+  * StreamReader.readexactly() now raises an IncompleteReadError if the
+    end of stream is reached before we received enough bytes, instead of
+    returning fewer bytes than requested.
+  * poll and epoll selectors now round the timeout away from zero (instead
+    of rounding towards zero) to fix a performance issue
+  * asyncio.queue: Empty renamed to QueueEmpty, Full to QueueFull
+  * _fatal_error() of _UnixWritePipeTransport and _ProactorBasePipeTransport
+    don't log BrokenPipeError or ConnectionResetError
+  * Future.set_exception(exc) now instantiates exc if it is a class
+  * streams.StreamReader: Use a bytearray instead of a deque of bytes for
+    the internal buffer
+
+- Fix the test_wait_for() unit test
+
+2014-01-22: version 0.1.4
+=========================
+
+- The project moved to https://bitbucket.org/enovance/trollius
+- Fix CoroWrapper (_DEBUG=True): add a missing import
+- Emit a warning when Return is not raised
+- Merge with Tulip to get the latest Tulip bugfixes
+- Fix dependencies in tox.ini for the different Python versions
+
+2014-01-13: version 0.1.3
+=========================
+
+- Work around bugs in the ssl module of Python older than 2.6.6. For
+  example, Mac OS 10.6 (Snow Leopard) uses Python 2.6.1.
+- ``return x, y`` is now written ``raise Return(x, y)`` instead of
+  ``raise Return((x, y))``
+- Support the "with (yield lock):" syntax for Lock, Condition and Semaphore
+- SSL support is now optional: don't fail if the ssl module is missing
+- Add tox.ini, a tool to run unit tests. For example, "tox -e py27" creates
+  a virtual environment to run tests with Python 2.7.
+
+2014-01-08: version 0.1.2
+=========================
+
+- Trollius now supports CPython 2.6-3.4, PyPy and Windows. All unit tests
+  pass with CPython 2.7 on Linux.
+- Fix Windows support. Fix compilation of the _overlapped module and add an
+  asyncio._winapi module (written in pure Python). Patch written by Marc
+  Schlaich.
+- Support Python 2.6: requires an extra dependency,
+  ordereddict (and unittest2 for unit tests)
+- Support Python 3.2, 3.3 and 3.4
+- Support PyPy 2.2
+- Don't modify __builtins__ or the ssl module to inject backported
+  exceptions like BlockingIOError or SSLWantReadError. Exceptions are
+  available in the asyncio module, ex: asyncio.BlockingIOError.
+ +2014-01-06: version 0.1.1 +========================= + +- Fix asyncio.time_monotonic on Mac OS X +- Fix create_connection(ssl=True) +- Don't export backported SSLContext in the ssl module anymore to not confuse + libraries testing hasattr(ssl, "SSLContext") +- Relax dependency on the backported concurrent.futures module: use a + synchronous executor if the module is missing + +2014-01-04: version 0.1 +======================= + +- First public release diff --git a/COPYING b/COPYING new file mode 100644 index 00000000..11069edd --- /dev/null +++ b/COPYING @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!) The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 00000000..017fcfba
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,18 @@
+include AUTHORS COPYING TODO.rst tox.ini
+include Makefile
+include overlapped.c pypi.bat
+include check.py runtests.py release.py
+include update-asyncio-*.sh
+include .travis.yml
+include appveyor.yml
+include releaser.conf
+
+include doc/conf.py doc/make.bat doc/Makefile
+include doc/*.rst doc/*.jpg
+
+include examples/*.py
+
+include tests/*.crt tests/*.pem tests/*.key
+include tests/*.py
+
+include *.rst
diff --git a/Makefile b/Makefile
index 89fb927d..768298b1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,62 @@
+# Some simple testing tasks (sorry, UNIX only).
+
+PYTHON=python
+VERBOSE=$(V)
+V=	0
+FLAGS=
+
 test:
-	time python3.3 main.py
+	$(PYTHON) runtests.py -v $(VERBOSE) $(FLAGS)
+
+vtest:
+	$(PYTHON) runtests.py -v 1 $(FLAGS)
+
+testloop:
+	while sleep 1; do $(PYTHON) runtests.py -v $(VERBOSE) $(FLAGS); done
+
+# See runtests.py for coverage installation instructions.
+cov coverage:
+	$(PYTHON) runtests.py --coverage -v $(VERBOSE) $(FLAGS)
+
+check:
+	$(PYTHON) check.py
+
+# Requires "pip install pep8".
+pep8: check
+	pep8 --ignore E125,E127,E226 tests asyncio
+
+clean:
+	rm -rf `find . -name __pycache__`
+	rm -f `find . -type f -name '*.py[co]' `
+	rm -f `find . -type f -name '*~' `
+	rm -f `find . -type f -name '.*~' `
+	rm -f `find . -type f -name '@*' `
+	rm -f `find . -type f -name '#*#' `
+	rm -f `find . -type f -name '*.orig' `
+	rm -f `find . -type f -name '*.rej' `
+	rm -rf dist
+	rm -f .coverage
+	rm -rf htmlcov
+	rm -rf build
+	rm -rf asyncio.egg-info
+	rm -f MANIFEST
+	rm -rf trollius.egg-info
+	rm -rf .tox
+
+
+# For distribution builders only!
+# Push a source distribution for Python 3.3 to PyPI.
+# You must update the version in setup.py first.
+# A PyPI user configuration in ~/.pypirc is required;
+# you can create a suitable configuration using
+#   python setup.py register
+pypi: clean
+	python3.3 setup.py sdist upload
 
-profile:
-	python3.3 -m profile -s time main.py
+# The corresponding action on Windows is pypi.bat. For that to work, you
+# need to install wheel and setuptools.
+# The easiest way is to get pip using the get-pip.py script found here:
+# https://pip.pypa.io/en/latest/installing.html#install-pip
+# That will install setuptools and pip; then you can just do
+#   \Python33\python.exe -m pip install wheel
+# after which the pypi.bat script should work.
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..79764d3d
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,11 @@
+========
+Trollius
+========
+
+.. image:: http://unmaintained.tech/badge.svg
+   :target: http://unmaintained.tech/
+   :alt: No Maintenance Intended
+
+.. warning::
+   The Trollius project is deprecated and unsupported. It is on PyPI
+   to support existing dependencies only.
diff --git a/TODO.rst b/TODO.rst
new file mode 100644
index 00000000..f600cbdf
--- /dev/null
+++ b/TODO.rst
@@ -0,0 +1,25 @@
+Unsorted "TODO" tasks:
+
+* Python 3.5: Fix test_task_repr()
+* Python 3.4: Fix test_asyncio()
+* Drop platforms without the ssl module?
+* streams.py: FIXME: should we support __aiter__ and __anext__ in Trollius?
+* replace selectors.py with selectors34:
+  https://github.com/berkerpeksag/selectors34/pull/2
+* check ssl.SSLxxx in update_xxx.sh
+* document how to port asyncio code to Trollius
+* use six instead of compat
+* Replace logger with warning in monotonic clock and synchronous executor
+* Windows: use _overlapped in py33_winapi?
+* Fix tests failing with PyPy:
+
+  - sys.getrefcount()
+  - test_queues.test_repr
+  - test_futures.test_tb_logger_exception_unretrieved
+
+* write a unit test for create_connection(ssl=True)
+* Fix examples:
+
+  - stacks.py: 'exceptions.ZeroDivisionError' object has no attribute '__traceback__'
+
+* Fix all FIXME in the code
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 00000000..2848eecb
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,82 @@
+environment:
+  matrix:
+    - PYTHON: "C:\\Python27-x64"
+      PYTHON_VERSION: "2.7.x"  # currently 2.7.13
+      PYTHON_ARCH: "64"
+      PYTHON_EXE: python
+    - PYTHON: "C:\\Python27"
+      PYTHON_VERSION: "2.7.x"  # currently 2.7.13
+      PYTHON_ARCH: "32"
+      PYTHON_EXE: python
+      GWHEEL_ONLY: true
+    # PyPy 7.1 won't actually build the _overlapped extension:
+    #   overlapped.c(92) : warning C4013: 'PyErr_SetExcFromWindowsErr' undefined; assuming extern returning int
+    #   overlapped.c(92) : warning C4047: 'return' : 'PyObject *' differs in levels of indirection from 'int'
+    #   overlapped.c(1166) : warning C4101: 'AddressObj' : unreferenced local variable
+    # - PYTHON: "C:\\pypy2.7-v7.1.0-win32"
+    #   PYTHON_ID: "pypy"
+    #   PYTHON_EXE: pypy
+    #   PYTHON_VERSION: "2.7.x"
+    #   PYTHON_ARCH: "32"
+
+install:
+  # If there is a newer build queued for the same PR, cancel this one.
+  # The AppVeyor 'rollout builds' option is supposed to serve the same
+  # purpose but it is problematic because it tends to cancel builds pushed
+  # directly to master instead of just PR builds (or the converse).
+  # credits: JuliaLang developers.
+  - ps: if ($env:APPVEYOR_PULL_REQUEST_NUMBER -and $env:APPVEYOR_BUILD_NUMBER -ne ((Invoke-RestMethod `
+        https://ci.appveyor.com/api/projects/$env:APPVEYOR_ACCOUNT_NAME/$env:APPVEYOR_PROJECT_SLUG/history?recordsNumber=50).builds | `
+        Where-Object pullRequestId -eq $env:APPVEYOR_PULL_REQUEST_NUMBER)[0].buildNumber) { `
+        throw "There are newer queued builds for this pull request, failing early." }
+  - ECHO "Filesystem root:"
+  - ps: "ls \"C:/\""
+
+  - ECHO "Installed SDKs:"
+  - ps: "ls \"C:/Program Files/Microsoft SDKs/Windows\""
+
+  # Install Python (from the official .msi of http://python.org) and pip when
+  # not already installed.
+  # PyPy portion based on
+  # https://github.com/wbond/asn1crypto/blob/master/appveyor.yml
+  - ps:
+      $env:PYTMP = "${env:TMP}\py";
+      if (!(Test-Path "$env:PYTMP")) {
+        New-Item -ItemType directory -Path "$env:PYTMP" | Out-Null;
+      }
+      if ("${env:PYTHON_ID}" -eq "pypy") {
+        if (!(Test-Path "${env:PYTMP}\pypy2-v7.1.0-win32.zip")) {
+          (New-Object Net.WebClient).DownloadFile('https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.1.0-win32.zip', "${env:PYTMP}\pypy2-v7.1.0-win32.zip");
+        }
+        7z x -y "${env:PYTMP}\pypy2-v7.1.0-win32.zip" -oC:\ | Out-Null;
+        & "${env:PYTHON}\pypy.exe" "-mensurepip";
+      }
+      elseif (-not(Test-Path($env:PYTHON))) {
+        & appveyor\install.ps1;
+      }
+
+  # Prepend newly installed Python to the PATH of this build (this cannot be
+  # done from inside the powershell script as it would require to restart
+  # the parent CMD process).
+  - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PYTHON%\\bin;%PATH%"
+  - "SET PYEXE=%PYTHON%\\%PYTHON_EXE%.exe"
+
+  # Check that we have the expected version and architecture for Python
+  - "%PYEXE% --version"
+
+  - "%PYEXE% -m pip install --disable-pip-version-check -U pip"
+  - "%PYEXE% -m pip install -U setuptools wheel"
+
+build_script:
+  - "%PYEXE% setup.py bdist_wheel"
+
+# There's nothing to actually test anymore; the aiotest dependency is gone.
+test_script:
+  - "%PYEXE% --version"
+
+artifacts:
+  - path: dist/*.whl
+
+cache:
+  - "%TMP%\\py\\"
+  - '%LOCALAPPDATA%\pip\Cache'
diff --git a/check.py b/check.py
new file mode 100644
index 00000000..dcefc185
--- /dev/null
+++ b/check.py
@@ -0,0 +1,51 @@
+"""Search for lines >= 80 chars or with trailing whitespace."""
+
+import os
+import sys
+
+
+def main():
+    # Default to the current directory when no arguments are given.
+    args = sys.argv[1:] or [os.curdir]
+    for arg in args:
+        if os.path.isdir(arg):
+            for dn, dirs, files in os.walk(arg):
+                for fn in sorted(files):
+                    if fn.endswith('.py'):
+                        process(os.path.join(dn, fn))
+                # Skip hidden directories; walk the rest in sorted order.
+                dirs[:] = [d for d in dirs if d[0] != '.']
+                dirs.sort()
+        else:
+            process(arg)
+
+
+def isascii(x):
+    try:
+        x.encode('ascii')
+        return True
+    except UnicodeError:
+        return False
+
+
+def process(fn):
+    try:
+        f = open(fn)
+    except IOError as err:
+        print(err)
+        return
+    try:
+        for i, line in enumerate(f):
+            line = line.rstrip('\n')
+            sline = line.rstrip()
+            # Report long lines, trailing whitespace and non-ASCII text;
+            # underscores in the output mark the trailing whitespace.
+            if len(line) >= 80 or line != sline or not isascii(line):
+                print('{0}:{1:d}:{2}{3}'.format(
+                    fn, i + 1, sline, '_' * (len(line) - len(sline))))
+    finally:
+        f.close()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 00000000..314751af
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,153 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = build
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	-rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Trollius.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Trollius.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/Trollius"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Trollius"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/asyncio.rst b/doc/asyncio.rst
new file mode 100644
index 00000000..2c379353
--- /dev/null
+++ b/doc/asyncio.rst
@@ -0,0 +1,188 @@
+++++++++++++++++++++
+Trollius and asyncio
+++++++++++++++++++++
+
+.. warning::
+   :ref:`The Trollius project is now deprecated! <deprecated>`
+
+Differences between Trollius and asyncio
+========================================
+
+Syntax of coroutines
+--------------------
+
+The major difference between Trollius and asyncio is the syntax of coroutines:
+
+==================  ======================
+asyncio             Trollius
+==================  ======================
+``yield from ...``  ``yield From(...)``
+``yield from []``   ``yield From(None)``
+``return``          ``raise Return()``
+``return x``        ``raise Return(x)``
+``return x, y``     ``raise Return(x, y)``
+==================  ======================
+
+Because of this major difference, it was decided to call the module
+``trollius`` instead of ``asyncio``. This choice also makes it possible to use
+Trollius on Python 3.4 and later. Changing imports is not enough to use
+Trollius code with asyncio: the asyncio event loop explicitly rejects
+coroutines using ``yield`` (instead of ``yield from``).
+
+OSError and socket.error exceptions
+-----------------------------------
+
+The ``OSError`` exception changed in Python 3.3: there are now subclasses like
+``ConnectionResetError`` or ``BlockingIOError``. The exception hierarchy also
+changed: ``socket.error`` is now an alias to ``OSError``. The ``asyncio``
+module is written for Python 3.3 and newer and so is based on these new
+exceptions.
+
+.. seealso::
+
+   `PEP 3151: Reworking the OS and IO exception hierarchy
+   <https://www.python.org/dev/peps/pep-3151/>`_.
+
+On Python 3.2 and older, Trollius wraps ``OSError``, ``IOError``,
+``socket.error`` and ``select.error`` exceptions on operating system and socket
+operations to raise more specific exceptions, subclasses of ``OSError``:
+
+* ``trollius.BlockingIOError``
+* ``trollius.BrokenPipeError``
+* ``trollius.ChildProcessError``
+* ``trollius.ConnectionAbortedError``
+* ``trollius.ConnectionRefusedError``
+* ``trollius.ConnectionResetError``
+* ``trollius.FileNotFoundError``
+* ``trollius.InterruptedError``
+* ``trollius.PermissionError``
+
+On Python 3.3 and newer, these symbols are just aliases to builtin exceptions.
+
+.. note::
+
+   ``ssl.SSLError`` exceptions are not wrapped to ``OSError``, even if
+   ``ssl.SSLError`` is a subclass of ``socket.error``.
+
+
+SSLError
+--------
+
+On Python 3.2 and older, Trollius wraps ``ssl.SSLError`` exceptions to raise
+more specific exceptions, subclasses of ``ssl.SSLError``, to mimic the Python
+3.3 API:
+
+* ``trollius.SSLEOFError``
+* ``trollius.SSLWantReadError``
+* ``trollius.SSLWantWriteError``
+
+On Python 3.3 and newer, these symbols are just aliases to exceptions of the
+``ssl`` module.
+
+``trollius.BACKPORT_SSL_ERRORS`` constant:
+
+* ``True`` if ``ssl.SSLError`` exceptions are wrapped to Trollius exceptions
+  (Python 2 older than 2.7.9, or Python 3 older than 3.3),
+* ``False`` if the trollius SSL exceptions are just aliases.
+
+
+SSLContext
+----------
+
+Python 3.3 has a new ``ssl.SSLContext`` class: see the `documentation of the
+ssl.SSLContext class
+`_.
+
+On Python 3.2 and older, Trollius has a basic ``trollius.SSLContext`` class to
+mimic the Python 3.3 API, but it only has a few features:
+
+* ``protocol``, ``certfile`` and ``keyfile`` attributes
+* read-only ``verify_mode`` attribute: its value is ``CERT_NONE``
+* ``load_cert_chain(certfile, keyfile)`` method
+* ``wrap_socket(sock, **kw)`` method: see the ``ssl.wrap_socket()``
+  documentation of your Python version for the keyword parameters
+
+Examples of missing features:
+
+* no ``options`` attribute
+* the ``verify_mode`` attribute cannot be modified
+* no ``set_default_verify_paths()`` method
+* no "Server Name Indication" (SNI) support
+* etc.
+
+On Python 3.2 and older, the trollius SSL transport does not have the
+``'compression'`` extra info.
+
+``trollius.BACKPORT_SSL_CONTEXT`` constant:
+
+* ``True`` if ``trollius.SSLContext`` is the backported class (Python 2 older
+  than 2.7.9, or Python 3 older than 3.3),
+* ``False`` if ``trollius.SSLContext`` is just an alias to ``ssl.SSLContext``.
+
+
+Other differences
+-----------------
+
+* Trollius uses the ``TROLLIUSDEBUG`` environment variable instead of the
+  ``PYTHONASYNCIODEBUG`` environment variable. The ``TROLLIUSDEBUG`` variable
+  is used even if the Python command line option ``-E`` is used.
+* ``asyncio.subprocess`` has no ``DEVNULL`` constant.
+* Python 2 does not support keyword-only parameters.
+* If the ``concurrent.futures`` module is missing,
+  ``BaseEventLoop.run_in_executor()`` uses a synchronous executor instead of a
+  pool of threads. It blocks until the function returns. For example, DNS
+  resolutions are blocking in this case.
+* Trollius has more symbols than asyncio for compatibility with Python older
+  than 3.3:
+
+  - ``From``: part of ``yield From(...)`` syntax
+  - ``Return``: part of ``raise Return(...)`` syntax
+
+
+Write code working on Trollius and asyncio
+==========================================
+
+Trollius and asyncio are different, especially for coroutines (``yield
+From(...)`` vs ``yield from ...``).
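+
+To make the difference concrete, here is a minimal sketch of a coroutine
+written in the Trollius dialect (the name ``fetch_data`` and the returned
+value are illustrative only)::
+
+    import trollius
+    from trollius import From, Return
+
+    @trollius.coroutine
+    def fetch_data(delay):
+        # Trollius: "yield From(...)" where asyncio uses "yield from ...".
+        yield From(trollius.sleep(delay))
+        # Trollius: "raise Return(value)" where asyncio uses "return value".
+        raise Return('data')
+
+    loop = trollius.get_event_loop()
+    print(loop.run_until_complete(fetch_data(0.1)))  # prints 'data'
+    loop.close()
+
+The asyncio equivalent (Python 3.3 and newer only) would instead use ``yield
+from asyncio.sleep(delay)`` followed by ``return 'data'``.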
+ +To use asyncio or Trollius on Python 2 and Python 3, add the following code at +the top of your file:: + + try: + # Use builtin asyncio on Python 3.4+, or asyncio on Python 3.3 + import asyncio + except ImportError: + # Use Trollius on Python <= 3.2 + import trollius as asyncio + +It is possible to write code working on both projects using only callbacks. +This option is used by the following projects which work on Trollius and asyncio: + +* `AutobahnPython `_: WebSocket & + WAMP for Python, it works on Trollius (Python 2.6 and 2.7), asyncio (Python + 3.3) and Python 3.4 (asyncio), and also on Twisted. +* `Pulsar `_: Event driven concurrent + framework for Python. With pulsar you can write asynchronous servers + performing one or several activities in different threads and/or processes. + Trollius 0.3 requires Pulsar 0.8.2 or later. Pulsar uses the ``asyncio`` + module if available, or import ``trollius``. +* `Tornado `_ supports asyncio and Trollius since + Tornado 3.2: `tornado.platform.asyncio — Bridge between asyncio and Tornado + `_. It tries to import + asyncio or fallback on importing trollius. + +Another option is to provide functions returning ``Future`` objects, so the +caller can decide to use callback using ``fut.add_done_callback(callback)`` or +to use coroutines (``yield From(fut)`` for Trollius, or ``yield from fut`` for +asyncio). This option is used by the `aiodns `_ +project for example. + +Since Trollius 0.4, it's possible to use asyncio and Trollius coroutines in the +same process. The only limit is that the event loop must be a Trollius event +loop. + +.. note:: + + The Trollius module was called ``asyncio`` in Trollius version 0.2. The + module name changed to ``trollius`` to support Python 3.4. + diff --git a/doc/changelog.rst b/doc/changelog.rst new file mode 100644 index 00000000..c287fbf9 --- /dev/null +++ b/doc/changelog.rst @@ -0,0 +1,4 @@ +.. warning:: + :ref:`The Trollius project is now deprecated! ` + +.. include:: ../CHANGES.rst diff --git a/doc/conf.py b/doc/conf.py new file mode 100644 index 00000000..bed8900b --- /dev/null +++ b/doc/conf.py @@ -0,0 +1,240 @@ +# -*- coding: utf-8 -*- +# +# Trollius documentation build configuration file, created by +# sphinx-quickstart on Fri Feb 21 11:05:42 2014. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#import sys, os +#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. 
+project = u'Trollius' +copyright = u'2014, Victor Stinner' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = release = '2.3' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. 
+#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'Trolliusdoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'Trollius.tex', u'Trollius Documentation', + u'Victor Stinner', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'trollius', u'Trollius Documentation', + [u'Victor Stinner'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------------ + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'Trollius', u'Trollius Documentation', + u'Victor Stinner', 'Trollius', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' diff --git a/doc/deprecated.rst b/doc/deprecated.rst new file mode 100644 index 00000000..f5f5743a --- /dev/null +++ b/doc/deprecated.rst @@ -0,0 +1,98 @@ +.. _deprecated: + +Trollius is deprecated +====================== + +.. warning:: + The Trollius project is now deprecated! + +Trollius is deprecated since the release 2.1. The maintainer of Trollius, +Victor Stinner, doesn't want to maintain the project anymore for many reasons. +This page lists some reasons. + +DON'T PANIC! There is the asyncio project which has the same API and is well +maintained! Only trollius is deprecated. 
+
+Since Trollius is used by some projects in the wild, Trollius will
+not disappear. You can only expect *minimum* maintenance like minor bugfixes.
+Don't expect new features nor synchronization with the latest asyncio.
+
+To be clear: I am looking for a new maintainer. If you want to take over
+trollius: please do it, I will give you everything you need (and maybe more!).
+
+asyncio
+-------
+
+`asyncio is here `_! asyncio is well
+maintained, has a growing community, has many libraries and keeps evolving
+thanks to user feedback. I (Victor Stinner) even heard that it is fast!
+
+asyncio requires Python 3.3 or newer. Yes, you all have a long list of reasons
+not to port your legacy code to Python 3. But I have my own reasons to prefer
+investing in Python 3 rather than in legacy Python (Python 2).
+
+
+No Trollius Community
+---------------------
+
+* While the asyncio community is growing every day, there is no trollius
+  community. Sadly, asyncio libraries don't work with trollius.
+* Only :ref:`very few libraries support Trollius <libraries>`: to be clear,
+  there is no HTTP client for Trollius, whereas HTTP is the most common
+  protocol in 2015.
+* It's a deliberate choice of library authors to not support Trollius to
+  keep a simple code base. Python 3 code is simpler than Python 2 code:
+  supporting Python 2 in a library requires more work. For example, aiohttp
+  doesn't want to support trollius.
+
+Python 2
+--------
+
+* Seriously? Come on! Stop procrastinating and upgrade your code to Python 3!
+
+Lack of interest
+----------------
+
+* The Trollius project was created to replace eventlet with asyncio (trollius)
+  in the OpenStack project, but replacing eventlet with trollius failed for
+  various reasons. The original motivation is gone.
+
+Technical issues with trollius
+------------------------------
+
+* While the Trollius API is "simple", the implementation is very complex in
+  order to be efficient on all platforms.
+* Trollius requires :ref:`backports <backports>` of libraries to support
+  old Python versions. These backports are not as well supported as the
+  versions in the Python standard library.
+* Supporting Python 2.7, Python 3.3 and Python 3.5 in the same code base
+  while also supporting asyncio is very difficult. Generators and coroutines
+  changed a lot in each Python version. For example, hacks are required to
+  support Python 3.5 with the PEP 479, which changed the usage of the
+  ``StopIteration`` exception. Trollius initially also supported Python 2.6
+  and 3.2.
+
+Technical issues related to asyncio and yield from
+--------------------------------------------------
+
+* Synchronizing trollius with asyncio is a complex, tricky and error-prone
+  task which requires a lot of care and a lot of manual changes.
+* Porting Python 3 asyncio to Python 2 requires many subtle changes which
+  take a lot of time at each synchronization.
+* It is not possible to use asyncio ``yield from`` coroutines in Python 2,
+  since the ``yield from`` instruction raises a ``SyntaxError`` exception.
+  Supporting Trollius and asyncio requires duplicating some parts of the
+  library and application code, which makes the code more complex and more
+  difficult to maintain.
+* Trollius coroutines are slower than asyncio coroutines: the ``yield``
+  instruction requires delegating nested coroutines manually, whereas the
+  ``yield from`` instruction delegates them directly in the Python language.
+  asyncio needs fewer event loop iterations than trollius for the same nested
+  coroutine (see the sketch below).
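+
+To illustrate the delegation cost, here is a minimal sketch (the coroutine
+names are illustrative only) of a nested coroutine pair in Trollius::
+
+    import trollius
+    from trollius import From, Return
+
+    @trollius.coroutine
+    def inner():
+        yield From(trollius.sleep(0.01))
+        raise Return(42)
+
+    @trollius.coroutine
+    def outer():
+        # With "yield", each step of inner() is driven by the Task and the
+        # coroutine runner; asyncio's "yield from inner()" delegates the
+        # steps directly through the generator protocol instead.
+        result = yield From(inner())
+        raise Return(result)
+
+    loop = trollius.get_event_loop()
+    print(loop.run_until_complete(outer()))  # prints 42
+    loop.close()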
+
+Other technical issues
+----------------------
+
+* Building wheel packages on Windows is difficult (need a running Windows,
+  need working Windows SDKs for each version of Python, need to test
+  and fix bugs specific to Windows, etc.)
diff --git a/doc/dev.rst b/doc/dev.rst
new file mode 100644
index 00000000..a05179bd
--- /dev/null
+++ b/doc/dev.rst
@@ -0,0 +1,84 @@
+Run tests
+=========
+
+.. warning::
+   :ref:`The Trollius project is now deprecated! <deprecated>`
+
+Run tests with tox
+------------------
+
+The `tox project `_ can be used to build a
+virtual environment with all runtime and test dependencies and run tests
+against different Python versions (2.7, 3.3, 3.4).
+
+For example, to run tests with Python 2.7, just type::
+
+    tox -e py27
+
+To run tests against other Python versions:
+
+* ``py27``: Python 2.7
+* ``py33``: Python 3.3
+* ``py34``: Python 3.4
+
+
+Test Dependencies
+-----------------
+
+On Python older than 3.3, unit tests require the `mock
+`_ module. Python 2.6 and 2.7 also require
+`unittest2 `_.
+
+
+Run tests on UNIX
+-----------------
+
+Run the following commands from the directory of the Trollius project.
+
+To run tests::
+
+    make test
+
+To run coverage (the ``coverage`` package is required)::
+
+    make coverage
+
+
+Run tests on Windows
+--------------------
+
+Run the following commands from the directory of the Trollius project.
+
+You can run the tests as follows::
+
+    C:\Python27\python.exe runtests.py
+
+And coverage as follows::
+
+    C:\Python27\python.exe runtests.py --coverage
+
+
+CPython bugs
+============
+
+The development of asyncio and trollius helped identify several bugs in
+CPython:
+
+* 2.5.0 <= python <= 3.4.2: `sys.exc_info() bug when yield/yield-from is used
+  in an except block in a generator (#23353)
+  `_. The fix will be part of Python 3.4.3.
+  _UnixSelectorEventLoop._make_subprocess_transport() and
+  ProactorEventLoop._make_subprocess_transport() work around the bug.
+* python == 3.4.0: `Segfault in gc with cyclic trash (#21435)
+  `_.
+  Regression introduced in Python 3.4.0, fixed in Python 3.4.1.
+  Status in Ubuntu as of February 3, 2015: only Ubuntu Trusty (14.04 LTS) is
+  impacted (`bug #1367907: Segfault in gc with cyclic trash
+  `_, see
+  also `update Python3 for trusty #1348954
+  `_)
+* 3.3.0 <= python <= 3.4.0: `gen.send(tuple) unpacks the tuple instead of
+  passing 1 argument (the tuple) when gen is an object with a send() method,
+  not a classic generator (#21209) `_.
+  Regression introduced in Python 3.4.0, fixed in Python 3.4.1.
+  trollius.CoroWrapper.send() works around the issue; the bug is checked at
+  runtime once, when the module is imported.
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 00000000..7119f029
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,83 @@
+Trollius
+========
+
+.. warning::
+   :ref:`The Trollius project is now deprecated! <deprecated>`
+
+.. image:: trollius.jpg
+   :alt: Trollius altaicus from Khangai Mountains (Mongòlia)
+   :align: right
+   :target: http://commons.wikimedia.org/wiki/File:Trollius_altaicus.jpg
+
+Trollius provides infrastructure for writing single-threaded concurrent
+code using coroutines, multiplexing I/O access over sockets and other
+resources, running network clients and servers, and other related primitives.
+Here is a more detailed list of the package contents:
+
+* a pluggable event loop with various system-specific implementations;
+
+* transport and protocol abstractions (similar to those in `Twisted
+  `_);
+
+* concrete support for TCP, UDP, SSL, subprocess pipes, delayed calls, and
+  others (some may be system-dependent);
+
+* a ``Future`` class that mimics the one in the ``concurrent.futures`` module,
+  but adapted for use with the event loop;
+
+* coroutines and tasks based on generators (``yield``), to help write
+  concurrent code in a sequential fashion;
+
+* cancellation support for ``Future``\s and coroutines;
+
+* synchronization primitives for use between coroutines in a single thread,
+  mimicking those in the ``threading`` module;
+
+* an interface for passing work off to a threadpool, for times when you
+  absolutely, positively have to use a library that makes blocking I/O calls.
+
+Trollius is a port of the `asyncio project `_
+(the ``asyncio`` module, `PEP 3156
+<https://www.python.org/dev/peps/pep-3156/>`_) to Python 2. Trollius works on
+Python 2.7, 3.3 and 3.4. It has been tested on Windows, Linux, Mac OS X,
+FreeBSD and OpenIndiana.
+
+* `Asyncio documentation `_
+* `Trollius project in the Python Cheeseshop (PyPI)
+  `_ (download wheel packages and
+  tarballs)
+* `Trollius project at Github `_
+  (bug tracker, source code)
+* Mailing list: `python-tulip Google Group
+  `_
+* IRC: ``#asyncio`` channel on the `Freenode network `_
+* Copyright/license: Open source, Apache 2.0. Enjoy!
+
+See also the `asyncio project at Github `_.
+
+
+Table Of Contents
+=================
+
+.. toctree::
+
+   deprecated
+   using
+   install
+   libraries
+   asyncio
+   dev
+   changelog
+
+
+Trollius name
+=============
+
+Extract from the `Trollius Wikipedia article
+`_:
+
+Trollius is a genus of about 30 species of plants in the family Ranunculaceae,
+closely related to Ranunculus. The common name of some species is globeflower
+or globe flower. Native to the cool temperate regions of the Northern
+Hemisphere, with the greatest diversity of species in Asia, trollius usually
+grow in heavy, wet clay soils.
+
diff --git a/doc/install.rst b/doc/install.rst
new file mode 100644
index 00000000..d9e65ccd
--- /dev/null
+++ b/doc/install.rst
@@ -0,0 +1,125 @@
+++++++++++++++++
+Install Trollius
+++++++++++++++++
+
+.. warning::
+   :ref:`The Trollius project is now deprecated! <deprecated>`
+
+Trollius supports Python 2.7, 3.3 and 3.4.
+
+There is experimental support for Python 3.5. Known issues with Python 3.5:
+
+* asyncio coroutines are not supported
+* ``Task.get_task()`` is broken
+* ``repr(Task)`` is broken
+
+Support of Python 2.6 and 3.2 was dropped in Trollius 2.1.
+
+
+Packages for Linux
+==================
+
+* `Debian package
+  `_
+* `ArchLinux package
+  `_
+* `Fedora and CentOS package: python-trollius
+  `_
+
+
+Install Trollius on Windows using pip
+=====================================
+
+Since Trollius 0.2, `precompiled wheel packages `_
+are distributed on the Python Cheeseshop (PyPI). Procedure to install
+Trollius on Windows:
+
+* `Install pip
+  `_: download
+  ``get-pip.py`` and type::
+
+      \Python27\python.exe get-pip.py
+
+* If you already have pip, ensure that you have at least pip 1.4. If you need
+  to upgrade::
+
+      \Python27\python.exe -m pip install -U pip
+
+* Install Trollius::
+
+      \Python27\python.exe -m pip install trollius
+
+* pip also installs the ``futures`` dependency
+
+.. note::
+
+   Only wheel packages for Python 2.7, 3.3 and 3.4 are currently distributed
+   on the Cheeseshop (PyPI).
If you need wheel packages for other Python versions,
+   please ask.
+
+Download source code
+====================
+
+Command to download the development version of the source code (``trollius``
+branch)::
+
+    git clone https://github.com/haypo/trollius.git -b trollius
+
+The actual code lives in the ``trollius`` subdirectory. Tests are in the
+``tests`` subdirectory.
+
+See the `trollius project at Github
+`_.
+
+The source code of the Trollius project is in the ``trollius`` branch of the
+Git repository, not in the default branch. The default branch is the Tulip
+project; the Trollius repository is a fork of the Tulip repository.
+
+
+Dependencies
+============
+
+Trollius requires the `six `_ module.
+
+Python 2.7 requires `futures `_ to get a
+backport of ``concurrent.futures``.
+
+
+Build Trollius manually on Windows
+==================================
+
+On Windows, if you cannot use precompiled wheel packages, an extension module
+must be compiled: the ``_overlapped`` module (source code: ``overlapped.c``).
+Read `Compile Python extensions on Windows
+`_
+to prepare your environment to build the Python extension. Then build the
+extension using::
+
+    C:\Python27\python.exe setup.py build_ext
+
+.. _backports:
+
+Backports
+=========
+
+To support old Python versions, many Python modules of the standard library
+have been backported:
+
+========================  =========  =======================
+Name                      Python     Backport
+========================  =========  =======================
+OSError                   3.3        asyncio.py33_exceptions
+_overlapped               3.4        asyncio._overlapped
+_winapi                   3.3        asyncio.py33_winapi
+collections.OrderedDict   2.7, 3.1   ordereddict (PyPI)
+concurrent.futures        3.2        futures (PyPI)
+selectors                 3.4        asyncio.selectors
+ssl                       3.2, 3.3   asyncio.py3_ssl
+time.monotonic            3.3        asyncio.time_monotonic
+unittest                  2.7, 3.1   unittest2 (PyPI)
+unittest.mock             3.3        mock (PyPI)
+weakref.WeakSet           2.7, 3.0   asyncio.py27_weakrefset
+========================  =========  =======================
+
+
diff --git a/doc/libraries.rst b/doc/libraries.rst
new file mode 100644
index 00000000..9abc3c07
--- /dev/null
+++ b/doc/libraries.rst
@@ -0,0 +1,35 @@
+.. _libraries:
+
+++++++++++++++++++
+Trollius Libraries
+++++++++++++++++++
+
+.. warning::
+   :ref:`The Trollius project is now deprecated! <deprecated>`
+
+Libraries compatible with asyncio and trollius
+==============================================
+
+* `aioeventlet `_: asyncio API
+  implemented on top of eventlet
+* `aiogevent `_: asyncio API
+  implemented on top of gevent
+* `AutobahnPython `_: WebSocket &
+  WAMP for Python; it works on Trollius (Python 2.6 and 2.7), asyncio (Python
+  3.3) and Python 3.4 (asyncio), and also on Twisted.
+* `Pulsar `_: event driven concurrent
+  framework for Python. With pulsar you can write asynchronous servers
+  performing one or several activities in different threads and/or processes.
+  Trollius 0.3 requires Pulsar 0.8.2 or later. Pulsar uses the ``asyncio``
+  module if available, or imports ``trollius``.
+* `Tornado `_ supports asyncio and Trollius since
+  Tornado 3.2: `tornado.platform.asyncio — Bridge between asyncio and Tornado
+  `_. It tries to import
+  asyncio or falls back on importing trollius.
+
+Specific Ports
+==============
+
+* `trollius-redis `_:
+  A port of `asyncio-redis `_ to
+  trollius
diff --git a/doc/make.bat b/doc/make.bat
new file mode 100644
index 00000000..5789d413
--- /dev/null
+++ b/doc/make.bat
@@ -0,0 +1,190 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+	:help
+	echo.Please use `make ^<target^>` where ^<target^> is one of
+	echo.  html       to make standalone HTML files
+	echo.  dirhtml    to make HTML files named index.html in directories
+	echo.  singlehtml to make a single large HTML file
+	echo.  pickle     to make pickle files
+	echo.  json       to make JSON files
+	echo.  htmlhelp   to make HTML files and a HTML help project
+	echo.  qthelp     to make HTML files and a qthelp project
+	echo.  devhelp    to make HTML files and a Devhelp project
+	echo.  epub       to make an epub
+	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+	echo.  text       to make text files
+	echo.  man        to make manual pages
+	echo.  texinfo    to make Texinfo files
+	echo.  gettext    to make PO message catalogs
+	echo.  changes    to make an overview over all changed/added/deprecated items
+	echo.  linkcheck  to check all external links for integrity
+	echo.  doctest    to run all doctests embedded in the documentation if enabled
+	goto end
+)
+
+if "%1" == "clean" (
+	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+	del /q /s %BUILDDIR%\*
+	goto end
+)
+
+if "%1" == "html" (
+	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+	goto end
+)
+
+if "%1" == "dirhtml" (
+	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+	goto end
+)
+
+if "%1" == "singlehtml" (
+	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+	goto end
+)
+
+if "%1" == "pickle" (
+	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the pickle files.
+	goto end
+)
+
+if "%1" == "json" (
+	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the JSON files.
+	goto end
+)
+
+if "%1" == "htmlhelp" (
+	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+	goto end
+)
+
+if "%1" == "qthelp" (
+	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Trollius.qhcp
+	echo.To view the help file:
+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Trollius.qhc
+	goto end
+)
+
+if "%1" == "devhelp" (
+	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished.
+ goto end +) + +if "%1" == "epub" ( + %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub file is in %BUILDDIR%/epub. + goto end +) + +if "%1" == "latex" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "text" ( + %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The text files are in %BUILDDIR%/text. + goto end +) + +if "%1" == "man" ( + %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The manual pages are in %BUILDDIR%/man. + goto end +) + +if "%1" == "texinfo" ( + %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. + goto end +) + +if "%1" == "gettext" ( + %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The message catalogs are in %BUILDDIR%/locale. + goto end +) + +if "%1" == "changes" ( + %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + if errorlevel 1 exit /b 1 + echo. + echo.The overview file is in %BUILDDIR%/changes. + goto end +) + +if "%1" == "linkcheck" ( + %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + if errorlevel 1 exit /b 1 + echo. + echo.Link check complete; look for any errors in the above output ^ +or in %BUILDDIR%/linkcheck/output.txt. + goto end +) + +if "%1" == "doctest" ( + %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + if errorlevel 1 exit /b 1 + echo. + echo.Testing of doctests in the sources finished, look at the ^ +results in %BUILDDIR%/doctest/output.txt. + goto end +) + +:end diff --git a/doc/trollius.jpg b/doc/trollius.jpg new file mode 100644 index 00000000..f4976c72 Binary files /dev/null and b/doc/trollius.jpg differ diff --git a/doc/using.rst b/doc/using.rst new file mode 100644 index 00000000..c8185dc2 --- /dev/null +++ b/doc/using.rst @@ -0,0 +1,88 @@ +++++++++++++++ +Using Trollius +++++++++++++++ + +.. warning:: + :ref:`The Trollius project is now deprecated! ` + +Documentation of the asyncio module +=================================== + +The documentation of the asyncio is part of the Python project. It can be read +online: `asyncio - Asynchronous I/O, event loop, coroutines and tasks +`_. + +To adapt asyncio examples for Trollius, "just": + +* replace ``asyncio`` with ``trollius`` + (or use ``import trollius as asyncio``) +* replace ``yield from ...`` with ``yield From(...)`` +* replace ``yield from []`` with ``yield From(None)`` +* in coroutines, replace ``return res`` with ``raise Return(res)`` + + +Trollius Hello World +==================== + +Print ``Hello World`` every two seconds, using a coroutine:: + + import trollius + from trollius import From + + @trollius.coroutine + def greet_every_two_seconds(): + while True: + print('Hello World') + yield From(trollius.sleep(2)) + + loop = trollius.get_event_loop() + loop.run_until_complete(greet_every_two_seconds()) + + +Debug mode +========== + +To enable the debug mode: + +* Set ``TROLLIUSDEBUG`` envrironment variable to ``1`` +* Configure logging to log at level ``logging.DEBUG``, + ``logging.basicConfig(level=logging.DEBUG)`` for example + +The ``BaseEventLoop.set_debug()`` method can be used to set the debug mode on a +specific event loop. 
The environment variable enables also the debug mode for +coroutines. + +Effect of the debug mode: + +* On Python 2, :meth:`Future.set_exception` stores the traceback, so + ``loop.run_until_complete()`` raises the exception with the original + traceback. +* Log coroutines defined but never "yielded" +* BaseEventLoop.call_soon() and BaseEventLoop.call_at() methods raise an + exception if they are called from the wrong thread. +* Log the execution time of the selector +* Log callbacks taking more than 100 ms to be executed. The + BaseEventLoop.slow_callback_duration attribute is the minimum duration in + seconds of "slow" callbacks. +* Log most important subprocess events: + + - Log stdin, stdout and stderr transports and protocols + - Log process identifier (pid) + - Log connection of pipes + - Log process exit + - Log Process.communicate() tasks: feed stdin, read stdout and stderr + +* Log most important socket events: + + - Socket connected + - New client (socket.accept()) + - Connection reset or closed by peer (EOF) + - Log time elapsed in DNS resolution (getaddrinfo) + - Log pause/resume reading + - Log time of SSL handshake + - Log SSL handshake errors + +See `Debug mode of asyncio +`_ +for more information. + diff --git a/examples/cacheclt.py b/examples/cacheclt.py new file mode 100644 index 00000000..1f8ece4f --- /dev/null +++ b/examples/cacheclt.py @@ -0,0 +1,215 @@ +"""Client for cache server. + +See cachesvr.py for protocol description. +""" + +import argparse +import trollius as asyncio +from trollius import From, Return +from trollius import test_utils +import json +import logging + +ARGS = argparse.ArgumentParser(description='Cache client example.') +ARGS.add_argument( + '--tls', action='store_true', dest='tls', + default=False, help='Use TLS') +ARGS.add_argument( + '--iocp', action='store_true', dest='iocp', + default=False, help='Use IOCP event loop (Windows only)') +ARGS.add_argument( + '--host', action='store', dest='host', + default='localhost', help='Host name') +ARGS.add_argument( + '--port', action='store', dest='port', + default=54321, type=int, help='Port number') +ARGS.add_argument( + '--timeout', action='store', dest='timeout', + default=5, type=float, help='Timeout') +ARGS.add_argument( + '--max_backoff', action='store', dest='max_backoff', + default=5, type=float, help='Max backoff on reconnect') +ARGS.add_argument( + '--ntasks', action='store', dest='ntasks', + default=10, type=int, help='Number of tester tasks') +ARGS.add_argument( + '--ntries', action='store', dest='ntries', + default=5, type=int, help='Number of request tries before giving up') + + +args = ARGS.parse_args() + + +class CacheClient: + """Multiplexing cache client. + + This wraps a single connection to the cache client. The + connection is automatically re-opened when an error occurs. + + Multiple tasks may share this object; the requests will be + serialized. + + The public API is get(), set(), delete() (all are coroutines). 
+ """ + + def __init__(self, host, port, sslctx=None, loop=None): + self.host = host + self.port = port + self.sslctx = sslctx + self.loop = loop + self.todo = set() + self.initialized = False + self.task = asyncio.Task(self.activity(), loop=self.loop) + + @asyncio.coroutine + def get(self, key): + resp = yield From(self.request('get', key)) + if resp is None: + raise Return() + raise Return(resp.get('value')) + + @asyncio.coroutine + def set(self, key, value): + resp = yield From(self.request('set', key, value)) + if resp is None: + raise Return(False) + raise Return(resp.get('status') == 'ok') + + @asyncio.coroutine + def delete(self, key): + resp = yield From(self.request('delete', key)) + if resp is None: + raise Return(False) + raise Return(resp.get('status') == 'ok') + + @asyncio.coroutine + def request(self, type, key, value=None): + assert not self.task.done() + data = {'type': type, 'key': key} + if value is not None: + data['value'] = value + payload = json.dumps(data).encode('utf8') + waiter = asyncio.Future(loop=self.loop) + if self.initialized: + try: + yield From(self.send(payload, waiter)) + except IOError: + self.todo.add((payload, waiter)) + else: + self.todo.add((payload, waiter)) + result = (yield From(waiter)) + raise Return(result) + + @asyncio.coroutine + def activity(self): + backoff = 0 + while True: + try: + self.reader, self.writer = yield From(asyncio.open_connection( + self.host, self.port, ssl=self.sslctx, loop=self.loop)) + except Exception as exc: + backoff = min(args.max_backoff, backoff + (backoff//2) + 1) + logging.info('Error connecting: %r; sleep %s', exc, backoff) + yield From(asyncio.sleep(backoff, loop=self.loop)) + continue + backoff = 0 + self.next_id = 0 + self.pending = {} + self. initialized = True + try: + while self.todo: + payload, waiter = self.todo.pop() + if not waiter.done(): + yield From(self.send(payload, waiter)) + while True: + resp_id, resp = yield From(self.process()) + if resp_id in self.pending: + payload, waiter = self.pending.pop(resp_id) + if not waiter.done(): + waiter.set_result(resp) + except Exception as exc: + self.initialized = False + self.writer.close() + while self.pending: + req_id, pair = self.pending.popitem() + payload, waiter = pair + if not waiter.done(): + self.todo.add(pair) + logging.info('Error processing: %r', exc) + + @asyncio.coroutine + def send(self, payload, waiter): + self.next_id += 1 + req_id = self.next_id + frame = 'request %d %d\n' % (req_id, len(payload)) + self.writer.write(frame.encode('ascii')) + self.writer.write(payload) + self.pending[req_id] = payload, waiter + yield From(self.writer.drain()) + + @asyncio.coroutine + def process(self): + frame = yield From(self.reader.readline()) + if not frame: + raise EOFError() + head, tail = frame.split(None, 1) + if head == b'error': + raise IOError('OOB error: %r' % tail) + if head != b'response': + raise IOError('Bad frame: %r' % frame) + resp_id, resp_size = map(int, tail.split()) + data = yield From(self.reader.readexactly(resp_size)) + if len(data) != resp_size: + raise EOFError() + resp = json.loads(data.decode('utf8')) + raise Return(resp_id, resp) + + +def main(): + asyncio.set_event_loop(None) + if args.iocp: + from trollius.windows_events import ProactorEventLoop + loop = ProactorEventLoop() + else: + loop = asyncio.new_event_loop() + sslctx = None + if args.tls: + sslctx = test_utils.dummy_ssl_context() + cache = CacheClient(args.host, args.port, sslctx=sslctx, loop=loop) + try: + loop.run_until_complete( + asyncio.gather( + *[testing(i, 
cache, loop) for i in range(args.ntasks)], + loop=loop)) + finally: + loop.close() + + +@asyncio.coroutine +def testing(label, cache, loop): + + def w(g): + return asyncio.wait_for(g, args.timeout, loop=loop) + + key = 'foo-%s' % label + while True: + logging.info('%s %s', label, '-'*20) + try: + ret = yield From(w(cache.set(key, 'hello-%s-world' % label))) + logging.info('%s set %s', label, ret) + ret = yield From(w(cache.get(key))) + logging.info('%s get %s', label, ret) + ret = yield From(w(cache.delete(key))) + logging.info('%s del %s', label, ret) + ret = yield From(w(cache.get(key))) + logging.info('%s get2 %s', label, ret) + except asyncio.TimeoutError: + logging.warn('%s Timeout', label) + except Exception as exc: + logging.exception('%s Client exception: %r', label, exc) + break + + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + main() diff --git a/examples/cachesvr.py b/examples/cachesvr.py new file mode 100644 index 00000000..20a54e4a --- /dev/null +++ b/examples/cachesvr.py @@ -0,0 +1,250 @@ +"""A simple memcache-like server. + +The basic data structure maintained is a single in-memory dictionary +mapping string keys to string values, with operations get, set and +delete. (Both keys and values may contain Unicode.) + +This is a TCP server listening on port 54321. There is no +authentication. + +Requests provide an operation and return a response. A connection may +be used for multiple requests. The connection is closed when a client +sends a bad request. + +If a client is idle for over 5 seconds (i.e., it does not send another +request, or fails to read the whole response, within this time), it is +disconnected. + +Framing of requests and responses within a connection uses a +line-based protocol. The first line of a request is the frame header +and contains three whitespace-delimited token followed by LF or CRLF: + +- the keyword 'request' +- a decimal request ID; the first request is '1', the second '2', etc. +- a decimal byte count giving the size of the rest of the request + +Note that the requests ID *must* be consecutive and start at '1' for +each connection. + +Response frames look the same except the keyword is 'response'. The +response ID matches the request ID. There should be exactly one +response to each request and responses should be seen in the same +order as the requests. + +After the frame, individual requests and responses are JSON encoded. + +If the frame header or the JSON request body cannot be parsed, an +unframed error message (always starting with 'error') is written back +and the connection is closed. 
+ +JSON-encoded requests can be: + +- {"type": "get", "key": } +- {"type": "set", "key": , "value": } +- {"type": "delete", "key": } + +Responses are also JSON-encoded: + +- {"status": "ok", "value": } # Successful get request +- {"status": "ok"} # Successful set or delete request +- {"status": "notfound"} # Key not found for get or delete request + +If the request is valid JSON but cannot be handled (e.g., the type or +key field is absent or invalid), an error response of the following +form is returned, but the connection is not closed: + +- {"error": } +""" + +import argparse +import trollius as asyncio +from trollius import From +import json +import logging +import os +import random + +ARGS = argparse.ArgumentParser(description='Cache server example.') +ARGS.add_argument( + '--tls', action='store_true', dest='tls', + default=False, help='Use TLS') +ARGS.add_argument( + '--iocp', action='store_true', dest='iocp', + default=False, help='Use IOCP event loop (Windows only)') +ARGS.add_argument( + '--host', action='store', dest='host', + default='localhost', help='Host name') +ARGS.add_argument( + '--port', action='store', dest='port', + default=54321, type=int, help='Port number') +ARGS.add_argument( + '--timeout', action='store', dest='timeout', + default=5, type=float, help='Timeout') +ARGS.add_argument( + '--random_failure_percent', action='store', dest='fail_percent', + default=0, type=float, help='Fail randomly N percent of the time') +ARGS.add_argument( + '--random_failure_sleep', action='store', dest='fail_sleep', + default=0, type=float, help='Sleep time when randomly failing') +ARGS.add_argument( + '--random_response_sleep', action='store', dest='resp_sleep', + default=0, type=float, help='Sleep time before responding') + +args = ARGS.parse_args() + + +class Cache: + + def __init__(self, loop): + self.loop = loop + self.table = {} + + @asyncio.coroutine + def handle_client(self, reader, writer): + # Wrapper to log stuff and close writer (i.e., transport). + peer = writer.get_extra_info('socket').getpeername() + logging.info('got a connection from %s', peer) + try: + yield From(self.frame_parser(reader, writer)) + except Exception as exc: + logging.error('error %r from %s', exc, peer) + else: + logging.info('end connection from %s', peer) + finally: + writer.close() + + @asyncio.coroutine + def frame_parser(self, reader, writer): + # This takes care of the framing. + last_request_id = 0 + while True: + # Read the frame header, parse it, read the data. + # NOTE: The readline() and readexactly() calls will hang + # if the client doesn't send enough data but doesn't + # disconnect either. We add a timeout to each. (But the + # timeout should really be implemented by StreamReader.) + framing_b = yield From(asyncio.wait_for( + reader.readline(), + timeout=args.timeout, loop=self.loop)) + if random.random()*100 < args.fail_percent: + logging.warn('Inserting random failure') + yield From(asyncio.sleep(args.fail_sleep*random.random(), + loop=self.loop)) + writer.write(b'error random failure\r\n') + break + logging.debug('framing_b = %r', framing_b) + if not framing_b: + break # Clean close. 
+ try: + frame_keyword, request_id_b, byte_count_b = framing_b.split() + except ValueError: + writer.write(b'error unparseable frame\r\n') + break + if frame_keyword != b'request': + writer.write(b'error frame does not start with request\r\n') + break + try: + request_id, byte_count = int(request_id_b), int(byte_count_b) + except ValueError: + writer.write(b'error unparsable frame parameters\r\n') + break + if request_id != last_request_id + 1 or byte_count < 2: + writer.write(b'error invalid frame parameters\r\n') + break + last_request_id = request_id + request_b = yield From(asyncio.wait_for( + reader.readexactly(byte_count), + timeout=args.timeout, loop=self.loop)) + try: + request = json.loads(request_b.decode('utf8')) + except ValueError: + writer.write(b'error unparsable json\r\n') + break + response = self.handle_request(request) # Not a coroutine. + if response is None: + writer.write(b'error unhandlable request\r\n') + break + response_b = json.dumps(response).encode('utf8') + b'\r\n' + byte_count = len(response_b) + framing_s = 'response {0} {1}\r\n'.format(request_id, byte_count) + writer.write(framing_s.encode('ascii')) + yield From(asyncio.sleep(args.resp_sleep*random.random(), + loop=self.loop)) + writer.write(response_b) + + def handle_request(self, request): + # This parses one request and farms it out to a specific handler. + # Return None for all errors. + if not isinstance(request, dict): + return {'error': 'request is not a dict'} + request_type = request.get('type') + if request_type is None: + return {'error': 'no type in request'} + if request_type not in {'get', 'set', 'delete'}: + return {'error': 'unknown request type'} + key = request.get('key') + if not isinstance(key, str): + return {'error': 'key is not a string'} + if request_type == 'get': + return self.handle_get(key) + if request_type == 'set': + value = request.get('value') + if not isinstance(value, str): + return {'error': 'value is not a string'} + return self.handle_set(key, value) + if request_type == 'delete': + return self.handle_delete(key) + assert False, 'bad request type' # Should have been caught above. + + def handle_get(self, key): + value = self.table.get(key) + if value is None: + return {'status': 'notfound'} + else: + return {'status': 'ok', 'value': value} + + def handle_set(self, key, value): + self.table[key] = value + return {'status': 'ok'} + + def handle_delete(self, key): + if key not in self.table: + return {'status': 'notfound'} + else: + del self.table[key] + return {'status': 'ok'} + + +def main(): + asyncio.set_event_loop(None) + if args.iocp: + from trollius.windows_events import ProactorEventLoop + loop = ProactorEventLoop() + else: + loop = asyncio.new_event_loop() + sslctx = None + if args.tls: + import ssl + # TODO: take cert/key from args as well. 
+ here = os.path.join(os.path.dirname(__file__), '..', 'tests') + sslctx = asyncio.SSLContext(ssl.PROTOCOL_SSLv23) + sslctx.options |= ssl.OP_NO_SSLv2 + sslctx.load_cert_chain( + certfile=os.path.join(here, 'ssl_cert.pem'), + keyfile=os.path.join(here, 'ssl_key.pem')) + cache = Cache(loop) + task = asyncio.streams.start_server(cache.handle_client, + args.host, args.port, + ssl=sslctx, loop=loop) + svr = loop.run_until_complete(task) + for sock in svr.sockets: + logging.info('socket %s', sock.getsockname()) + try: + loop.run_forever() + finally: + loop.close() + + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + main() diff --git a/examples/child_process.py b/examples/child_process.py new file mode 100644 index 00000000..9e403a4d --- /dev/null +++ b/examples/child_process.py @@ -0,0 +1,129 @@ +""" +Example of asynchronous interaction with a child python process. + +This example shows how to attach an existing Popen object and use the low level +transport-protocol API. See shell.py and subprocess_shell.py for higher level +examples. +""" + +import os +import sys + +try: + import trollius as asyncio +except ImportError: + # asyncio is not installed + sys.path.append(os.path.join(os.path.dirname(__file__), '..')) + import trollius as asyncio +from trollius import From, Return + +if sys.platform == 'win32': + from trollius.windows_utils import Popen, PIPE + from trollius.windows_events import ProactorEventLoop +else: + from subprocess import Popen, PIPE + +# +# Return a write-only transport wrapping a writable pipe +# + +@asyncio.coroutine +def connect_write_pipe(file): + loop = asyncio.get_event_loop() + transport, _ = yield From(loop.connect_write_pipe(asyncio.Protocol, file)) + raise Return(transport) + +# +# Wrap a readable pipe in a stream +# + +@asyncio.coroutine +def connect_read_pipe(file): + loop = asyncio.get_event_loop() + stream_reader = asyncio.StreamReader(loop=loop) + def factory(): + return asyncio.StreamReaderProtocol(stream_reader) + transport, _ = yield From(loop.connect_read_pipe(factory, file)) + raise Return(stream_reader, transport) + + +# +# Example +# + +@asyncio.coroutine +def main(loop): + # program which prints evaluation of each expression from stdin + code = r'''if 1: + import os + def writeall(fd, buf): + while buf: + n = os.write(fd, buf) + buf = buf[n:] + while True: + s = os.read(0, 1024) + if not s: + break + s = s.decode('ascii') + s = repr(eval(s)) + '\n' + s = s.encode('ascii') + writeall(1, s) + ''' + + # commands to send to input + commands = iter([b"1+1\n", + b"2**16\n", + b"1/3\n", + b"'x'*50", + b"1/0\n"]) + + # start subprocess and wrap stdin, stdout, stderr + p = Popen([sys.executable, '-c', code], + stdin=PIPE, stdout=PIPE, stderr=PIPE) + + stdin = yield From(connect_write_pipe(p.stdin)) + stdout, stdout_transport = yield From(connect_read_pipe(p.stdout)) + stderr, stderr_transport = yield From(connect_read_pipe(p.stderr)) + + # interact with subprocess + name = {stdout:'OUT', stderr:'ERR'} + registered = {asyncio.Task(stderr.readline()): stderr, + asyncio.Task(stdout.readline()): stdout} + while registered: + # write command + cmd = next(commands, None) + if cmd is None: + stdin.close() + else: + print('>>>', cmd.decode('ascii').rstrip()) + stdin.write(cmd) + + # get and print lines from stdout, stderr + timeout = None + while registered: + done, pending = yield From(asyncio.wait( + registered, timeout=timeout, + return_when=asyncio.FIRST_COMPLETED)) + if not done: + break + for f in done: + stream = registered.pop(f) + res = 
f.result() + print(name[stream], res.decode('ascii').rstrip()) + if res != b'': + registered[asyncio.Task(stream.readline())] = stream + timeout = 0.0 + + stdout_transport.close() + stderr_transport.close() + +if __name__ == '__main__': + if sys.platform == 'win32': + loop = ProactorEventLoop() + asyncio.set_event_loop(loop) + else: + loop = asyncio.get_event_loop() + try: + loop.run_until_complete(main(loop)) + finally: + loop.close() diff --git a/examples/crawl.py b/examples/crawl.py new file mode 100644 index 00000000..7f540593 --- /dev/null +++ b/examples/crawl.py @@ -0,0 +1,876 @@ +#!/usr/bin/env python + +"""A simple web crawler.""" + +from __future__ import print_function + +# TODO: +# - More organized logging (with task ID or URL?). +# - Use logging module for Logger. +# - KeyboardInterrupt in HTML parsing may hang or report unretrieved error. +# - Support gzip encoding. +# - Close connection if HTTP/1.0 response. +# - Add timeouts. (E.g. when switching networks, all seems to hang.) +# - Add arguments to specify TLS settings (e.g. cert/key files). +# - Skip reading large non-text/html files? +# - Use ETag and If-Modified-Since? +# - Handle out of file descriptors directly? (How?) + +import argparse +import trollius as asyncio +from trollius import From, Return +import asyncio.locks +import cgi +import logging +import re +import sys +import time +try: + from httplib import BadStatusLine + import urlparse + from urllib import splitport as urllib_splitport +except ImportError: + # Python 3 + from http.client import BadStatusLine + from urllib import parse as urlparse + from urllib.parse import splitport as urllib_splitport + + +ARGS = argparse.ArgumentParser(description="Web crawler") +ARGS.add_argument( + '--iocp', action='store_true', dest='iocp', + default=False, help='Use IOCP event loop (Windows only)') +ARGS.add_argument( + '--select', action='store_true', dest='select', + default=False, help='Use Select event loop instead of default') +ARGS.add_argument( + 'roots', nargs='*', + default=[], help='Root URL (may be repeated)') +ARGS.add_argument( + '--max_redirect', action='store', type=int, metavar='N', + default=10, help='Limit redirection chains (for 301, 302 etc.)') +ARGS.add_argument( + '--max_tries', action='store', type=int, metavar='N', + default=4, help='Limit retries on network errors') +ARGS.add_argument( + '--max_tasks', action='store', type=int, metavar='N', + default=100, help='Limit concurrent connections') +ARGS.add_argument( + '--max_pool', action='store', type=int, metavar='N', + default=100, help='Limit connection pool size') +ARGS.add_argument( + '--exclude', action='store', metavar='REGEX', + help='Exclude matching URLs') +ARGS.add_argument( + '--strict', action='store_true', + default=True, help='Strict host matching (default)') +ARGS.add_argument( + '--lenient', action='store_false', dest='strict', + default=False, help='Lenient host matching') +ARGS.add_argument( + '-v', '--verbose', action='count', dest='level', + default=1, help='Verbose logging (repeat for more verbose)') +ARGS.add_argument( + '-q', '--quiet', action='store_const', const=0, dest='level', + default=1, help='Quiet logging (opposite of --verbose)') + + +ESCAPES = [('quot', '"'), + ('gt', '>'), + ('lt', '<'), + ('amp', '&') # Must be last. + ] + + +def unescape(url): + """Turn & into &, and so on. + + This is the inverse of cgi.escape(). 
+ """ + for name, char in ESCAPES: + url = url.replace('&' + name + ';', char) + return url + + +def fix_url(url): + """Prefix a schema-less URL with http://.""" + if '://' not in url: + url = 'http://' + url + return url + + +class Logger: + + def __init__(self, level): + self.level = level + + def _log(self, n, args): + if self.level >= n: + print(*args, file=sys.stderr) + sys.stderr.flush() + + def log(self, n, *args): + self._log(n, args) + + def __call__(self, n, *args): + self._log(n, args) + + +class ConnectionPool: + """A connection pool. + + To open a connection, use reserve(). To recycle it, use unreserve(). + + The pool is mostly just a mapping from (host, port, ssl) tuples to + lists of Connections. The currently active connections are *not* + in the data structure; get_connection() takes the connection out, + and recycle_connection() puts it back in. To recycle a + connection, call conn.close(recycle=True). + + There are limits to both the overall pool and the per-key pool. + """ + + def __init__(self, log, max_pool=10, max_tasks=5): + self.log = log + self.max_pool = max_pool # Overall limit. + self.max_tasks = max_tasks # Per-key limit. + self.loop = asyncio.get_event_loop() + self.connections = {} # {(host, port, ssl): [Connection, ...], ...} + self.queue = [] # [Connection, ...] + + def close(self): + """Close all connections available for reuse.""" + for conns in self.connections.values(): + for conn in conns: + conn.close() + self.connections.clear() + del self.queue[:] + + @asyncio.coroutine + def get_connection(self, host, port, ssl): + """Create or reuse a connection.""" + port = port or (443 if ssl else 80) + try: + ipaddrs = yield From(self.loop.getaddrinfo(host, port)) + except Exception as exc: + self.log(0, 'Exception %r for (%r, %r)' % (exc, host, port)) + raise + self.log(1, '* %s resolves to %s' % + (host, ', '.join(ip[4][0] for ip in ipaddrs))) + + # Look for a reusable connection. + for _, _, _, _, addr in ipaddrs: + h, p = addr[:2] + key = h, p, ssl + conn = None + conns = self.connections.get(key) + while conns: + conn = conns.pop(0) + self.queue.remove(conn) + if not conns: + del self.connections[key] + if conn.stale(): + self.log(1, 'closing stale connection for', key) + conn.close() # Just in case. + else: + self.log(1, '* Reusing pooled connection', key, + 'FD =', conn.fileno()) + raise Return(conn) + + # Create a new connection. + conn = Connection(self.log, self, host, port, ssl) + yield From(conn.connect()) + self.log(1, '* New connection', conn.key, 'FD =', conn.fileno()) + raise Return(conn) + + def recycle_connection(self, conn): + """Make a connection available for reuse. + + This also prunes the pool if it exceeds the size limits. + """ + if conn.stale(): + conn.close() + return + + key = conn.key + conns = self.connections.setdefault(key, []) + conns.append(conn) + self.queue.append(conn) + + if len(conns) <= self.max_tasks and len(self.queue) <= self.max_pool: + return + + # Prune the queue. + + # Close stale connections for this key first. + stale = [conn for conn in conns if conn.stale()] + if stale: + for conn in stale: + conns.remove(conn) + self.queue.remove(conn) + self.log(1, 'closing stale connection for', key) + conn.close() + if not conns: + del self.connections[key] + + # Close oldest connection(s) for this key if limit reached. 
+ while len(conns) > self.max_tasks: + conn = conns.pop(0) + self.queue.remove(conn) + self.log(1, 'closing oldest connection for', key) + conn.close() + + if len(self.queue) <= self.max_pool: + return + + # Close overall stale connections. + stale = [conn for conn in self.queue if conn.stale()] + if stale: + for conn in stale: + conns = self.connections.get(conn.key) + conns.remove(conn) + self.queue.remove(conn) + self.log(1, 'closing stale connection for', key) + conn.close() + + # Close oldest overall connection(s) if limit reached. + while len(self.queue) > self.max_pool: + conn = self.queue.pop(0) + conns = self.connections.get(conn.key) + c = conns.pop(0) + assert conn == c, (conn.key, conn, c, conns) + self.log(1, 'closing overall oldest connection for', conn.key) + conn.close() + + +class Connection: + + def __init__(self, log, pool, host, port, ssl): + self.log = log + self.pool = pool + self.host = host + self.port = port + self.ssl = ssl + self.reader = None + self.writer = None + self.key = None + + def stale(self): + return self.reader is None or self.reader.at_eof() + + def fileno(self): + writer = self.writer + if writer is not None: + transport = writer.transport + if transport is not None: + sock = transport.get_extra_info('socket') + if sock is not None: + return sock.fileno() + return None + + @asyncio.coroutine + def connect(self): + self.reader, self.writer = yield From(asyncio.open_connection( + self.host, self.port, ssl=self.ssl)) + peername = self.writer.get_extra_info('peername') + if peername: + self.host, self.port = peername[:2] + else: + self.log(1, 'NO PEERNAME???', self.host, self.port, self.ssl) + self.key = self.host, self.port, self.ssl + + def close(self, recycle=False): + if recycle and not self.stale(): + self.pool.recycle_connection(self) + else: + self.writer.close() + self.pool = self.reader = self.writer = None + + +class Request: + """HTTP request. + + Use connect() to open a connection; send_request() to send the + request; get_response() to receive the response headers. + """ + + def __init__(self, log, url, pool): + self.log = log + self.url = url + self.pool = pool + self.parts = urlparse.urlparse(self.url) + self.scheme = self.parts.scheme + assert self.scheme in ('http', 'https'), repr(url) + self.ssl = self.parts.scheme == 'https' + self.netloc = self.parts.netloc + self.hostname = self.parts.hostname + self.port = self.parts.port or (443 if self.ssl else 80) + self.path = (self.parts.path or '/') + self.query = self.parts.query + if self.query: + self.full_path = '%s?%s' % (self.path, self.query) + else: + self.full_path = self.path + self.http_version = 'HTTP/1.1' + self.method = 'GET' + self.headers = [] + self.conn = None + + @asyncio.coroutine + def connect(self): + """Open a connection to the server.""" + self.log(1, '* Connecting to %s:%s using %s for %s' % + (self.hostname, self.port, + 'ssl' if self.ssl else 'tcp', + self.url)) + self.conn = yield From(self.pool.get_connection(self.hostname, + self.port, self.ssl)) + + def close(self, recycle=False): + """Close the connection, recycle if requested.""" + if self.conn is not None: + if not recycle: + self.log(1, 'closing connection for', self.conn.key) + self.conn.close(recycle) + self.conn = None + + @asyncio.coroutine + def putline(self, line): + """Write a line to the connection. + + Used for the request line and headers. 
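+        Each line is encoded as latin-1 and terminated with CRLF, as HTTP
+        requires.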
+ """ + self.log(2, '>', line) + self.conn.writer.write(line.encode('latin-1') + b'\r\n') + + @asyncio.coroutine + def send_request(self): + """Send the request.""" + request_line = '%s %s %s' % (self.method, self.full_path, + self.http_version) + yield From(self.putline(request_line)) + # TODO: What if a header is already set? + self.headers.append(('User-Agent', 'asyncio-example-crawl/0.0')) + self.headers.append(('Host', self.netloc)) + self.headers.append(('Accept', '*/*')) + ##self.headers.append(('Accept-Encoding', 'gzip')) + for key, value in self.headers: + line = '%s: %s' % (key, value) + yield From(self.putline(line)) + yield From(self.putline('')) + + @asyncio.coroutine + def get_response(self): + """Receive the response.""" + response = Response(self.log, self.conn.reader) + yield From(response.read_headers()) + raise Return(response) + + +class Response: + """HTTP response. + + Call read_headers() to receive the request headers. Then check + the status attribute and call get_header() to inspect the headers. + Finally call read() to receive the body. + """ + + def __init__(self, log, reader): + self.log = log + self.reader = reader + self.http_version = None # 'HTTP/1.1' + self.status = None # 200 + self.reason = None # 'Ok' + self.headers = [] # [('Content-Type', 'text/html')] + + @asyncio.coroutine + def getline(self): + """Read one line from the connection.""" + line = (yield From(self.reader.readline())) + line = line.decode('latin-1').rstrip() + self.log(2, '<', line) + raise Return(line) + + @asyncio.coroutine + def read_headers(self): + """Read the response status and the request headers.""" + status_line = yield From(self.getline()) + status_parts = status_line.split(None, 2) + if len(status_parts) != 3: + self.log(0, 'bad status_line', repr(status_line)) + raise BadStatusLine(status_line) + self.http_version, status, self.reason = status_parts + self.status = int(status) + while True: + header_line = yield From(self.getline()) + if not header_line: + break + # TODO: Continuation lines. + key, value = header_line.split(':', 1) + self.headers.append((key, value.strip())) + + def get_redirect_url(self, default=''): + """Inspect the status and return the redirect url if appropriate.""" + if self.status not in (300, 301, 302, 303, 307): + return default + return self.get_header('Location', default) + + def get_header(self, key, default=''): + """Get one header value, using a case insensitive header name.""" + key = key.lower() + for k, v in self.headers: + if k.lower() == key: + return v + return default + + @asyncio.coroutine + def read(self): + """Read the response body. + + This honors Content-Length and Transfer-Encoding: chunked. 
+ """ + nbytes = None + for key, value in self.headers: + if key.lower() == 'content-length': + nbytes = int(value) + break + if nbytes is None: + if self.get_header('transfer-encoding').lower() == 'chunked': + self.log(2, 'parsing chunked response') + blocks = [] + while True: + size_header = yield From(self.reader.readline()) + if not size_header: + self.log(0, 'premature end of chunked response') + break + self.log(3, 'size_header =', repr(size_header)) + parts = size_header.split(b';') + size = int(parts[0], 16) + if size: + self.log(3, 'reading chunk of', size, 'bytes') + block = yield From(self.reader.readexactly(size)) + assert len(block) == size, (len(block), size) + blocks.append(block) + crlf = yield From(self.reader.readline()) + assert crlf == b'\r\n', repr(crlf) + if not size: + break + body = b''.join(blocks) + self.log(1, 'chunked response had', len(body), + 'bytes in', len(blocks), 'blocks') + else: + self.log(3, 'reading until EOF') + body = yield From(self.reader.read()) + # TODO: Should make sure not to recycle the connection + # in this case. + else: + body = yield From(self.reader.readexactly(nbytes)) + raise Return(body) + + +class Fetcher: + """Logic and state for one URL. + + When found in crawler.busy, this represents a URL to be fetched or + in the process of being fetched; when found in crawler.done, this + holds the results from fetching it. + + This is usually associated with a task. This references the + crawler for the connection pool and to add more URLs to its todo + list. + + Call fetch() to do the fetching, then report() to print the results. + """ + + def __init__(self, log, url, crawler, max_redirect=10, max_tries=4): + self.log = log + self.url = url + self.crawler = crawler + # We don't loop resolving redirects here -- we just use this + # to decide whether to add the redirect URL to crawler.todo. + self.max_redirect = max_redirect + # But we do loop to retry on errors a few times. + self.max_tries = max_tries + # Everything we collect from the response goes here. + self.task = None + self.exceptions = [] + self.tries = 0 + self.request = None + self.response = None + self.body = None + self.next_url = None + self.ctype = None + self.pdict = None + self.encoding = None + self.urls = None + self.new_urls = None + + @asyncio.coroutine + def fetch(self): + """Attempt to fetch the contents of the URL. + + If successful, and the data is HTML, extract further links and + add them to the crawler. Redirects are also added back there. + """ + while self.tries < self.max_tries: + self.tries += 1 + self.request = None + try: + self.request = Request(self.log, self.url, self.crawler.pool) + yield From(self.request.connect()) + yield From(self.request.send_request()) + self.response = yield From(self.request.get_response()) + self.body = yield From(self.response.read()) + h_conn = self.response.get_header('connection').lower() + if h_conn != 'close': + self.request.close(recycle=True) + self.request = None + if self.tries > 1: + self.log(1, 'try', self.tries, 'for', self.url, 'success') + break + except (BadStatusLine, OSError) as exc: + self.exceptions.append(exc) + self.log(1, 'try', self.tries, 'for', self.url, + 'raised', repr(exc)) + ##import pdb; pdb.set_trace() + # Don't reuse the connection in this case. + finally: + if self.request is not None: + self.request.close() + else: + # We never broke out of the while loop, i.e. all tries failed. 
+ self.log(0, 'no success for', self.url, + 'in', self.max_tries, 'tries') + return + next_url = self.response.get_redirect_url() + if next_url: + self.next_url = urlparse.urljoin(self.url, next_url) + if self.max_redirect > 0: + self.log(1, 'redirect to', self.next_url, 'from', self.url) + self.crawler.add_url(self.next_url, self.max_redirect-1) + else: + self.log(0, 'redirect limit reached for', self.next_url, + 'from', self.url) + else: + if self.response.status == 200: + self.ctype = self.response.get_header('content-type') + self.pdict = {} + if self.ctype: + self.ctype, self.pdict = cgi.parse_header(self.ctype) + self.encoding = self.pdict.get('charset', 'utf-8') + if self.ctype == 'text/html': + body = self.body.decode(self.encoding, 'replace') + # Replace href with (?:href|src) to follow image links. + self.urls = set(re.findall(r'(?i)href=["\']?([^\s"\'<>]+)', + body)) + if self.urls: + self.log(1, 'got', len(self.urls), + 'distinct urls from', self.url) + self.new_urls = set() + for url in self.urls: + url = unescape(url) + url = urlparse.urljoin(self.url, url) + url, frag = urlparse.urldefrag(url) + if self.crawler.add_url(url): + self.new_urls.add(url) + + def report(self, stats, file=None): + """Print a report on the state for this URL. + + Also update the Stats instance. + """ + if self.task is not None: + if not self.task.done(): + stats.add('pending') + print(self.url, 'pending', file=file) + return + elif self.task.cancelled(): + stats.add('cancelled') + print(self.url, 'cancelled', file=file) + return + elif self.task.exception(): + stats.add('exception') + exc = self.task.exception() + stats.add('exception_' + exc.__class__.__name__) + print(self.url, exc, file=file) + return + if len(self.exceptions) == self.tries: + stats.add('fail') + exc = self.exceptions[-1] + stats.add('fail_' + str(exc.__class__.__name__)) + print(self.url, 'error', exc, file=file) + elif self.next_url: + stats.add('redirect') + print(self.url, self.response.status, 'redirect', self.next_url, + file=file) + elif self.ctype == 'text/html': + stats.add('html') + size = len(self.body or b'') + stats.add('html_bytes', size) + print(self.url, self.response.status, + self.ctype, self.encoding, + size, + '%d/%d' % (len(self.new_urls or ()), len(self.urls or ())), + file=file) + elif self.response is None: + print(self.url, 'no response object') + else: + size = len(self.body or b'') + if self.response.status == 200: + stats.add('other') + stats.add('other_bytes', size) + else: + stats.add('error') + stats.add('error_bytes', size) + stats.add('status_%s' % self.response.status) + print(self.url, self.response.status, + self.ctype, self.encoding, + size, + file=file) + + +class Stats: + """Record stats of various sorts.""" + + def __init__(self): + self.stats = {} + + def add(self, key, count=1): + self.stats[key] = self.stats.get(key, 0) + count + + def report(self, file=None): + for key, count in sorted(self.stats.items()): + print('%10d' % count, key, file=file) + + +class Crawler: + """Crawl a set of URLs. + + This manages three disjoint sets of URLs (todo, busy, done). The + data structures actually store dicts -- the values in todo give + the redirect limit, while the values in busy and done are Fetcher + instances. + """ + def __init__(self, log, + roots, exclude=None, strict=True, # What to crawl. + max_redirect=10, max_tries=4, # Per-url limits. + max_tasks=10, max_pool=10, # Global limits. 
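+                 # (These defaults are placeholders; main() below passes the
+                 # values parsed from ARGS.)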
+ ): + self.log = log + self.roots = roots + self.exclude = exclude + self.strict = strict + self.max_redirect = max_redirect + self.max_tries = max_tries + self.max_tasks = max_tasks + self.max_pool = max_pool + self.todo = {} + self.busy = {} + self.done = {} + self.pool = ConnectionPool(self.log, max_pool, max_tasks) + self.root_domains = set() + for root in roots: + parts = urlparse.urlparse(root) + host, port = urllib_splitport(parts.netloc) + if not host: + continue + if re.match(r'\A[\d\.]*\Z', host): + self.root_domains.add(host) + else: + host = host.lower() + if self.strict: + self.root_domains.add(host) + if host.startswith('www.'): + self.root_domains.add(host[4:]) + else: + self.root_domains.add('www.' + host) + else: + parts = host.split('.') + if len(parts) > 2: + host = '.'.join(parts[-2:]) + self.root_domains.add(host) + for root in roots: + self.add_url(root) + self.governor = asyncio.locks.Semaphore(max_tasks) + self.termination = asyncio.locks.Condition() + self.t0 = time.time() + self.t1 = None + + def close(self): + """Close resources (currently only the pool).""" + self.pool.close() + + def host_okay(self, host): + """Check if a host should be crawled. + + A literal match (after lowercasing) is always good. For hosts + that don't look like IP addresses, some approximate matches + are okay depending on the strict flag. + """ + host = host.lower() + if host in self.root_domains: + return True + if re.match(r'\A[\d\.]*\Z', host): + return False + if self.strict: + return self._host_okay_strictish(host) + else: + return self._host_okay_lenient(host) + + def _host_okay_strictish(self, host): + """Check if a host should be crawled, strict-ish version. + + This checks for equality modulo an initial 'www.' component. + """ + if host.startswith('www.'): + if host[4:] in self.root_domains: + return True + else: + if 'www.' + host in self.root_domains: + return True + return False + + def _host_okay_lenient(self, host): + """Check if a host should be crawled, lenient version. + + This compares the last two components of the host. + """ + parts = host.split('.') + if len(parts) > 2: + host = '.'.join(parts[-2:]) + return host in self.root_domains + + def add_url(self, url, max_redirect=None): + """Add a URL to the todo list if not seen before.""" + if self.exclude and re.search(self.exclude, url): + return False + parts = urlparse.urlparse(url) + if parts.scheme not in ('http', 'https'): + self.log(2, 'skipping non-http scheme in', url) + return False + host, port = urllib_splitport(parts.netloc) + if not self.host_okay(host): + self.log(2, 'skipping non-root host in', url) + return False + if max_redirect is None: + max_redirect = self.max_redirect + if url in self.todo or url in self.busy or url in self.done: + return False + self.log(1, 'adding', url, max_redirect) + self.todo[url] = max_redirect + return True + + @asyncio.coroutine + def crawl(self): + """Run the crawler until all finished.""" + with (yield From(self.termination)): + while self.todo or self.busy: + if self.todo: + url, max_redirect = self.todo.popitem() + fetcher = Fetcher(self.log, url, + crawler=self, + max_redirect=max_redirect, + max_tries=self.max_tries, + ) + self.busy[url] = fetcher + fetcher.task = asyncio.Task(self.fetch(fetcher)) + else: + yield From(self.termination.wait()) + self.t1 = time.time() + + @asyncio.coroutine + def fetch(self, fetcher): + """Call the Fetcher's fetch(), with a limit on concurrency. + + Once this returns, move the fetcher from busy to done. 
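+        Concurrency is capped by self.governor, the semaphore created in
+        __init__ with max_tasks slots.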
+ """ + url = fetcher.url + with (yield From(self.governor)): + try: + yield From(fetcher.fetch()) # Fetcher gonna fetch. + finally: + # Force GC of the task, so the error is logged. + fetcher.task = None + with (yield From(self.termination)): + self.done[url] = fetcher + del self.busy[url] + self.termination.notify() + + def report(self, file=None): + """Print a report on all completed URLs.""" + if self.t1 is None: + self.t1 = time.time() + dt = self.t1 - self.t0 + if dt and self.max_tasks: + speed = len(self.done) / dt / self.max_tasks + else: + speed = 0 + stats = Stats() + print('*** Report ***', file=file) + try: + show = [] + show.extend(self.done.items()) + show.extend(self.busy.items()) + show.sort() + for url, fetcher in show: + fetcher.report(stats, file=file) + except KeyboardInterrupt: + print('\nInterrupted', file=file) + print('Finished', len(self.done), + 'urls in %.3f secs' % dt, + '(max_tasks=%d)' % self.max_tasks, + '(%.3f urls/sec/task)' % speed, + file=file) + stats.report(file=file) + print('Todo:', len(self.todo), file=file) + print('Busy:', len(self.busy), file=file) + print('Done:', len(self.done), file=file) + print('Date:', time.ctime(), 'local time', file=file) + + +def main(): + """Main program. + + Parse arguments, set up event loop, run crawler, print report. + """ + args = ARGS.parse_args() + if not args.roots: + print('Use --help for command line help') + return + + log = Logger(args.level) + + if args.iocp: + from trollius.windows_events import ProactorEventLoop + loop = ProactorEventLoop() + asyncio.set_event_loop(loop) + elif args.select: + loop = asyncio.SelectorEventLoop() + asyncio.set_event_loop(loop) + else: + loop = asyncio.get_event_loop() + + roots = {fix_url(root) for root in args.roots} + + crawler = Crawler(log, + roots, exclude=args.exclude, + strict=args.strict, + max_redirect=args.max_redirect, + max_tries=args.max_tries, + max_tasks=args.max_tasks, + max_pool=args.max_pool, + ) + try: + loop.run_until_complete(crawler.crawl()) # Crawler gonna crawl. 
+ except KeyboardInterrupt: + sys.stderr.flush() + print('\nInterrupted\n') + finally: + crawler.report() + crawler.close() + loop.close() + + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + main() diff --git a/examples/echo_client_tulip.py b/examples/echo_client_tulip.py new file mode 100644 index 00000000..0a609260 --- /dev/null +++ b/examples/echo_client_tulip.py @@ -0,0 +1,21 @@ +import trollius as asyncio +from trollius import From + +END = b'Bye-bye!\n' + +@asyncio.coroutine +def echo_client(): + reader, writer = yield From(asyncio.open_connection('localhost', 8000)) + writer.write(b'Hello, world\n') + writer.write(b'What a fine day it is.\n') + writer.write(END) + while True: + line = yield From(reader.readline()) + print('received:', line) + if line == END or not line: + break + writer.close() + +loop = asyncio.get_event_loop() +loop.run_until_complete(echo_client()) +loop.close() diff --git a/examples/echo_server_tulip.py b/examples/echo_server_tulip.py new file mode 100644 index 00000000..d7e6e29d --- /dev/null +++ b/examples/echo_server_tulip.py @@ -0,0 +1,21 @@ +import trollius as asyncio +from trollius import From + +@asyncio.coroutine +def echo_server(): + yield From(asyncio.start_server(handle_connection, 'localhost', 8000)) + +@asyncio.coroutine +def handle_connection(reader, writer): + while True: + data = yield From(reader.read(8192)) + if not data: + break + writer.write(data) + +loop = asyncio.get_event_loop() +loop.run_until_complete(echo_server()) +try: + loop.run_forever() +finally: + loop.close() diff --git a/examples/fetch0.py b/examples/fetch0.py new file mode 100644 index 00000000..f98feeb3 --- /dev/null +++ b/examples/fetch0.py @@ -0,0 +1,36 @@ +"""Simplest possible HTTP client.""" + +from __future__ import print_function +import sys + +from trollius import * + + +@coroutine +def fetch(): + r, w = yield From(open_connection('python.org', 80)) + request = 'GET / HTTP/1.0\r\n\r\n' + print('>', request, file=sys.stderr) + w.write(request.encode('latin-1')) + while True: + line = yield From(r.readline()) + line = line.decode('latin-1').rstrip() + if not line: + break + print('<', line, file=sys.stderr) + print(file=sys.stderr) + body = yield From(r.read()) + raise Return(body) + + +def main(): + loop = get_event_loop() + try: + body = loop.run_until_complete(fetch()) + finally: + loop.close() + print(body.decode('latin-1'), end='') + + +if __name__ == '__main__': + main() diff --git a/examples/fetch1.py b/examples/fetch1.py new file mode 100644 index 00000000..9e9a1caf --- /dev/null +++ b/examples/fetch1.py @@ -0,0 +1,84 @@ +"""Fetch one URL and write its content to stdout. + +This version adds URL parsing (including SSL) and a Response object. 
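+
+Usage sketch (the URL comes from sys.argv[1], '-v' enables verbose output):
+
+    python fetch1.py http://python.org/ -v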
+""" + +from __future__ import print_function +import sys +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse + +from trollius import * + + +class Response: + + def __init__(self, verbose=True): + self.verbose = verbose + self.http_version = None # 'HTTP/1.1' + self.status = None # 200 + self.reason = None # 'Ok' + self.headers = [] # [('Content-Type', 'text/html')] + + @coroutine + def read(self, reader): + @coroutine + def getline(): + line = (yield From(reader.readline())) + line = line.decode('latin-1').rstrip() + raise Return(line) + status_line = yield From(getline()) + if self.verbose: print('<', status_line, file=sys.stderr) + self.http_version, status, self.reason = status_line.split(None, 2) + self.status = int(status) + while True: + header_line = yield From(getline()) + if not header_line: + break + if self.verbose: print('<', header_line, file=sys.stderr) + # TODO: Continuation lines. + key, value = header_line.split(':', 1) + self.headers.append((key, value.strip())) + if self.verbose: print(file=sys.stderr) + + +@coroutine +def fetch(url, verbose=True): + parts = urlparse(url) + if parts.scheme == 'http': + ssl = False + elif parts.scheme == 'https': + ssl = True + else: + print('URL must use http or https.') + sys.exit(1) + port = parts.port + if port is None: + port = 443 if ssl else 80 + path = parts.path or '/' + if parts.query: + path += '?' + parts.query + request = 'GET %s HTTP/1.0\r\n\r\n' % path + if verbose: + print('>', request, file=sys.stderr, end='') + r, w = yield From(open_connection(parts.hostname, port, ssl=ssl)) + w.write(request.encode('latin-1')) + response = Response(verbose) + yield From(response.read(r)) + body = yield From(r.read()) + raise Return(body) + + +def main(): + loop = get_event_loop() + try: + body = loop.run_until_complete(fetch(sys.argv[1], '-v' in sys.argv)) + finally: + loop.close() + print(body.decode('latin-1'), end='') + + +if __name__ == '__main__': + main() diff --git a/examples/fetch2.py b/examples/fetch2.py new file mode 100644 index 00000000..5a321a8a --- /dev/null +++ b/examples/fetch2.py @@ -0,0 +1,153 @@ +"""Fetch one URL and write its content to stdout. + +This version adds a Request object. 
+""" + +from __future__ import print_function +import sys +try: + from urllib.parse import urlparse + from http.client import BadStatusLine +except ImportError: + # Python 2 + from urlparse import urlparse + from httplib import BadStatusLine + +from trollius import * + + +class Request: + + def __init__(self, url, verbose=True): + self.url = url + self.verbose = verbose + self.parts = urlparse(self.url) + self.scheme = self.parts.scheme + assert self.scheme in ('http', 'https'), repr(url) + self.ssl = self.parts.scheme == 'https' + self.netloc = self.parts.netloc + self.hostname = self.parts.hostname + self.port = self.parts.port or (443 if self.ssl else 80) + self.path = (self.parts.path or '/') + self.query = self.parts.query + if self.query: + self.full_path = '%s?%s' % (self.path, self.query) + else: + self.full_path = self.path + self.http_version = 'HTTP/1.1' + self.method = 'GET' + self.headers = [] + self.reader = None + self.writer = None + + @coroutine + def connect(self): + if self.verbose: + print('* Connecting to %s:%s using %s' % + (self.hostname, self.port, 'ssl' if self.ssl else 'tcp'), + file=sys.stderr) + self.reader, self.writer = yield From(open_connection(self.hostname, + self.port, + ssl=self.ssl)) + if self.verbose: + print('* Connected to %s' % + (self.writer.get_extra_info('peername'),), + file=sys.stderr) + + def putline(self, line): + self.writer.write(line.encode('latin-1') + b'\r\n') + + @coroutine + def send_request(self): + request = '%s %s %s' % (self.method, self.full_path, self.http_version) + if self.verbose: print('>', request, file=sys.stderr) + self.putline(request) + if 'host' not in {key.lower() for key, _ in self.headers}: + self.headers.insert(0, ('Host', self.netloc)) + for key, value in self.headers: + line = '%s: %s' % (key, value) + if self.verbose: print('>', line, file=sys.stderr) + self.putline(line) + self.putline('') + + @coroutine + def get_response(self): + response = Response(self.reader, self.verbose) + yield From(response.read_headers()) + raise Return(response) + + +class Response: + + def __init__(self, reader, verbose=True): + self.reader = reader + self.verbose = verbose + self.http_version = None # 'HTTP/1.1' + self.status = None # 200 + self.reason = None # 'Ok' + self.headers = [] # [('Content-Type', 'text/html')] + + @coroutine + def getline(self): + line = (yield From(self.reader.readline())) + line = line.decode('latin-1').rstrip() + raise Return(line) + + @coroutine + def read_headers(self): + status_line = yield From(self.getline()) + if self.verbose: print('<', status_line, file=sys.stderr) + status_parts = status_line.split(None, 2) + if len(status_parts) != 3: + raise BadStatusLine(status_line) + self.http_version, status, self.reason = status_parts + self.status = int(status) + while True: + header_line = yield From(self.getline()) + if not header_line: + break + if self.verbose: print('<', header_line, file=sys.stderr) + # TODO: Continuation lines. 
+ key, value = header_line.split(':', 1) + self.headers.append((key, value.strip())) + if self.verbose: print(file=sys.stderr) + + @coroutine + def read(self): + nbytes = None + for key, value in self.headers: + if key.lower() == 'content-length': + nbytes = int(value) + break + if nbytes is None: + body = yield From(self.reader.read()) + else: + body = yield From(self.reader.readexactly(nbytes)) + raise Return(body) + + +@coroutine +def fetch(url, verbose=True): + request = Request(url, verbose) + yield From(request.connect()) + yield From(request.send_request()) + response = yield From(request.get_response()) + body = yield From(response.read()) + raise Return(body) + + +def main(): + loop = get_event_loop() + try: + body = loop.run_until_complete(fetch(sys.argv[1], '-v' in sys.argv)) + finally: + loop.close() + if hasattr(sys.stdout, 'buffer'): + sys.stdout.buffer.write(body) + else: + # Python 2 + sys.stdout.write(body) + + +if __name__ == '__main__': + main() diff --git a/examples/fetch3.py b/examples/fetch3.py new file mode 100644 index 00000000..0fc56d1d --- /dev/null +++ b/examples/fetch3.py @@ -0,0 +1,243 @@ +"""Fetch one URL and write its content to stdout. + +This version adds a primitive connection pool, redirect following and +chunked transfer-encoding. It also supports a --iocp flag. +""" + +from __future__ import print_function +import sys +try: + from urllib.parse import urlparse + from http.client import BadStatusLine +except ImportError: + # Python 2 + from urlparse import urlparse + from httplib import BadStatusLine + +from trollius import * + + +class ConnectionPool: + # TODO: Locking? Close idle connections? + + def __init__(self, verbose=False): + self.verbose = verbose + self.connections = {} # {(host, port, ssl): (reader, writer)} + + def close(self): + for _, writer in self.connections.values(): + writer.close() + + @coroutine + def open_connection(self, host, port, ssl): + port = port or (443 if ssl else 80) + ipaddrs = yield From(get_event_loop().getaddrinfo(host, port)) + if self.verbose: + print('* %s resolves to %s' % + (host, ', '.join(ip[4][0] for ip in ipaddrs)), + file=sys.stderr) + for _, _, _, _, addr in ipaddrs: + h, p = addr[:2] + key = h, p, ssl + conn = self.connections.get(key) + if conn: + reader, writer = conn + if reader._eof: + self.connections.pop(key) + continue + if self.verbose: + print('* Reusing pooled connection', key, file=sys.stderr) + raise Return(conn) + reader, writer = yield From(open_connection(host, port, ssl=ssl)) + addr = writer.get_extra_info('peername') + host, port = addr[:2] + key = host, port, ssl + self.connections[key] = reader, writer + if self.verbose: + print('* New connection', key, file=sys.stderr) + raise Return(reader, writer) + + +class Request: + + def __init__(self, url, verbose=True): + self.url = url + self.verbose = verbose + self.parts = urlparse(self.url) + self.scheme = self.parts.scheme + assert self.scheme in ('http', 'https'), repr(url) + self.ssl = self.parts.scheme == 'https' + self.netloc = self.parts.netloc + self.hostname = self.parts.hostname + self.port = self.parts.port or (443 if self.ssl else 80) + self.path = (self.parts.path or '/') + self.query = self.parts.query + if self.query: + self.full_path = '%s?%s' % (self.path, self.query) + else: + self.full_path = self.path + self.http_version = 'HTTP/1.1' + self.method = 'GET' + self.headers = [] + self.reader = None + self.writer = None + + def vprint(self, *args): + if self.verbose: + print(*args, file=sys.stderr) + + @coroutine + def 
connect(self, pool): + self.vprint('* Connecting to %s:%s using %s' % + (self.hostname, self.port, 'ssl' if self.ssl else 'tcp')) + self.reader, self.writer = \ + yield From(pool.open_connection(self.hostname, + self.port, + ssl=self.ssl)) + self.vprint('* Connected to %s' % + (self.writer.get_extra_info('peername'),)) + + @coroutine + def putline(self, line): + self.vprint('>', line) + self.writer.write(line.encode('latin-1') + b'\r\n') + ##yield From(self.writer.drain()) + + @coroutine + def send_request(self): + request = '%s %s %s' % (self.method, self.full_path, self.http_version) + yield From(self.putline(request)) + if 'host' not in {key.lower() for key, _ in self.headers}: + self.headers.insert(0, ('Host', self.netloc)) + for key, value in self.headers: + line = '%s: %s' % (key, value) + yield From(self.putline(line)) + yield From(self.putline('')) + + @coroutine + def get_response(self): + response = Response(self.reader, self.verbose) + yield From(response.read_headers()) + raise Return(response) + + +class Response: + + def __init__(self, reader, verbose=True): + self.reader = reader + self.verbose = verbose + self.http_version = None # 'HTTP/1.1' + self.status = None # 200 + self.reason = None # 'Ok' + self.headers = [] # [('Content-Type', 'text/html')] + + def vprint(self, *args): + if self.verbose: + print(*args, file=sys.stderr) + + @coroutine + def getline(self): + line = (yield From(self.reader.readline())) + line = line.decode('latin-1').rstrip() + self.vprint('<', line) + raise Return(line) + + @coroutine + def read_headers(self): + status_line = yield From(self.getline()) + status_parts = status_line.split(None, 2) + if len(status_parts) != 3: + raise BadStatusLine(status_line) + self.http_version, status, self.reason = status_parts + self.status = int(status) + while True: + header_line = yield From(self.getline()) + if not header_line: + break + # TODO: Continuation lines. 
+            key, value = header_line.split(':', 1)
+            self.headers.append((key, value.strip()))
+
+    def get_redirect_url(self, default=None):
+        if self.status not in (300, 301, 302, 303, 307):
+            return default
+        return self.get_header('Location', default)
+
+    def get_header(self, key, default=None):
+        key = key.lower()
+        for k, v in self.headers:
+            if k.lower() == key:
+                return v
+        return default
+
+    @coroutine
+    def read(self):
+        nbytes = None
+        for key, value in self.headers:
+            if key.lower() == 'content-length':
+                nbytes = int(value)
+                break
+        if nbytes is None:
+            if self.get_header('transfer-encoding', '').lower() == 'chunked':
+                blocks = []
+                size = -1
+                while size:
+                    size_header = yield From(self.reader.readline())
+                    if not size_header:
+                        break
+                    parts = size_header.split(b';')
+                    size = int(parts[0], 16)
+                    if size:
+                        block = yield From(self.reader.readexactly(size))
+                        assert len(block) == size, (len(block), size)
+                        blocks.append(block)
+                    crlf = yield From(self.reader.readline())
+                    assert crlf == b'\r\n', repr(crlf)
+                body = b''.join(blocks)
+            else:
+                body = yield From(self.reader.read())
+        else:
+            body = yield From(self.reader.readexactly(nbytes))
+        raise Return(body)
+
+
+@coroutine
+def fetch(url, verbose=True, max_redirect=10):
+    # The module-level try block above only imports urlparse, so get urljoin
+    # here in a way that works on both Python 2 and 3; the original
+    # "urllib.parse.urljoin" was a NameError, as urllib is never imported.
+    try:
+        from urllib.parse import urljoin
+    except ImportError:
+        # Python 2
+        from urlparse import urljoin
+    pool = ConnectionPool(verbose)
+    try:
+        for _ in range(max_redirect):
+            request = Request(url, verbose)
+            yield From(request.connect(pool))
+            yield From(request.send_request())
+            response = yield From(request.get_response())
+            body = yield From(response.read())
+            next_url = response.get_redirect_url()
+            if not next_url:
+                break
+            url = urljoin(url, next_url)
+            print('redirect to', url, file=sys.stderr)
+        raise Return(body)
+    finally:
+        pool.close()
+
+
+def main():
+    if '--iocp' in sys.argv:
+        from trollius.windows_events import ProactorEventLoop
+        loop = ProactorEventLoop()
+        set_event_loop(loop)
+    else:
+        loop = get_event_loop()
+    try:
+        body = loop.run_until_complete(fetch(sys.argv[1], '-v' in sys.argv))
+    finally:
+        loop.close()
+    if hasattr(sys.stdout, 'buffer'):
+        sys.stdout.buffer.write(body)
+    else:
+        # Python 2
+        sys.stdout.write(body)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/examples/fuzz_as_completed.py b/examples/fuzz_as_completed.py
new file mode 100644
index 00000000..7e74fe78
--- /dev/null
+++ b/examples/fuzz_as_completed.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python3
+
+"""Fuzz tester for as_completed(), by Glenn Langford."""
+
+from __future__ import print_function
+
+import trollius as asyncio
+from trollius import From, Return
+import itertools
+import random
+import sys
+
+@asyncio.coroutine
+def sleeper(time):
+    yield From(asyncio.sleep(time))
+    raise Return(time)
+
+@asyncio.coroutine
+def watcher(tasks, delay=False):
+    res = []
+    for t in asyncio.as_completed(tasks):
+        r = yield From(t)
+        res.append(r)
+        if delay:
+            # simulate processing delay
+            process_time = random.random() / 10
+            yield From(asyncio.sleep(process_time))
+    #print(res)
+    #assert(sorted(res) == res)
+    if sorted(res) != res:
+        print('FAIL', res)
+        print('------------')
+    else:
+        print('.', end='')
+        sys.stdout.flush()
+
+loop = asyncio.get_event_loop()
+
+print('Pass 1')
+# All permutations of discrete task running times must be returned
+# by as_completed in the correct order.
+task_times = [0, 0.1, 0.2, 0.3, 0.4]  # 120 permutations
+for times in itertools.permutations(task_times):
+    tasks = [asyncio.Task(sleeper(t)) for t in times]
+    loop.run_until_complete(asyncio.Task(watcher(tasks)))
+
+print()
+print('Pass 2')
+# Longer task times, with randomized duplicates. 100 tasks each time.
+# Note the explicit float division: plain "x/10" would truncate every
+# entry to 0 under Python 2's integer division.
+longer_task_times = [x / 10.0 for x in range(30)]
+for i in range(20):
+    task_times = longer_task_times * 10
+    random.shuffle(task_times)
+    #print('Times', task_times[:500])
+    tasks = [asyncio.Task(sleeper(t)) for t in task_times[:100]]
+    loop.run_until_complete(asyncio.Task(watcher(tasks)))
+
+print()
+print('Pass 3')
+# Same as pass 2, but with a random processing delay (0 - 0.1s) after
+# retrieving each future from as_completed and 200 tasks. This tests whether
+# the order that callbacks are triggered is preserved through to the
+# as_completed caller.
+for i in range(20):
+    task_times = longer_task_times * 10
+    random.shuffle(task_times)
+    #print('Times', task_times[:200])
+    tasks = [asyncio.Task(sleeper(t)) for t in task_times[:200]]
+    loop.run_until_complete(asyncio.Task(watcher(tasks, delay=True)))
+
+print()
+loop.close()
diff --git a/examples/hello_callback.py b/examples/hello_callback.py
new file mode 100644
index 00000000..f192c8dc
--- /dev/null
+++ b/examples/hello_callback.py
@@ -0,0 +1,17 @@
+"""Print 'Hello World' every two seconds, using a callback."""
+
+import trollius
+
+
+def print_and_repeat(loop):
+    print('Hello World')
+    loop.call_later(2, print_and_repeat, loop)
+
+
+if __name__ == '__main__':
+    loop = trollius.get_event_loop()
+    print_and_repeat(loop)
+    try:
+        loop.run_forever()
+    finally:
+        loop.close()
diff --git a/examples/hello_coroutine.py b/examples/hello_coroutine.py
new file mode 100644
index 00000000..e6a4e6ca
--- /dev/null
+++ b/examples/hello_coroutine.py
@@ -0,0 +1,19 @@
+"""Print 'Hello World' every two seconds, using a coroutine."""
+
+import trollius
+from trollius import From
+
+
+@trollius.coroutine
+def greet_every_two_seconds():
+    while True:
+        print('Hello World')
+        yield From(trollius.sleep(2))
+
+
+if __name__ == '__main__':
+    loop = trollius.get_event_loop()
+    try:
+        loop.run_until_complete(greet_every_two_seconds())
+    finally:
+        loop.close()
diff --git a/examples/interop_asyncio.py b/examples/interop_asyncio.py
new file mode 100644
index 00000000..b20e3edb
--- /dev/null
+++ b/examples/interop_asyncio.py
@@ -0,0 +1,53 @@
+import asyncio
+import trollius
+
+@asyncio.coroutine
+def asyncio_noop():
+    pass
+
+@asyncio.coroutine
+def asyncio_coroutine(coro):
+    print("asyncio coroutine")
+    res = yield from coro
+    print("asyncio inner coroutine result: %r" % (res,))
+    print("asyncio coroutine done")
+    return "asyncio"
+
+@trollius.coroutine
+def trollius_noop():
+    pass
+
+@trollius.coroutine
+def trollius_coroutine(coro):
+    print("trollius coroutine")
+    res = yield trollius.From(coro)
+    print("trollius inner coroutine result: %r" % (res,))
+    print("trollius coroutine done")
+    raise trollius.Return("trollius")
+
+def main():
+    # use the trollius event loop policy in asyncio
+    policy = trollius.get_event_loop_policy()
+    asyncio.set_event_loop_policy(policy)
+
+    # create an event loop for the main thread: use the Trollius event loop
+    loop = trollius.get_event_loop()
+    assert asyncio.get_event_loop() is loop
+
+    print("[ asyncio coroutine called from trollius coroutine ]")
+    coro1 = asyncio_noop()
+    coro2 = asyncio_coroutine(coro1)
+    res = loop.run_until_complete(trollius_coroutine(coro2))
+    print("trollius coroutine result: %r" % res)
+    print("")
+
+    print("[ trollius coroutine called from asyncio coroutine ]")
+    coro1 = trollius_noop()
+    coro2 = trollius_coroutine(coro1)
+    res = loop.run_until_complete(asyncio_coroutine(coro2))
+    print("asyncio coroutine result: %r" % res)
+    print("")
+
+    loop.close()
+
+main()
diff --git a/examples/shell.py b/examples/shell.py
new file mode 100644
index 00000000..c78594ed
--- /dev/null
+++ b/examples/shell.py
@@ -0,0 +1,55 @@
+"""Examples using create_subprocess_exec() and create_subprocess_shell()."""
+
+import trollius as asyncio
+from trollius import From
+import signal
+from trollius.subprocess import PIPE
+from trollius.py33_exceptions import ProcessLookupError
+
+@asyncio.coroutine
+def cat(loop):
+    proc = yield From(asyncio.create_subprocess_shell("cat",
+                                                      stdin=PIPE,
+                                                      stdout=PIPE))
+    print("pid: %s" % proc.pid)
+
+    message = "Hello World!"
+    print("cat write: %r" % message)
+
+    stdout, stderr = yield From(proc.communicate(message.encode('ascii')))
+    print("cat read: %r" % stdout.decode('ascii'))
+
+    exitcode = yield From(proc.wait())
+    print("(exit code %s)" % exitcode)
+
+@asyncio.coroutine
+def ls(loop):
+    proc = yield From(asyncio.create_subprocess_exec("ls",
+                                                     stdout=PIPE))
+    while True:
+        line = yield From(proc.stdout.readline())
+        if not line:
+            break
+        print("ls>>", line.decode('ascii').rstrip())
+    try:
+        proc.send_signal(signal.SIGINT)
+    except ProcessLookupError:
+        pass
+
+@asyncio.coroutine
+def test_call(*args, **kw):
+    timeout = kw.pop('timeout', None)
+    try:
+        proc = yield From(asyncio.create_subprocess_exec(*args))
+        exitcode = yield From(asyncio.wait_for(proc.wait(), timeout))
+        print("%s: exit code %s" % (' '.join(args), exitcode))
+    except asyncio.TimeoutError:
+        print("timeout! (%.1f sec)" % timeout)
+        proc.kill()
+        yield From(proc.wait())
+
+loop = asyncio.get_event_loop()
+loop.run_until_complete(cat(loop))
+loop.run_until_complete(ls(loop))
+loop.run_until_complete(test_call("bash", "-c", "sleep 3", timeout=1.0))
+loop.close()
diff --git a/examples/simple_tcp_server.py b/examples/simple_tcp_server.py
new file mode 100644
index 00000000..247f6e6c
--- /dev/null
+++ b/examples/simple_tcp_server.py
@@ -0,0 +1,160 @@
+"""
+Example of a simple TCP server that is written in (mostly) coroutine
+style and uses asyncio.streams.start_server() and
+asyncio.streams.open_connection().
+
+Note that running this example starts both the TCP server and client
+in the same process. It listens on port 12345 on 127.0.0.1, so it will
+fail if this port is currently in use.
+"""
+
+from __future__ import print_function
+import sys
+import trollius as asyncio
+# "import asyncio.streams" would fail on Python 2; asyncio is an alias of
+# trollius here, so import the trollius submodule instead.
+import trollius.streams
+from trollius import From, Return
+
+
+class MyServer:
+    """
+    This is just an example of how a TCP server might be structured.
+    This class has basically 3 methods: start the server, handle a
+    client, and stop the server.
+
+    Note that you don't have to follow this structure, it is really
+    just an example or possible starting point.
+    """
+
+    def __init__(self):
+        self.server = None  # encapsulates the server sockets
+
+        # this keeps track of all the clients that connected to our
+        # server. It can be useful in some cases, for instance to
+        # kill client connections or to broadcast some data to all
+        # clients...
+        self.clients = {}  # task -> (reader, writer)
+
+    def _accept_client(self, client_reader, client_writer):
+        """
+        This method accepts a new client connection and creates a Task
+        to handle this client. self.clients is updated to keep track
+        of the new client.
+ """ + + # start a new Task to handle this specific client connection + task = asyncio.Task(self._handle_client(client_reader, client_writer)) + self.clients[task] = (client_reader, client_writer) + + def client_done(task): + print("client task done:", task, file=sys.stderr) + del self.clients[task] + + task.add_done_callback(client_done) + + @asyncio.coroutine + def _handle_client(self, client_reader, client_writer): + """ + This method actually does the work to handle the requests for + a specific client. The protocol is line oriented, so there is + a main loop that reads a line with a request and then sends + out one or more lines back to the client with the result. + """ + while True: + data = (yield From(client_reader.readline())) + data = data.decode("utf-8") + if not data: # an empty string means the client disconnected + break + parts = data.rstrip().split(' ') + cmd = parts[0] + args = parts[1:] + if cmd == 'add': + arg1 = float(args[0]) + arg2 = float(args[1]) + retval = arg1 + arg2 + client_writer.write("{0!r}\n".format(retval).encode("utf-8")) + elif cmd == 'repeat': + times = int(args[0]) + msg = args[1] + client_writer.write("begin\n".encode("utf-8")) + for idx in range(times): + client_writer.write("{0}. {1}\n".format(idx+1, msg) + .encode("utf-8")) + client_writer.write("end\n".encode("utf-8")) + else: + print("Bad command {0!r}".format(data), file=sys.stderr) + + # This enables us to have flow control in our connection. + yield From(client_writer.drain()) + + def start(self, loop): + """ + Starts the TCP server, so that it listens on port 12345. + + For each client that connects, the accept_client method gets + called. This method runs the loop until the server sockets + are ready to accept connections. + """ + self.server = loop.run_until_complete( + asyncio.streams.start_server(self._accept_client, + '127.0.0.1', 12345, + loop=loop)) + + def stop(self, loop): + """ + Stops the TCP server, i.e. closes the listening socket(s). + + This method runs the loop until the server sockets are closed. 
+ """ + if self.server is not None: + self.server.close() + loop.run_until_complete(self.server.wait_closed()) + self.server = None + + +def main(): + loop = asyncio.get_event_loop() + + # creates a server and starts listening to TCP connections + server = MyServer() + server.start(loop) + + @asyncio.coroutine + def client(): + reader, writer = yield From(asyncio.streams.open_connection( + '127.0.0.1', 12345, loop=loop)) + + def send(msg): + print("> " + msg) + writer.write((msg + '\n').encode("utf-8")) + + def recv(): + msgback = (yield From(reader.readline())) + msgback = msgback.decode("utf-8").rstrip() + print("< " + msgback) + raise Return(msgback) + + # send a line + send("add 1 2") + msg = yield From(recv()) + + send("repeat 5 hello") + msg = yield From(recv()) + assert msg == 'begin' + while True: + msg = yield From(recv()) + if msg == 'end': + break + + writer.close() + yield From(asyncio.sleep(0.5)) + + # creates a client and connects to our server + try: + loop.run_until_complete(client()) + server.stop(loop) + finally: + loop.close() + + +if __name__ == '__main__': + main() diff --git a/examples/sink.py b/examples/sink.py new file mode 100644 index 00000000..fb28adef --- /dev/null +++ b/examples/sink.py @@ -0,0 +1,96 @@ +"""Test service that accepts connections and reads all data off them.""" + +from __future__ import print_function +import argparse +import os +import sys + +from trollius import * + +ARGS = argparse.ArgumentParser(description="TCP data sink example.") +ARGS.add_argument( + '--tls', action='store_true', dest='tls', + default=False, help='Use TLS with a self-signed cert') +ARGS.add_argument( + '--iocp', action='store_true', dest='iocp', + default=False, help='Use IOCP event loop (Windows only)') +ARGS.add_argument( + '--host', action='store', dest='host', + default='127.0.0.1', help='Host name') +ARGS.add_argument( + '--port', action='store', dest='port', + default=1111, type=int, help='Port number') +ARGS.add_argument( + '--maxsize', action='store', dest='maxsize', + default=16*1024*1024, type=int, help='Max total data size') + +server = None +args = None + + +def dprint(*args): + print('sink:', *args, file=sys.stderr) + + +class Service(Protocol): + + def connection_made(self, tr): + dprint('connection from', tr.get_extra_info('peername')) + dprint('my socket is', tr.get_extra_info('sockname')) + self.tr = tr + self.total = 0 + + def data_received(self, data): + if data == b'stop': + dprint('stopping server') + server.close() + self.tr.close() + return + self.total += len(data) + dprint('received', len(data), 'bytes; total', self.total) + if self.total > args.maxsize: + dprint('closing due to too much data') + self.tr.close() + + def connection_lost(self, how): + dprint('closed', repr(how)) + + +@coroutine +def start(loop, host, port): + global server + sslctx = None + if args.tls: + import ssl + # TODO: take cert/key from args as well. 
+ here = os.path.join(os.path.dirname(__file__), '..', 'tests') + sslctx = SSLContext(ssl.PROTOCOL_SSLv23) + if not BACKPORT_SSL_CONTEXT: + sslctx.options |= ssl.OP_NO_SSLv2 + sslctx.load_cert_chain( + certfile=os.path.join(here, 'ssl_cert.pem'), + keyfile=os.path.join(here, 'ssl_key.pem')) + + server = yield From(loop.create_server(Service, host, port, ssl=sslctx)) + dprint('serving TLS' if sslctx else 'serving', + [s.getsockname() for s in server.sockets]) + yield From(server.wait_closed()) + + +def main(): + global args + args = ARGS.parse_args() + if args.iocp: + from trollius.windows_events import ProactorEventLoop + loop = ProactorEventLoop() + set_event_loop(loop) + else: + loop = get_event_loop() + try: + loop.run_until_complete(start(loop, args.host, args.port)) + finally: + loop.close() + + +if __name__ == '__main__': + main() diff --git a/examples/source.py b/examples/source.py new file mode 100644 index 00000000..c3ebd558 --- /dev/null +++ b/examples/source.py @@ -0,0 +1,101 @@ +"""Test client that connects and sends infinite data.""" + +from __future__ import print_function +import argparse +import sys + +from trollius import * +from trollius import test_utils + + +ARGS = argparse.ArgumentParser(description="TCP data sink example.") +ARGS.add_argument( + '--tls', action='store_true', dest='tls', + default=False, help='Use TLS') +ARGS.add_argument( + '--iocp', action='store_true', dest='iocp', + default=False, help='Use IOCP event loop (Windows only)') +ARGS.add_argument( + '--stop', action='store_true', dest='stop', + default=False, help='Stop the server by sending it b"stop" as data') +ARGS.add_argument( + '--host', action='store', dest='host', + default='127.0.0.1', help='Host name') +ARGS.add_argument( + '--port', action='store', dest='port', + default=1111, type=int, help='Port number') +ARGS.add_argument( + '--size', action='store', dest='size', + default=16*1024, type=int, help='Data size') + +args = None + + +def dprint(*args): + print('source:', *args, file=sys.stderr) + + +class Client(Protocol): + + total = 0 + + def connection_made(self, tr): + dprint('connecting to', tr.get_extra_info('peername')) + dprint('my socket is', tr.get_extra_info('sockname')) + self.tr = tr + self.lost = False + self.loop = get_event_loop() + self.waiter = Future() + if args.stop: + self.tr.write(b'stop') + self.tr.close() + else: + self.data = b'x'*args.size + self.write_some_data() + + def write_some_data(self): + if self.lost: + dprint('lost already') + return + data = self.data + size = len(data) + self.total += size + dprint('writing', size, 'bytes; total', self.total) + self.tr.write(data) + self.loop.call_soon(self.write_some_data) + + def connection_lost(self, exc): + dprint('lost connection', repr(exc)) + self.lost = True + self.waiter.set_result(None) + + +@coroutine +def start(loop, host, port): + sslctx = None + if args.tls: + sslctx = test_utils.dummy_ssl_context() + tr, pr = yield From(loop.create_connection(Client, host, port, + ssl=sslctx)) + dprint('tr =', tr) + dprint('pr =', pr) + yield From(pr.waiter) + + +def main(): + global args + args = ARGS.parse_args() + if args.iocp: + from trollius.windows_events import ProactorEventLoop + loop = ProactorEventLoop() + set_event_loop(loop) + else: + loop = get_event_loop() + try: + loop.run_until_complete(start(loop, args.host, args.port)) + finally: + loop.close() + + +if __name__ == '__main__': + main() diff --git a/examples/source1.py b/examples/source1.py new file mode 100644 index 00000000..48a53af9 --- /dev/null +++ 
b/examples/source1.py @@ -0,0 +1,100 @@ +"""Like source.py, but uses streams.""" + +from __future__ import print_function +import argparse +import sys + +from trollius import * +from trollius import test_utils + +ARGS = argparse.ArgumentParser(description="TCP data sink example.") +ARGS.add_argument( + '--tls', action='store_true', dest='tls', + default=False, help='Use TLS') +ARGS.add_argument( + '--iocp', action='store_true', dest='iocp', + default=False, help='Use IOCP event loop (Windows only)') +ARGS.add_argument( + '--stop', action='store_true', dest='stop', + default=False, help='Stop the server by sending it b"stop" as data') +ARGS.add_argument( + '--host', action='store', dest='host', + default='127.0.0.1', help='Host name') +ARGS.add_argument( + '--port', action='store', dest='port', + default=1111, type=int, help='Port number') +ARGS.add_argument( + '--size', action='store', dest='size', + default=16*1024, type=int, help='Data size') + + +class Debug: + """A clever little class that suppresses repetitive messages.""" + + overwriting = False + label = 'stream1:' + + def print_(self, *args): + if self.overwriting: + print(file=sys.stderr) + self.overwriting = 0 + print(self.label, *args, file=sys.stderr) + + def oprint(self, *args): + self.overwriting += 1 + end = '\n' + if self.overwriting >= 3: + if self.overwriting == 3: + print(self.label, '[...]', file=sys.stderr) + end = '\r' + print(self.label, *args, file=sys.stderr, end=end) + sys.stdout.flush() + + +@coroutine +def start(loop, args): + d = Debug() + total = 0 + sslctx = None + if args.tls: + d.print_('using dummy SSLContext') + sslctx = test_utils.dummy_ssl_context() + r, w = yield From(open_connection(args.host, args.port, ssl=sslctx)) + d.print_('r =', r) + d.print_('w =', w) + if args.stop: + w.write(b'stop') + w.close() + else: + size = args.size + data = b'x'*size + try: + while True: + total += size + d.oprint('writing', size, 'bytes; total', total) + w.write(data) + f = w.drain() + if f: + d.print_('pausing') + yield From(f) + except (ConnectionResetError, BrokenPipeError) as exc: + d.print_('caught', repr(exc)) + + +def main(): + global args + args = ARGS.parse_args() + if args.iocp: + from trollius.windows_events import ProactorEventLoop + loop = ProactorEventLoop() + set_event_loop(loop) + else: + loop = get_event_loop() + try: + loop.run_until_complete(start(loop, args)) + finally: + loop.close() + + +if __name__ == '__main__': + main() diff --git a/examples/stacks.py b/examples/stacks.py new file mode 100644 index 00000000..abe24a0f --- /dev/null +++ b/examples/stacks.py @@ -0,0 +1,44 @@ +"""Crude demo for print_stack().""" + + +from trollius import * + + +@coroutine +def helper(r): + print('--- helper ---') + for t in Task.all_tasks(): + t.print_stack() + print('--- end helper ---') + line = yield From(r.readline()) + 1/0 + raise Return(line) + +def doit(): + l = get_event_loop() + lr = l.run_until_complete + r, w = lr(open_connection('python.org', 80)) + t1 = async(helper(r)) + for t in Task.all_tasks(): t.print_stack() + print('---') + l._run_once() + for t in Task.all_tasks(): t.print_stack() + print('---') + w.write(b'GET /\r\n') + w.write_eof() + try: + lr(t1) + except Exception as e: + print('catching', e) + finally: + for t in Task.all_tasks(): + t.print_stack() + l.close() + + +def main(): + doit() + + +if __name__ == '__main__': + main() diff --git a/examples/subprocess_attach_read_pipe.py b/examples/subprocess_attach_read_pipe.py new file mode 100644 index 00000000..a2f9bb5d --- /dev/null +++ 
b/examples/subprocess_attach_read_pipe.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +"""Example showing how to attach a read pipe to a subprocess.""" +import trollius as asyncio +import os, sys +from trollius import From + +code = """ +import os, sys +fd = int(sys.argv[1]) +os.write(fd, b'data') +os.close(fd) +""" + +loop = asyncio.get_event_loop() + +@asyncio.coroutine +def task(): + rfd, wfd = os.pipe() + args = [sys.executable, '-c', code, str(wfd)] + + pipe = os.fdopen(rfd, 'rb', 0) + reader = asyncio.StreamReader(loop=loop) + protocol = asyncio.StreamReaderProtocol(reader, loop=loop) + transport, _ = yield From(loop.connect_read_pipe(lambda: protocol, pipe)) + + kwds = {} + if sys.version_info >= (3, 2): + kwds['pass_fds'] = (wfd,) + proc = yield From(asyncio.create_subprocess_exec(*args, **kwds)) + yield From(proc.wait()) + + os.close(wfd) + data = yield From(reader.read()) + print("read = %r" % data.decode()) + +loop.run_until_complete(task()) +loop.close() diff --git a/examples/subprocess_attach_write_pipe.py b/examples/subprocess_attach_write_pipe.py new file mode 100644 index 00000000..8b9e7ec9 --- /dev/null +++ b/examples/subprocess_attach_write_pipe.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +"""Example showing how to attach a write pipe to a subprocess.""" +import trollius as asyncio +from trollius import From +import os, sys +from trollius import subprocess + +code = """ +import os, sys +fd = int(sys.argv[1]) +data = os.read(fd, 1024) +if sys.version_info >= (3,): + stdout = sys.stdout.buffer +else: + stdout = sys.stdout +stdout.write(data) +""" + +loop = asyncio.get_event_loop() + +@asyncio.coroutine +def task(): + rfd, wfd = os.pipe() + args = [sys.executable, '-c', code, str(rfd)] + kwargs = {'stdout': subprocess.PIPE} + if sys.version_info >= (3, 2): + kwargs['pass_fds'] = (rfd,) + proc = yield From(asyncio.create_subprocess_exec(*args, **kwargs)) + + pipe = os.fdopen(wfd, 'wb', 0) + transport, _ = yield From(loop.connect_write_pipe(asyncio.Protocol, + pipe)) + transport.write(b'data') + + stdout, stderr = yield From(proc.communicate()) + print("stdout = %r" % stdout.decode()) + pipe.close() + +loop.run_until_complete(task()) +loop.close() diff --git a/examples/subprocess_shell.py b/examples/subprocess_shell.py new file mode 100644 index 00000000..89412367 --- /dev/null +++ b/examples/subprocess_shell.py @@ -0,0 +1,89 @@ +"""Example writing to and reading from a subprocess at the same time using +tasks.""" + +import trollius as asyncio +import os +from trollius import From +from trollius.subprocess import PIPE +from trollius.py33_exceptions import BrokenPipeError, ConnectionResetError + + +@asyncio.coroutine +def send_input(writer, input): + try: + for line in input: + print('sending %s bytes' % len(line)) + writer.write(line) + d = writer.drain() + if d: + print('pause writing') + yield From(d) + print('resume writing') + writer.close() + except BrokenPipeError: + print('stdin: broken pipe error') + except ConnectionResetError: + print('stdin: connection reset error') + +@asyncio.coroutine +def log_errors(reader): + while True: + line = yield From(reader.readline()) + if not line: + break + print('ERROR', repr(line)) + +@asyncio.coroutine +def read_stdout(stdout): + while True: + line = yield From(stdout.readline()) + print('received', repr(line)) + if not line: + break + +@asyncio.coroutine +def start(cmd, input=None, **kwds): + kwds['stdout'] = PIPE + kwds['stderr'] = PIPE + if input is None and 'stdin' not in kwds: + kwds['stdin'] = None + else: + kwds['stdin'] = PIPE 
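Note how send_input() above (and source1.py earlier) uses trollius-style flow control: drain() returns a false value while the transport's write buffer has room, and a waitable future once the high-water mark is reached, so the coroutine only suspends when writing is actually blocked. A minimal standalone sketch of that idiom (writer is assumed to be any trollius stream writer):

    import trollius
    from trollius import From

    @trollius.coroutine
    def write_all(writer, chunks):
        for chunk in chunks:
            writer.write(chunk)
            blocked = writer.drain()  # false value, or a future when the buffer is full
            if blocked:
                yield From(blocked)   # suspend until the transport drains
        writer.close()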
+ proc = yield From(asyncio.create_subprocess_shell(cmd, **kwds)) + + tasks = [] + if input is not None: + tasks.append(send_input(proc.stdin, input)) + else: + print('No stdin') + if proc.stderr is not None: + tasks.append(log_errors(proc.stderr)) + else: + print('No stderr') + if proc.stdout is not None: + tasks.append(read_stdout(proc.stdout)) + else: + print('No stdout') + + if tasks: + # feed stdin while consuming stdout to avoid hang + # when stdin pipe is full + yield From(asyncio.wait(tasks)) + + exitcode = yield From(proc.wait()) + print("exit code: %s" % exitcode) + + +def main(): + if os.name == 'nt': + loop = asyncio.ProactorEventLoop() + asyncio.set_event_loop(loop) + else: + loop = asyncio.get_event_loop() + loop.run_until_complete(start( + 'sleep 2; wc', input=[b'foo bar baz\n'*300 for i in range(100)])) + loop.close() + + +if __name__ == '__main__': + main() diff --git a/examples/tcp_echo.py b/examples/tcp_echo.py new file mode 100755 index 00000000..773327f7 --- /dev/null +++ b/examples/tcp_echo.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python +"""TCP echo server example.""" +import argparse +import trollius as asyncio +import sys +try: + import signal +except ImportError: + signal = None + + +class EchoServer(asyncio.Protocol): + + TIMEOUT = 5.0 + + def timeout(self): + print('connection timeout, closing.') + self.transport.close() + + def connection_made(self, transport): + print('connection made') + self.transport = transport + + # start 5 seconds timeout timer + self.h_timeout = asyncio.get_event_loop().call_later( + self.TIMEOUT, self.timeout) + + def data_received(self, data): + print('data received: ', data.decode()) + self.transport.write(b'Re: ' + data) + + # restart timeout timer + self.h_timeout.cancel() + self.h_timeout = asyncio.get_event_loop().call_later( + self.TIMEOUT, self.timeout) + + def eof_received(self): + pass + + def connection_lost(self, exc): + print('connection lost:', exc) + self.h_timeout.cancel() + + +class EchoClient(asyncio.Protocol): + + message = 'This is the message. It will be echoed.' 
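EchoServer above demonstrates the cancel-and-rearm idiom for idle timeouts: a single call_later() handle per connection, cancelled and rescheduled on every data_received() call. The same idiom, distilled into a reusable sketch (the class name and API are illustrative, not part of the example):

    import trollius as asyncio

    class IdleWatchdog(object):
        """Close `transport` after `timeout` seconds without traffic."""

        def __init__(self, transport, timeout=5.0):
            self.transport = transport
            self.timeout = timeout
            self.handle = self._arm()

        def _arm(self):
            return asyncio.get_event_loop().call_later(
                self.timeout, self.transport.close)

        def touch(self):
            # Call on every received chunk: drop the pending timer and
            # restart the countdown, as EchoServer.data_received does.
            self.handle.cancel()
            self.handle = self._arm()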
+
+    def connection_made(self, transport):
+        self.transport = transport
+        self.transport.write(self.message.encode())
+        print('data sent:', self.message)
+
+    def data_received(self, data):
+        print('data received:', data)
+
+        # disconnect after 10 seconds
+        asyncio.get_event_loop().call_later(10.0, self.transport.close)
+
+    def eof_received(self):
+        pass
+
+    def connection_lost(self, exc):
+        print('connection lost:', exc)
+        asyncio.get_event_loop().stop()
+
+
+def start_client(loop, host, port):
+    t = asyncio.Task(loop.create_connection(EchoClient, host, port))
+    loop.run_until_complete(t)
+
+
+def start_server(loop, host, port):
+    f = loop.create_server(EchoServer, host, port)
+    return loop.run_until_complete(f)
+
+
+ARGS = argparse.ArgumentParser(description="TCP Echo example.")
+ARGS.add_argument(
+    '--server', action="store_true", dest='server',
+    default=False, help='Run tcp server')
+ARGS.add_argument(
+    '--client', action="store_true", dest='client',
+    default=False, help='Run tcp client')
+ARGS.add_argument(
+    '--host', action="store", dest='host',
+    default='127.0.0.1', help='Host name')
+ARGS.add_argument(
+    '--port', action="store", dest='port',
+    default=9999, type=int, help='Port number')
+ARGS.add_argument(
+    '--iocp', action="store_true", dest='iocp',
+    default=False, help='Use IOCP event loop')
+
+
+if __name__ == '__main__':
+    args = ARGS.parse_args()
+
+    if ':' in args.host:
+        args.host, port = args.host.split(':', 1)
+        args.port = int(port)
+
+    if (not (args.server or args.client)) or (args.server and args.client):
+        print('Please specify --server or --client\n')
+        ARGS.print_help()
+    else:
+        if args.iocp:
+            from trollius import windows_events
+            loop = windows_events.ProactorEventLoop()
+            asyncio.set_event_loop(loop)
+        else:
+            loop = asyncio.get_event_loop()
+        print('Using backend: {0}'.format(loop.__class__.__name__))
+
+        if signal is not None and sys.platform != 'win32':
+            loop.add_signal_handler(signal.SIGINT, loop.stop)
+
+        if args.server:
+            server = start_server(loop, args.host, args.port)
+        else:
+            start_client(loop, args.host, args.port)
+
+        try:
+            loop.run_forever()
+        finally:
+            if args.server:
+                server.close()
+            loop.close()
diff --git a/examples/timing_tcp_server.py b/examples/timing_tcp_server.py
new file mode 100644
index 00000000..67e714d8
--- /dev/null
+++ b/examples/timing_tcp_server.py
@@ -0,0 +1,175 @@
+"""
+A variant of simple_tcp_server.py that measures the time it takes to
+send N messages for a range of N. (This was O(N**2) in a previous
+version of asyncio.)
+
+Note that running this example starts both the TCP server and client
+in the same process. It listens on port 12345 on 127.0.0.1, so it will
+fail if this port is currently in use.
+"""
+
+from __future__ import print_function
+import sys
+import time
+import random
+
+import trollius as asyncio
+import trollius.streams
+from trollius import From, Return
+
+
+class MyServer:
+    """
+    This is just an example of how a TCP server might be potentially
+    structured. This class has basically 3 methods: start the server,
+    handle a client, and stop the server.
+
+    Note that you don't have to follow this structure, it is really
+    just an example or possible starting point.
+    """
+
+    def __init__(self):
+        self.server = None # encapsulates the server sockets
+
+        # this keeps track of all the clients that connected to our
+        # server. It can be useful in some cases, for instance to
+        # kill client connections or to broadcast some data to all
+        # clients...
+        self.clients = {} # task -> (reader, writer)
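The task-to-streams dict above is kept accurate with add_done_callback(), as _accept_client() below shows: each client gets its own Task, and the done-callback removes the entry whether the handler finished normally or with an exception. A standalone sketch of that bookkeeping pattern:

    import trollius as asyncio

    clients = {}  # Task -> (reader, writer)

    def track(task, reader, writer):
        # Remember the client, and forget it automatically as soon as
        # its handler task finishes (normally or with an exception).
        clients[task] = (reader, writer)
        task.add_done_callback(lambda t: clients.pop(t, None))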
+
+    def _accept_client(self, client_reader, client_writer):
+        """
+        This method accepts a new client connection and creates a Task
+        to handle this client. self.clients is updated to keep track
+        of the new client.
+        """
+
+        # start a new Task to handle this specific client connection
+        task = asyncio.Task(self._handle_client(client_reader, client_writer))
+        self.clients[task] = (client_reader, client_writer)
+
+        def client_done(task):
+            print("client task done:", task, file=sys.stderr)
+            del self.clients[task]
+
+        task.add_done_callback(client_done)
+
+    @asyncio.coroutine
+    def _handle_client(self, client_reader, client_writer):
+        """
+        This method actually does the work to handle the requests for
+        a specific client. The protocol is line oriented, so there is
+        a main loop that reads a line with a request and then sends
+        out one or more lines back to the client with the result.
+        """
+        while True:
+            data = (yield From(client_reader.readline()))
+            data = data.decode("utf-8")
+            if not data: # an empty string means the client disconnected
+                break
+            parts = data.rstrip().split(' ')
+            cmd = parts[0]
+            args = parts[1:]
+            if cmd == 'add':
+                arg1 = float(args[0])
+                arg2 = float(args[1])
+                retval = arg1 + arg2
+                client_writer.write("{0!r}\n".format(retval).encode("utf-8"))
+            elif cmd == 'repeat':
+                times = int(args[0])
+                msg = args[1]
+                client_writer.write("begin\n".encode("utf-8"))
+                for idx in range(times):
+                    client_writer.write("{0}. {1}\n".format(
+                        idx+1, msg + 'x'*random.randint(10, 50))
+                        .encode("utf-8"))
+                client_writer.write("end\n".encode("utf-8"))
+            else:
+                print("Bad command {0!r}".format(data), file=sys.stderr)
+
+            # This enables us to have flow control in our connection.
+            yield From(client_writer.drain())
+
+    def start(self, loop):
+        """
+        Starts the TCP server, so that it listens on port 12345.
+
+        For each client that connects, the _accept_client method gets
+        called. This method runs the loop until the server sockets
+        are ready to accept connections.
+        """
+        self.server = loop.run_until_complete(
+            asyncio.streams.start_server(self._accept_client,
+                                         '127.0.0.1', 12345,
+                                         loop=loop))
+
+    def stop(self, loop):
+        """
+        Stops the TCP server, i.e. closes the listening socket(s).
+
+        This method runs the loop until the server sockets are closed.
+ """ + if self.server is not None: + self.server.close() + loop.run_until_complete(self.server.wait_closed()) + self.server = None + + +def main(): + loop = asyncio.get_event_loop() + + # creates a server and starts listening to TCP connections + server = MyServer() + server.start(loop) + + @asyncio.coroutine + def client(): + reader, writer = yield From(asyncio.streams.open_connection( + '127.0.0.1', 12345, loop=loop)) + + def send(msg): + print("> " + msg) + writer.write((msg + '\n').encode("utf-8")) + + def recv(): + msgback = (yield From(reader.readline())) + msgback = msgback.decode("utf-8").rstrip() + print("< " + msgback) + raise Return(msgback) + + # send a line + send("add 1 2") + msg = yield From(recv()) + + Ns = list(range(100, 100000, 10000)) + times = [] + + for N in Ns: + t0 = time.time() + send("repeat {0} hello world ".format(N)) + msg = yield From(recv()) + assert msg == 'begin' + while True: + msg = (yield From(reader.readline())) + msg = msg.decode("utf-8").rstrip() + if msg == 'end': + break + t1 = time.time() + dt = t1 - t0 + print("Time taken: {0:.3f} seconds ({1:.6f} per repetition)" + .format(dt, dt/N)) + times.append(dt) + + writer.close() + yield From(asyncio.sleep(0.5)) + + # creates a client and connects to our server + try: + loop.run_until_complete(client()) + server.stop(loop) + finally: + loop.close() + + +if __name__ == '__main__': + main() diff --git a/examples/udp_echo.py b/examples/udp_echo.py new file mode 100755 index 00000000..bd646396 --- /dev/null +++ b/examples/udp_echo.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python +"""UDP echo example.""" +import argparse +import sys +import trollius as asyncio +try: + import signal +except ImportError: + signal = None + + +class MyServerUdpEchoProtocol: + + def connection_made(self, transport): + print('start', transport) + self.transport = transport + + def datagram_received(self, data, addr): + print('Data received:', data, addr) + self.transport.sendto(data, addr) + + def error_received(self, exc): + print('Error received:', exc) + + def connection_lost(self, exc): + print('stop', exc) + + +class MyClientUdpEchoProtocol: + + message = 'This is the message. It will be echoed.' 
+ + def connection_made(self, transport): + self.transport = transport + print('sending "{0}"'.format(self.message)) + self.transport.sendto(self.message.encode()) + print('waiting to receive') + + def datagram_received(self, data, addr): + print('received "{0}"'.format(data.decode())) + self.transport.close() + + def error_received(self, exc): + print('Error received:', exc) + + def connection_lost(self, exc): + print('closing transport', exc) + loop = asyncio.get_event_loop() + loop.stop() + + +def start_server(loop, addr): + t = asyncio.Task(loop.create_datagram_endpoint( + MyServerUdpEchoProtocol, local_addr=addr)) + transport, server = loop.run_until_complete(t) + return transport + + +def start_client(loop, addr): + t = asyncio.Task(loop.create_datagram_endpoint( + MyClientUdpEchoProtocol, remote_addr=addr)) + loop.run_until_complete(t) + + +ARGS = argparse.ArgumentParser(description="UDP Echo example.") +ARGS.add_argument( + '--server', action="store_true", dest='server', + default=False, help='Run udp server') +ARGS.add_argument( + '--client', action="store_true", dest='client', + default=False, help='Run udp client') +ARGS.add_argument( + '--host', action="store", dest='host', + default='127.0.0.1', help='Host name') +ARGS.add_argument( + '--port', action="store", dest='port', + default=9999, type=int, help='Port number') + + +if __name__ == '__main__': + args = ARGS.parse_args() + if ':' in args.host: + args.host, port = args.host.split(':', 1) + args.port = int(port) + + if (not (args.server or args.client)) or (args.server and args.client): + print('Please specify --server or --client\n') + ARGS.print_help() + else: + loop = asyncio.get_event_loop() + if signal is not None: + loop.add_signal_handler(signal.SIGINT, loop.stop) + + if '--server' in sys.argv: + server = start_server(loop, (args.host, args.port)) + else: + start_client(loop, (args.host, args.port)) + + try: + loop.run_forever() + finally: + if '--server' in sys.argv: + server.close() + loop.close() diff --git a/main.py b/main.py deleted file mode 100644 index 06e1414d..00000000 --- a/main.py +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/env python3.3 -"""Example HTTP client using yield-from coroutines (PEP 380). - -Requires Python 3.3. - -There are many micro-optimizations possible here, but that's not the point. - -Some incomplete laundry lists: - -TODO: -- Use poll() or better; need to figure out how to keep fds registered. -- Separate scheduler and event loop. -- A more varied set of test URLs. -- A Hg repo. -- Profiling. -- Unittests. - -PATTERNS TO TRY: -- Wait for all, collate results. -- Wait for first N that are ready. -- Wait until some predicate becomes true. - -FUNCTIONALITY: -- Connection pool (keep connection open). -- Chunked encoding (request and response). -- Pipelining, e.g. zlib (request and response). -- Automatic encoding/decoding. -- Thread pool and getaddrinfo() calling. -- A write() call that isn't a generator. 
-""" - -__author__ = 'Guido van Rossum ' - -import collections -import errno -import logging -import re -import select -import socket -import time - - -class Scheduler: - - def __init__(self): - self.runnable = collections.deque() - self.current = None - self.readers = {} - self.writers = {} - - def run(self, task): - self.runnable.append(task) - - def loop(self): - while self.runnable or self.readers or self.writers: - self.loop1() - - def loop1(self): -## print('loop1') - while self.runnable: - self.current = self.runnable.popleft() - try: - next(self.current) - except StopIteration: - self.current = None - except Exception: - self.current = None - logging.exception('Exception in task') - else: - if self.current is not None: - self.runnable.append(self.current) - self.current = None - if self.readers or self.writers: - # TODO: Schedule timed calls as well. - # TODO: Use poll() or better. - t0 = time.time() - ready_r, ready_w, _ = select.select(self.readers, self.writers, []) - t1 = time.time() -## print('select({}, {}) took {:.3f} secs to return {}, {}' -## .format(list(self.readers), list(self.writers), -## t1 - t0, ready_r, ready_w)) - for fd in ready_r: - self.unblock(self.readers.pop(fd)) - for fd in ready_w: - self.unblock(self.writers.pop(fd)) - - def unblock(self, task): - assert task - self.runnable.append(task) - - def block(self, queue, fd): - assert isinstance(fd, int) - assert fd not in queue - assert self.current is not None - queue[fd] = self.current - self.current = None - - def block_r(self, fd): - self.block(self.readers, fd) - - def block_w(self, fd): - self.block(self.writers, fd) - - -sched = Scheduler() - - -class RawReader: - - def __init__(self, sock): - self.sock = sock - - def read(self, n): - """Read up to n bytes, blocking at most once.""" - assert n >= 0, n - sched.block_r(self.sock.fileno()) - yield - return self.sock.recv(n) - - -class BufferedReader: - - def __init__(self, raw, limit=8192): - self.raw = raw - self.limit = limit - self.buffer = b'' - self.eof = False - - def read(self, n): - """Read up to n bytes, blocking at most once.""" - assert n >= 0, n - if not self.buffer and not self.eof: - yield from self.fillbuffer(max(n, self.limit)) - return self.getfrombuffer(n) - - def readexactly(self, n): - """Read exactly n bytes, or until EOF.""" - blocks = [] - count = 0 - while n > count: - block = yield from self.read(n - count) - blocks.append(block) - count += len(block) - return b''.join(blocks) - - def readline(self): - """Read up to newline or limit, whichever comes first.""" - end = self.buffer.find(b'\n') + 1 # Point past newline, or 0. 
- while not end and not self.eof and len(self.buffer) < self.limit: - anchor = len(self.buffer) - yield from self.fillbuffer(self.limit) - end = self.buffer.find(b'\n', anchor) + 1 - if not end: - end = len(self.buffer) - if end > self.limit: - end = self.limit - return self.getfrombuffer(end) - - def getfrombuffer(self, n): - """Read up to n bytes without blocking.""" - if n >= len(self.buffer): - result, self.buffer = self.buffer, b'' - else: - result, self.buffer = self.buffer[:n], self.buffer[n:] - return result - - def fillbuffer(self, n): - """Fill buffer with one (up to) n bytes from raw reader.""" - assert not self.eof, 'fillbuffer called at eof' - data = yield from self.raw.read(n) -## print('fillbuffer:', repr(data)[:100]) - if data: - self.buffer += data - else: - self.eof = True - - -def send(sock, data): -## print('send:', repr(data)) - while data: - sched.block_w(sock.fileno()) - yield - n = sock.send(data) - assert 0 <= n <= len(data), (n, len(data)) - if n == len(data): - break - data = data[n:] - - -def newsocket(): - sock = socket.socket() - sock.setblocking(False) - return sock - - -def connect(sock, address): -## print('connect:', address) - err = sock.connect_ex(address) - assert err == errno.EINPROGRESS, err - sched.block_w(sock.fileno()) - yield - err = sock.connect_ex(address) - if err == errno.ECONNREFUSED: - raise IOError('Connection refused') - if err != errno.EISCONN: - raise IOError('Connect error %d: %s' % (err, errno.errorcode.get(err))) - - -def urlfetch(host, port=80, method='GET', path='/', - body=None, hdrs=None, encoding='utf-8'): - t0 = time.time() - # Must pass in an IP address. Later we'll call getaddrinfo() - # using a thread pool. We'll also support IPv6. - assert re.match(r'(\d+)(\.\d+)(\.\d+)(\.\d+)\Z', host), repr(host) - sock = newsocket() - yield from connect(sock, (host, port)) - yield from send(sock, - method.encode(encoding) + b' ' + - path.encode(encoding) + b' HTTP/1.0\r\n') - if hdrs: - kwds = dict(hdrs) - else: - kwds = {} - if body is not None: - kwds['content_length'] = len(body) - for header, value in kwds.items(): - yield from send(sock, - header.replace('_', '-').encode(encoding) + b': ' + - value.encode(encoding) + b'\r\n') - - yield from send(sock, b'\r\n') - if body is not None: - yield from send(sock, body) - ##sock.shutdown(1) # Close the writing end of the socket. - - # Read HTTP response line. - raw = RawReader(sock) - buf = BufferedReader(raw) - resp = yield from buf.readline() -## print('resp =', repr(resp)) - m = re.match(br'(?ix) http/(\d\.\d) \s+ (\d\d\d) \s+ ([^\r]*)\r?\n\Z', resp) - if not m: - sock.close() - raise IOError('No valid HTTP response: %r' % response) - http_version, status, message = m.groups() - - # Read HTTP headers. - headers = [] - hdict = {} - while True: - line = yield from buf.readline() - if not line.strip(): - break - m = re.match(br'([^\s:]+):\s*([^\r]*)\r?\n\Z', line) - if not m: - raise IOError('Invalid header: %r' % line) - header, value = m.groups() - headers.append((header, value)) - hdict[header.decode(encoding).lower()] = value.decode(encoding) - - # Read response body. - content_length = hdict.get('content-length') - if content_length is not None: - size = int(content_length) # TODO: Catch errors. - assert size >= 0, size - else: - size = 2**20 # Protective limit (1 MB). - data = yield from buf.readexactly(size) - sock.close() # Can this block? 
-    t1 = time.time()
-## print(http_version, status, message, headers, hdict, len(data))
-    print(host, port, path, status, len(data), '{:.3}'.format(t1-t0))
-
-
-def doit():
-    gen1 = urlfetch('127.0.0.1', 8080, path='/', hdrs={'host': 'localhost'})
-    gen2 = urlfetch('82.94.164.162', 80, path='/', hdrs={'host': 'python.org'})
-    sched.run(gen1)
-    sched.run(gen2)
-    for x in '123':
-        for y in '0123456789':
-            g = urlfetch('82.94.164.162', 80,
-                         path='/{}.{}'.format(x, y),
-                         hdrs={'host': 'python.org'})
-            sched.run(g)
-    sched.loop()
-
-
-def main():
-    doit()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/overlapped.c b/overlapped.c
new file mode 100644
index 00000000..52ff9deb
--- /dev/null
+++ b/overlapped.c
@@ -0,0 +1,1400 @@
+/*
+ * Support for overlapped IO
+ *
+ * Some code borrowed from Modules/_winapi.c of CPython
+ */
+
+/* XXX check overflow and DWORD <-> Py_ssize_t conversions
+   Check itemsize */
+
+#include "Python.h"
+#include "structmember.h"
+
+#define WINDOWS_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <mswsock.h>
+
+#if defined(MS_WIN32) && !defined(MS_WIN64)
+# define F_POINTER "k"
+# define T_POINTER T_ULONG
+#else
+# define F_POINTER "K"
+# define T_POINTER T_ULONGLONG
+#endif
+
+#define F_HANDLE F_POINTER
+#define F_ULONG_PTR F_POINTER
+#define F_DWORD "k"
+#define F_BOOL "i"
+#define F_UINT "I"
+
+#define T_HANDLE T_POINTER
+
+#if PY_MAJOR_VERSION >= 3
+# define PYTHON3
+#endif
+
+#ifndef Py_MIN
+# define Py_MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
+#endif
+
+#ifndef Py_MAX
+# define Py_MAX(X, Y) (((X) > (Y)) ? (X) : (Y))
+#endif
+
+enum {TYPE_NONE, TYPE_NOT_STARTED, TYPE_READ, TYPE_WRITE, TYPE_ACCEPT,
+      TYPE_CONNECT, TYPE_DISCONNECT, TYPE_CONNECT_NAMED_PIPE,
+      TYPE_WAIT_NAMED_PIPE_AND_CONNECT};
+
+typedef struct {
+    PyObject_HEAD
+    OVERLAPPED overlapped;
+    /* For convenience, we store the file handle too */
+    HANDLE handle;
+    /* Error returned by last method call */
+    DWORD error;
+    /* Type of operation */
+    DWORD type;
+    union {
+        /* Buffer used for reading: TYPE_READ and TYPE_ACCEPT */
+        PyObject *read_buffer;
+        /* Buffer used for writing: TYPE_WRITE */
+        Py_buffer write_buffer;
+    };
+} OverlappedObject;
+
+/*
+ * Map Windows error codes to subclasses of OSError
+ */
+
+static PyObject *
+SetFromWindowsErr(DWORD err)
+{
+    PyObject *exception_type;
+
+    if (err == 0)
+        err = GetLastError();
+#if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 3) || PY_MAJOR_VERSION > 3
+    switch (err) {
+        case ERROR_CONNECTION_REFUSED:
+            exception_type = PyExc_ConnectionRefusedError;
+            break;
+        case ERROR_CONNECTION_ABORTED:
+            exception_type = PyExc_ConnectionAbortedError;
+            break;
+        default:
+            exception_type = PyExc_OSError;
+    }
+#else
+    exception_type = PyExc_WindowsError;
+#endif
+    return PyErr_SetExcFromWindowsErr(exception_type, err);
+}
+
+/*
+ * Some functions should be loaded at runtime
+ */
+
+static LPFN_ACCEPTEX Py_AcceptEx = NULL;
+static LPFN_CONNECTEX Py_ConnectEx = NULL;
+static LPFN_DISCONNECTEX Py_DisconnectEx = NULL;
+static BOOL (CALLBACK *Py_CancelIoEx)(HANDLE, LPOVERLAPPED) = NULL;
+
+#define GET_WSA_POINTER(s, x) \
+    (SOCKET_ERROR != WSAIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER, \
+                              &Guid##x, sizeof(Guid##x), &Py_##x, \
+                              sizeof(Py_##x), &dwBytes, NULL, NULL))
+
+static int
+initialize_function_pointers(void)
+{
+    GUID GuidAcceptEx = WSAID_ACCEPTEX;
+    GUID GuidConnectEx = WSAID_CONNECTEX;
+    GUID GuidDisconnectEx = WSAID_DISCONNECTEX;
+    HINSTANCE hKernel32;
+    SOCKET s;
+    DWORD dwBytes;
+
+    s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+    if (s == INVALID_SOCKET) {
+
SetFromWindowsErr(WSAGetLastError()); + return -1; + } + + if (!GET_WSA_POINTER(s, AcceptEx) || + !GET_WSA_POINTER(s, ConnectEx) || + !GET_WSA_POINTER(s, DisconnectEx)) + { + closesocket(s); + SetFromWindowsErr(WSAGetLastError()); + return -1; + } + + closesocket(s); + + /* On WinXP we will have Py_CancelIoEx == NULL */ + hKernel32 = GetModuleHandle("KERNEL32"); + *(FARPROC *)&Py_CancelIoEx = GetProcAddress(hKernel32, "CancelIoEx"); + return 0; +} + +/* + * Completion port stuff + */ + +PyDoc_STRVAR( + CreateIoCompletionPort_doc, + "CreateIoCompletionPort(handle, port, key, concurrency) -> port\n\n" + "Create a completion port or register a handle with a port."); + +static PyObject * +overlapped_CreateIoCompletionPort(PyObject *self, PyObject *args) +{ + HANDLE FileHandle; + HANDLE ExistingCompletionPort; + ULONG_PTR CompletionKey; + DWORD NumberOfConcurrentThreads; + HANDLE ret; + + if (!PyArg_ParseTuple(args, F_HANDLE F_HANDLE F_ULONG_PTR F_DWORD, + &FileHandle, &ExistingCompletionPort, &CompletionKey, + &NumberOfConcurrentThreads)) + return NULL; + + Py_BEGIN_ALLOW_THREADS + ret = CreateIoCompletionPort(FileHandle, ExistingCompletionPort, + CompletionKey, NumberOfConcurrentThreads); + Py_END_ALLOW_THREADS + + if (ret == NULL) + return SetFromWindowsErr(0); + return Py_BuildValue(F_HANDLE, ret); +} + +PyDoc_STRVAR( + GetQueuedCompletionStatus_doc, + "GetQueuedCompletionStatus(port, msecs) -> (err, bytes, key, address)\n\n" + "Get a message from completion port. Wait for up to msecs milliseconds."); + +static PyObject * +overlapped_GetQueuedCompletionStatus(PyObject *self, PyObject *args) +{ + HANDLE CompletionPort = NULL; + DWORD NumberOfBytes = 0; + ULONG_PTR CompletionKey = 0; + OVERLAPPED *Overlapped = NULL; + DWORD Milliseconds; + DWORD err; + BOOL ret; + + if (!PyArg_ParseTuple(args, F_HANDLE F_DWORD, + &CompletionPort, &Milliseconds)) + return NULL; + + Py_BEGIN_ALLOW_THREADS + ret = GetQueuedCompletionStatus(CompletionPort, &NumberOfBytes, + &CompletionKey, &Overlapped, Milliseconds); + Py_END_ALLOW_THREADS + + err = ret ? ERROR_SUCCESS : GetLastError(); + if (Overlapped == NULL) { + if (err == WAIT_TIMEOUT) + Py_RETURN_NONE; + else + return SetFromWindowsErr(err); + } + return Py_BuildValue(F_DWORD F_DWORD F_ULONG_PTR F_POINTER, + err, NumberOfBytes, CompletionKey, Overlapped); +} + +PyDoc_STRVAR( + PostQueuedCompletionStatus_doc, + "PostQueuedCompletionStatus(port, bytes, key, address) -> None\n\n" + "Post a message to completion port."); + +static PyObject * +overlapped_PostQueuedCompletionStatus(PyObject *self, PyObject *args) +{ + HANDLE CompletionPort; + DWORD NumberOfBytes; + ULONG_PTR CompletionKey; + OVERLAPPED *Overlapped; + BOOL ret; + + if (!PyArg_ParseTuple(args, F_HANDLE F_DWORD F_ULONG_PTR F_POINTER, + &CompletionPort, &NumberOfBytes, &CompletionKey, + &Overlapped)) + return NULL; + + Py_BEGIN_ALLOW_THREADS + ret = PostQueuedCompletionStatus(CompletionPort, NumberOfBytes, + CompletionKey, Overlapped); + Py_END_ALLOW_THREADS + + if (!ret) + return SetFromWindowsErr(0); + Py_RETURN_NONE; +} + +/* + * Wait for a handle + */ + +struct PostCallbackData { + HANDLE CompletionPort; + LPOVERLAPPED Overlapped; +}; + +static VOID CALLBACK +PostToQueueCallback(PVOID lpParameter, BOOL TimerOrWaitFired) +{ + struct PostCallbackData *p = (struct PostCallbackData*) lpParameter; + + PostQueuedCompletionStatus(p->CompletionPort, TimerOrWaitFired, + 0, p->Overlapped); + /* ignore possible error! 
*/ + PyMem_Free(p); +} + +PyDoc_STRVAR( + RegisterWaitWithQueue_doc, + "RegisterWaitWithQueue(Object, CompletionPort, Overlapped, Timeout)\n" + " -> WaitHandle\n\n" + "Register wait for Object; when complete CompletionPort is notified.\n"); + +static PyObject * +overlapped_RegisterWaitWithQueue(PyObject *self, PyObject *args) +{ + HANDLE NewWaitObject; + HANDLE Object; + ULONG Milliseconds; + struct PostCallbackData data, *pdata; + + if (!PyArg_ParseTuple(args, F_HANDLE F_HANDLE F_POINTER F_DWORD, + &Object, + &data.CompletionPort, + &data.Overlapped, + &Milliseconds)) + return NULL; + + pdata = PyMem_Malloc(sizeof(struct PostCallbackData)); + if (pdata == NULL) + return SetFromWindowsErr(0); + + *pdata = data; + + if (!RegisterWaitForSingleObject( + &NewWaitObject, Object, (WAITORTIMERCALLBACK)PostToQueueCallback, + pdata, Milliseconds, + WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE)) + { + PyMem_Free(pdata); + return SetFromWindowsErr(0); + } + + return Py_BuildValue(F_HANDLE, NewWaitObject); +} + +PyDoc_STRVAR( + UnregisterWait_doc, + "UnregisterWait(WaitHandle) -> None\n\n" + "Unregister wait handle.\n"); + +static PyObject * +overlapped_UnregisterWait(PyObject *self, PyObject *args) +{ + HANDLE WaitHandle; + BOOL ret; + + if (!PyArg_ParseTuple(args, F_HANDLE, &WaitHandle)) + return NULL; + + Py_BEGIN_ALLOW_THREADS + ret = UnregisterWait(WaitHandle); + Py_END_ALLOW_THREADS + + if (!ret) + return SetFromWindowsErr(0); + Py_RETURN_NONE; +} + +PyDoc_STRVAR( + UnregisterWaitEx_doc, + "UnregisterWaitEx(WaitHandle, Event) -> None\n\n" + "Unregister wait handle.\n"); + +static PyObject * +overlapped_UnregisterWaitEx(PyObject *self, PyObject *args) +{ + HANDLE WaitHandle, Event; + BOOL ret; + + if (!PyArg_ParseTuple(args, F_HANDLE F_HANDLE, &WaitHandle, &Event)) + return NULL; + + Py_BEGIN_ALLOW_THREADS + ret = UnregisterWaitEx(WaitHandle, Event); + Py_END_ALLOW_THREADS + + if (!ret) + return SetFromWindowsErr(0); + Py_RETURN_NONE; +} + +/* + * Event functions -- currently only used by tests + */ + +PyDoc_STRVAR( + CreateEvent_doc, + "CreateEvent(EventAttributes, ManualReset, InitialState, Name)" + " -> Handle\n\n" + "Create an event. 
EventAttributes must be None.\n"); + +static PyObject * +overlapped_CreateEvent(PyObject *self, PyObject *args) +{ + PyObject *EventAttributes; + BOOL ManualReset; + BOOL InitialState; + Py_UNICODE *Name; + HANDLE Event; + +#ifdef PYTHON3 + if (!PyArg_ParseTuple(args, "O" F_BOOL F_BOOL "Z", +#else + if (!PyArg_ParseTuple(args, "O" F_BOOL F_BOOL "z", +#endif + &EventAttributes, &ManualReset, + &InitialState, &Name)) + return NULL; + + if (EventAttributes != Py_None) { + PyErr_SetString(PyExc_ValueError, "EventAttributes must be None"); + return NULL; + } + + Py_BEGIN_ALLOW_THREADS + Event = CreateEventW(NULL, ManualReset, InitialState, Name); + Py_END_ALLOW_THREADS + + if (Event == NULL) + return SetFromWindowsErr(0); + return Py_BuildValue(F_HANDLE, Event); +} + +PyDoc_STRVAR( + SetEvent_doc, + "SetEvent(Handle) -> None\n\n" + "Set event.\n"); + +static PyObject * +overlapped_SetEvent(PyObject *self, PyObject *args) +{ + HANDLE Handle; + BOOL ret; + + if (!PyArg_ParseTuple(args, F_HANDLE, &Handle)) + return NULL; + + Py_BEGIN_ALLOW_THREADS + ret = SetEvent(Handle); + Py_END_ALLOW_THREADS + + if (!ret) + return SetFromWindowsErr(0); + Py_RETURN_NONE; +} + +PyDoc_STRVAR( + ResetEvent_doc, + "ResetEvent(Handle) -> None\n\n" + "Reset event.\n"); + +static PyObject * +overlapped_ResetEvent(PyObject *self, PyObject *args) +{ + HANDLE Handle; + BOOL ret; + + if (!PyArg_ParseTuple(args, F_HANDLE, &Handle)) + return NULL; + + Py_BEGIN_ALLOW_THREADS + ret = ResetEvent(Handle); + Py_END_ALLOW_THREADS + + if (!ret) + return SetFromWindowsErr(0); + Py_RETURN_NONE; +} + +/* + * Bind socket handle to local port without doing slow getaddrinfo() + */ + +PyDoc_STRVAR( + BindLocal_doc, + "BindLocal(handle, family) -> None\n\n" + "Bind a socket handle to an arbitrary local port.\n" + "family should AF_INET or AF_INET6.\n"); + +static PyObject * +overlapped_BindLocal(PyObject *self, PyObject *args) +{ + SOCKET Socket; + int Family; + BOOL ret; + + if (!PyArg_ParseTuple(args, F_HANDLE "i", &Socket, &Family)) + return NULL; + + if (Family == AF_INET) { + struct sockaddr_in addr; + memset(&addr, 0, sizeof(addr)); + addr.sin_family = AF_INET; + addr.sin_port = 0; + addr.sin_addr.S_un.S_addr = INADDR_ANY; + ret = bind(Socket, (SOCKADDR*)&addr, sizeof(addr)) != SOCKET_ERROR; + } else if (Family == AF_INET6) { + struct sockaddr_in6 addr; + memset(&addr, 0, sizeof(addr)); + addr.sin6_family = AF_INET6; + addr.sin6_port = 0; + addr.sin6_addr = in6addr_any; + ret = bind(Socket, (SOCKADDR*)&addr, sizeof(addr)) != SOCKET_ERROR; + } else { + PyErr_SetString(PyExc_ValueError, "expected tuple of length 2 or 4"); + return NULL; + } + + if (!ret) + return SetFromWindowsErr(WSAGetLastError()); + Py_RETURN_NONE; +} + +/* + * Windows equivalent of os.strerror() -- compare _ctypes/callproc.c + */ + +PyDoc_STRVAR( + FormatMessage_doc, + "FormatMessage(error_code) -> error_message\n\n" + "Return error message for an error code."); + +static PyObject * +overlapped_FormatMessage(PyObject *ignore, PyObject *args) +{ + DWORD code, n; + WCHAR *lpMsgBuf; + PyObject *res; + + if (!PyArg_ParseTuple(args, F_DWORD, &code)) + return NULL; + + n = FormatMessageW(FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM, + NULL, + code, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + (LPWSTR) &lpMsgBuf, + 0, + NULL); + if (n) { + while (iswspace(lpMsgBuf[n-1])) + --n; + lpMsgBuf[n] = L'\0'; + res = Py_BuildValue("u", lpMsgBuf); + } else { + res = PyUnicode_FromFormat("unknown error code %u", code); + } + LocalFree(lpMsgBuf); + return res; +} 
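The event and FormatMessage helpers above are exported to Python through the `_overlapped` extension module this file implements. A hedged, Windows-only sketch of driving them from Python (the `trollius._overlapped` import path is an assumption about how the extension is installed):

    # Windows-only; assumes the extension is importable as trollius._overlapped.
    from trollius import _overlapped

    # CreateEvent(EventAttributes, ManualReset, InitialState, Name);
    # EventAttributes must be None, per the docstring above.
    event = _overlapped.CreateEvent(None, True, False, None)
    _overlapped.SetEvent(event)    # signaled
    _overlapped.ResetEvent(event)  # back to non-signaled

    # FormatMessage() turns a Windows error code into readable text.
    print(_overlapped.FormatMessage(_overlapped.ERROR_PIPE_BUSY))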
+ + +/* + * Mark operation as completed - used when reading produces ERROR_BROKEN_PIPE + */ + +static void +mark_as_completed(OVERLAPPED *ov) +{ + ov->Internal = 0; + if (ov->hEvent != NULL) + SetEvent(ov->hEvent); +} + +/* + * A Python object wrapping an OVERLAPPED structure and other useful data + * for overlapped I/O + */ + +PyDoc_STRVAR( + Overlapped_doc, + "Overlapped object"); + +static PyObject * +Overlapped_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + OverlappedObject *self; + HANDLE event = INVALID_HANDLE_VALUE; + static char *kwlist[] = {"event", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "|" F_HANDLE, kwlist, &event)) + return NULL; + + if (event == INVALID_HANDLE_VALUE) { + event = CreateEvent(NULL, TRUE, FALSE, NULL); + if (event == NULL) + return SetFromWindowsErr(0); + } + + self = PyObject_New(OverlappedObject, type); + if (self == NULL) { + if (event != NULL) + CloseHandle(event); + return NULL; + } + + self->handle = NULL; + self->error = 0; + self->type = TYPE_NONE; + self->read_buffer = NULL; + memset(&self->overlapped, 0, sizeof(OVERLAPPED)); + memset(&self->write_buffer, 0, sizeof(Py_buffer)); + if (event) + self->overlapped.hEvent = event; + return (PyObject *)self; +} + +static void +Overlapped_dealloc(OverlappedObject *self) +{ + DWORD bytes; + DWORD olderr = GetLastError(); + BOOL wait = FALSE; + BOOL ret; + + if (!HasOverlappedIoCompleted(&self->overlapped) && + self->type != TYPE_NOT_STARTED) + { + if (Py_CancelIoEx && Py_CancelIoEx(self->handle, &self->overlapped)) + wait = TRUE; + + Py_BEGIN_ALLOW_THREADS + ret = GetOverlappedResult(self->handle, &self->overlapped, + &bytes, wait); + Py_END_ALLOW_THREADS + + switch (ret ? ERROR_SUCCESS : GetLastError()) { + case ERROR_SUCCESS: + case ERROR_NOT_FOUND: + case ERROR_OPERATION_ABORTED: + break; + default: + PyErr_Format( + PyExc_RuntimeError, + "%R still has pending operation at " + "deallocation, the process may crash", self); + PyErr_WriteUnraisable(NULL); + } + } + + if (self->overlapped.hEvent != NULL) + CloseHandle(self->overlapped.hEvent); + + switch (self->type) { + case TYPE_READ: + case TYPE_ACCEPT: + Py_CLEAR(self->read_buffer); + break; + case TYPE_WRITE: + if (self->write_buffer.obj) + PyBuffer_Release(&self->write_buffer); + break; + } + PyObject_Del(self); + SetLastError(olderr); +} + +PyDoc_STRVAR( + Overlapped_cancel_doc, + "cancel() -> None\n\n" + "Cancel overlapped operation"); + +static PyObject * +Overlapped_cancel(OverlappedObject *self) +{ + BOOL ret = TRUE; + + if (self->type == TYPE_NOT_STARTED + || self->type == TYPE_WAIT_NAMED_PIPE_AND_CONNECT) + Py_RETURN_NONE; + + if (!HasOverlappedIoCompleted(&self->overlapped)) { + Py_BEGIN_ALLOW_THREADS + if (Py_CancelIoEx) + ret = Py_CancelIoEx(self->handle, &self->overlapped); + else + ret = CancelIo(self->handle); + Py_END_ALLOW_THREADS + } + + /* CancelIoEx returns ERROR_NOT_FOUND if the I/O completed in-between */ + if (!ret && GetLastError() != ERROR_NOT_FOUND) + return SetFromWindowsErr(0); + Py_RETURN_NONE; +} + +PyDoc_STRVAR( + Overlapped_getresult_doc, + "getresult(wait=False) -> result\n\n" + "Retrieve result of operation. If wait is true then it blocks\n" + "until the operation is finished. 
If wait is false and the\n" + "operation is still pending then an error is raised."); + +static PyObject * +Overlapped_getresult(OverlappedObject *self, PyObject *args) +{ + BOOL wait = FALSE; + DWORD transferred = 0; + BOOL ret; + DWORD err; + + if (!PyArg_ParseTuple(args, "|" F_BOOL, &wait)) + return NULL; + + if (self->type == TYPE_NONE) { + PyErr_SetString(PyExc_ValueError, "operation not yet attempted"); + return NULL; + } + + if (self->type == TYPE_NOT_STARTED) { + PyErr_SetString(PyExc_ValueError, "operation failed to start"); + return NULL; + } + + Py_BEGIN_ALLOW_THREADS + ret = GetOverlappedResult(self->handle, &self->overlapped, &transferred, + wait); + Py_END_ALLOW_THREADS + + self->error = err = ret ? ERROR_SUCCESS : GetLastError(); + switch (err) { + case ERROR_SUCCESS: + case ERROR_MORE_DATA: + break; + case ERROR_BROKEN_PIPE: + if ((self->type == TYPE_READ || self->type == TYPE_ACCEPT) && self->read_buffer != NULL) + break; + /* fall through */ + default: + return SetFromWindowsErr(err); + } + + switch (self->type) { + case TYPE_READ: + assert(PyBytes_CheckExact(self->read_buffer)); + if (transferred != PyBytes_GET_SIZE(self->read_buffer) && + _PyBytes_Resize(&self->read_buffer, transferred)) + return NULL; + Py_INCREF(self->read_buffer); + return self->read_buffer; + default: + return PyLong_FromUnsignedLong((unsigned long) transferred); + } +} + +PyDoc_STRVAR( + Overlapped_ReadFile_doc, + "ReadFile(handle, size) -> Overlapped[message]\n\n" + "Start overlapped read"); + +static PyObject * +Overlapped_ReadFile(OverlappedObject *self, PyObject *args) +{ + HANDLE handle; + DWORD size; + DWORD nread; + PyObject *buf; + BOOL ret; + DWORD err; + + if (!PyArg_ParseTuple(args, F_HANDLE F_DWORD, &handle, &size)) + return NULL; + + if (self->type != TYPE_NONE) { + PyErr_SetString(PyExc_ValueError, "operation already attempted"); + return NULL; + } + +#if SIZEOF_SIZE_T <= SIZEOF_LONG + size = Py_MIN(size, (DWORD)PY_SSIZE_T_MAX); +#endif + buf = PyBytes_FromStringAndSize(NULL, Py_MAX(size, 1)); + if (buf == NULL) + return NULL; + + self->type = TYPE_READ; + self->handle = handle; + self->read_buffer = buf; + + Py_BEGIN_ALLOW_THREADS + ret = ReadFile(handle, PyBytes_AS_STRING(buf), size, &nread, + &self->overlapped); + Py_END_ALLOW_THREADS + + self->error = err = ret ? 
ERROR_SUCCESS : GetLastError(); + switch (err) { + case ERROR_BROKEN_PIPE: + mark_as_completed(&self->overlapped); + return SetFromWindowsErr(err); + case ERROR_SUCCESS: + case ERROR_MORE_DATA: + case ERROR_IO_PENDING: + Py_RETURN_NONE; + default: + self->type = TYPE_NOT_STARTED; + return SetFromWindowsErr(err); + } +} + +PyDoc_STRVAR( + Overlapped_WSARecv_doc, + "RecvFile(handle, size, flags) -> Overlapped[message]\n\n" + "Start overlapped receive"); + +static PyObject * +Overlapped_WSARecv(OverlappedObject *self, PyObject *args) +{ + HANDLE handle; + DWORD size; + DWORD flags = 0; + DWORD nread; + PyObject *buf; + WSABUF wsabuf; + int ret; + DWORD err; + + if (!PyArg_ParseTuple(args, F_HANDLE F_DWORD "|" F_DWORD, + &handle, &size, &flags)) + return NULL; + + if (self->type != TYPE_NONE) { + PyErr_SetString(PyExc_ValueError, "operation already attempted"); + return NULL; + } + +#if SIZEOF_SIZE_T <= SIZEOF_LONG + size = Py_MIN(size, (DWORD)PY_SSIZE_T_MAX); +#endif + buf = PyBytes_FromStringAndSize(NULL, Py_MAX(size, 1)); + if (buf == NULL) + return NULL; + + self->type = TYPE_READ; + self->handle = handle; + self->read_buffer = buf; + wsabuf.len = size; + wsabuf.buf = PyBytes_AS_STRING(buf); + + Py_BEGIN_ALLOW_THREADS + ret = WSARecv((SOCKET)handle, &wsabuf, 1, &nread, &flags, + &self->overlapped, NULL); + Py_END_ALLOW_THREADS + + self->error = err = (ret < 0 ? WSAGetLastError() : ERROR_SUCCESS); + switch (err) { + case ERROR_BROKEN_PIPE: + mark_as_completed(&self->overlapped); + return SetFromWindowsErr(err); + case ERROR_SUCCESS: + case ERROR_MORE_DATA: + case ERROR_IO_PENDING: + Py_RETURN_NONE; + default: + self->type = TYPE_NOT_STARTED; + return SetFromWindowsErr(err); + } +} + +PyDoc_STRVAR( + Overlapped_WriteFile_doc, + "WriteFile(handle, buf) -> Overlapped[bytes_transferred]\n\n" + "Start overlapped write"); + +static PyObject * +Overlapped_WriteFile(OverlappedObject *self, PyObject *args) +{ + HANDLE handle; + PyObject *bufobj; + DWORD written; + BOOL ret; + DWORD err; + + if (!PyArg_ParseTuple(args, F_HANDLE "O", &handle, &bufobj)) + return NULL; + + if (self->type != TYPE_NONE) { + PyErr_SetString(PyExc_ValueError, "operation already attempted"); + return NULL; + } + +#ifdef PYTHON3 + if (!PyArg_Parse(bufobj, "y*", &self->write_buffer)) +#else + if (!PyArg_Parse(bufobj, "s*", &self->write_buffer)) +#endif + return NULL; + +#if SIZEOF_SIZE_T > SIZEOF_LONG + if (self->write_buffer.len > (Py_ssize_t)ULONG_MAX) { + PyBuffer_Release(&self->write_buffer); + PyErr_SetString(PyExc_ValueError, "buffer to large"); + return NULL; + } +#endif + + self->type = TYPE_WRITE; + self->handle = handle; + + Py_BEGIN_ALLOW_THREADS + ret = WriteFile(handle, self->write_buffer.buf, + (DWORD)self->write_buffer.len, + &written, &self->overlapped); + Py_END_ALLOW_THREADS + + self->error = err = ret ? 
ERROR_SUCCESS : GetLastError(); + switch (err) { + case ERROR_SUCCESS: + case ERROR_IO_PENDING: + Py_RETURN_NONE; + default: + self->type = TYPE_NOT_STARTED; + return SetFromWindowsErr(err); + } +} + +PyDoc_STRVAR( + Overlapped_WSASend_doc, + "WSASend(handle, buf, flags) -> Overlapped[bytes_transferred]\n\n" + "Start overlapped send"); + +static PyObject * +Overlapped_WSASend(OverlappedObject *self, PyObject *args) +{ + HANDLE handle; + PyObject *bufobj; + DWORD flags; + DWORD written; + WSABUF wsabuf; + int ret; + DWORD err; + + if (!PyArg_ParseTuple(args, F_HANDLE "O" F_DWORD, + &handle, &bufobj, &flags)) + return NULL; + + if (self->type != TYPE_NONE) { + PyErr_SetString(PyExc_ValueError, "operation already attempted"); + return NULL; + } + +#ifdef PYTHON3 + if (!PyArg_Parse(bufobj, "y*", &self->write_buffer)) +#else + if (!PyArg_Parse(bufobj, "s*", &self->write_buffer)) +#endif + return NULL; + +#if SIZEOF_SIZE_T > SIZEOF_LONG + if (self->write_buffer.len > (Py_ssize_t)ULONG_MAX) { + PyBuffer_Release(&self->write_buffer); + PyErr_SetString(PyExc_ValueError, "buffer to large"); + return NULL; + } +#endif + + self->type = TYPE_WRITE; + self->handle = handle; + wsabuf.len = (DWORD)self->write_buffer.len; + wsabuf.buf = self->write_buffer.buf; + + Py_BEGIN_ALLOW_THREADS + ret = WSASend((SOCKET)handle, &wsabuf, 1, &written, flags, + &self->overlapped, NULL); + Py_END_ALLOW_THREADS + + self->error = err = (ret < 0 ? WSAGetLastError() : ERROR_SUCCESS); + switch (err) { + case ERROR_SUCCESS: + case ERROR_IO_PENDING: + Py_RETURN_NONE; + default: + self->type = TYPE_NOT_STARTED; + return SetFromWindowsErr(err); + } +} + +PyDoc_STRVAR( + Overlapped_AcceptEx_doc, + "AcceptEx(listen_handle, accept_handle) -> Overlapped[address_as_bytes]\n\n" + "Start overlapped wait for client to connect"); + +static PyObject * +Overlapped_AcceptEx(OverlappedObject *self, PyObject *args) +{ + SOCKET ListenSocket; + SOCKET AcceptSocket; + DWORD BytesReceived; + DWORD size; + PyObject *buf; + BOOL ret; + DWORD err; + + if (!PyArg_ParseTuple(args, F_HANDLE F_HANDLE, + &ListenSocket, &AcceptSocket)) + return NULL; + + if (self->type != TYPE_NONE) { + PyErr_SetString(PyExc_ValueError, "operation already attempted"); + return NULL; + } + + size = sizeof(struct sockaddr_in6) + 16; + buf = PyBytes_FromStringAndSize(NULL, size*2); + if (!buf) + return NULL; + + self->type = TYPE_ACCEPT; + self->handle = (HANDLE)ListenSocket; + self->read_buffer = buf; + + Py_BEGIN_ALLOW_THREADS + ret = Py_AcceptEx(ListenSocket, AcceptSocket, PyBytes_AS_STRING(buf), + 0, size, size, &BytesReceived, &self->overlapped); + Py_END_ALLOW_THREADS + + self->error = err = ret ? 
ERROR_SUCCESS : WSAGetLastError(); + switch (err) { + case ERROR_SUCCESS: + case ERROR_IO_PENDING: + Py_RETURN_NONE; + default: + self->type = TYPE_NOT_STARTED; + return SetFromWindowsErr(err); + } +} + + +static int +parse_address(PyObject *obj, SOCKADDR *Address, int Length) +{ + char *Host; + unsigned short Port; + unsigned long FlowInfo; + unsigned long ScopeId; + + memset(Address, 0, Length); + + if (PyArg_ParseTuple(obj, "sH", &Host, &Port)) + { + Address->sa_family = AF_INET; + if (WSAStringToAddressA(Host, AF_INET, NULL, Address, &Length) < 0) { + SetFromWindowsErr(WSAGetLastError()); + return -1; + } + ((SOCKADDR_IN*)Address)->sin_port = htons(Port); + return Length; + } + else if (PyArg_ParseTuple(obj, "sHkk", &Host, &Port, &FlowInfo, &ScopeId)) + { + PyErr_Clear(); + Address->sa_family = AF_INET6; + if (WSAStringToAddressA(Host, AF_INET6, NULL, Address, &Length) < 0) { + SetFromWindowsErr(WSAGetLastError()); + return -1; + } + ((SOCKADDR_IN6*)Address)->sin6_port = htons(Port); + ((SOCKADDR_IN6*)Address)->sin6_flowinfo = FlowInfo; + ((SOCKADDR_IN6*)Address)->sin6_scope_id = ScopeId; + return Length; + } + + return -1; +} + + +PyDoc_STRVAR( + Overlapped_ConnectEx_doc, + "ConnectEx(client_handle, address_as_bytes) -> Overlapped[None]\n\n" + "Start overlapped connect. client_handle should be unbound."); + +static PyObject * +Overlapped_ConnectEx(OverlappedObject *self, PyObject *args) +{ + SOCKET ConnectSocket; + PyObject *AddressObj; + char AddressBuf[sizeof(struct sockaddr_in6)]; + SOCKADDR *Address = (SOCKADDR*)AddressBuf; + int Length; + BOOL ret; + DWORD err; + + if (!PyArg_ParseTuple(args, F_HANDLE "O", &ConnectSocket, &AddressObj)) + return NULL; + + if (self->type != TYPE_NONE) { + PyErr_SetString(PyExc_ValueError, "operation already attempted"); + return NULL; + } + + Length = sizeof(AddressBuf); + Length = parse_address(AddressObj, Address, Length); + if (Length < 0) + return NULL; + + self->type = TYPE_CONNECT; + self->handle = (HANDLE)ConnectSocket; + + Py_BEGIN_ALLOW_THREADS + ret = Py_ConnectEx(ConnectSocket, Address, Length, + NULL, 0, NULL, &self->overlapped); + Py_END_ALLOW_THREADS + + self->error = err = ret ? ERROR_SUCCESS : WSAGetLastError(); + switch (err) { + case ERROR_SUCCESS: + case ERROR_IO_PENDING: + Py_RETURN_NONE; + default: + self->type = TYPE_NOT_STARTED; + return SetFromWindowsErr(err); + } +} + +PyDoc_STRVAR( + Overlapped_DisconnectEx_doc, + "DisconnectEx(handle, flags) -> Overlapped[None]\n\n" + "Start overlapped connect. client_handle should be unbound."); + +static PyObject * +Overlapped_DisconnectEx(OverlappedObject *self, PyObject *args) +{ + SOCKET Socket; + DWORD flags; + BOOL ret; + DWORD err; + + if (!PyArg_ParseTuple(args, F_HANDLE F_DWORD, &Socket, &flags)) + return NULL; + + if (self->type != TYPE_NONE) { + PyErr_SetString(PyExc_ValueError, "operation already attempted"); + return NULL; + } + + self->type = TYPE_DISCONNECT; + self->handle = (HANDLE)Socket; + + Py_BEGIN_ALLOW_THREADS + ret = Py_DisconnectEx(Socket, &self->overlapped, flags, 0); + Py_END_ALLOW_THREADS + + self->error = err = ret ? 
ERROR_SUCCESS : WSAGetLastError(); + switch (err) { + case ERROR_SUCCESS: + case ERROR_IO_PENDING: + Py_RETURN_NONE; + default: + self->type = TYPE_NOT_STARTED; + return SetFromWindowsErr(err); + } +} + +PyDoc_STRVAR( + Overlapped_ConnectNamedPipe_doc, + "ConnectNamedPipe(handle) -> Overlapped[None]\n\n" + "Start overlapped wait for a client to connect."); + +static PyObject * +Overlapped_ConnectNamedPipe(OverlappedObject *self, PyObject *args) +{ + HANDLE Pipe; + BOOL ret; + DWORD err; + + if (!PyArg_ParseTuple(args, F_HANDLE, &Pipe)) + return NULL; + + if (self->type != TYPE_NONE) { + PyErr_SetString(PyExc_ValueError, "operation already attempted"); + return NULL; + } + + self->type = TYPE_CONNECT_NAMED_PIPE; + self->handle = Pipe; + + Py_BEGIN_ALLOW_THREADS + ret = ConnectNamedPipe(Pipe, &self->overlapped); + Py_END_ALLOW_THREADS + + self->error = err = ret ? ERROR_SUCCESS : GetLastError(); + switch (err) { + case ERROR_PIPE_CONNECTED: + mark_as_completed(&self->overlapped); + Py_RETURN_TRUE; + case ERROR_SUCCESS: + case ERROR_IO_PENDING: + Py_RETURN_FALSE; + default: + self->type = TYPE_NOT_STARTED; + return SetFromWindowsErr(err); + } +} + +PyDoc_STRVAR( + ConnectPipe_doc, + "ConnectPipe(addr) -> pipe_handle\n\n" + "Connect to the pipe for asynchronous I/O (overlapped)."); + +static PyObject * +ConnectPipe(OverlappedObject *self, PyObject *args) +{ + PyObject *AddressObj; + HANDLE PipeHandle; +#ifdef PYTHON3 + wchar_t *Address; + + if (!PyArg_ParseTuple(args, "U", &AddressObj)) + return NULL; + + Address = PyUnicode_AsWideCharString(AddressObj, NULL); + if (Address == NULL) + return NULL; + +# define CREATE_FILE CreateFileW +#else + char *Address; + + if (!PyArg_ParseTuple(args, "s", &Address)) + return NULL; + +# define CREATE_FILE CreateFileA +#endif + + Py_BEGIN_ALLOW_THREADS + PipeHandle = CREATE_FILE(Address, + GENERIC_READ | GENERIC_WRITE, + 0, NULL, OPEN_EXISTING, + FILE_FLAG_OVERLAPPED, NULL); + Py_END_ALLOW_THREADS + +#ifdef PYTHON3 + PyMem_Free(Address); +#endif + if (PipeHandle == INVALID_HANDLE_VALUE) + return SetFromWindowsErr(0); + return Py_BuildValue(F_HANDLE, PipeHandle); +} + +static PyObject* +Overlapped_getaddress(OverlappedObject *self) +{ + return PyLong_FromVoidPtr(&self->overlapped); +} + +static PyObject* +Overlapped_getpending(OverlappedObject *self) +{ + return PyBool_FromLong(!HasOverlappedIoCompleted(&self->overlapped) && + self->type != TYPE_NOT_STARTED); +} + +static PyMethodDef Overlapped_methods[] = { + {"getresult", (PyCFunction) Overlapped_getresult, + METH_VARARGS, Overlapped_getresult_doc}, + {"cancel", (PyCFunction) Overlapped_cancel, + METH_NOARGS, Overlapped_cancel_doc}, + {"ReadFile", (PyCFunction) Overlapped_ReadFile, + METH_VARARGS, Overlapped_ReadFile_doc}, + {"WSARecv", (PyCFunction) Overlapped_WSARecv, + METH_VARARGS, Overlapped_WSARecv_doc}, + {"WriteFile", (PyCFunction) Overlapped_WriteFile, + METH_VARARGS, Overlapped_WriteFile_doc}, + {"WSASend", (PyCFunction) Overlapped_WSASend, + METH_VARARGS, Overlapped_WSASend_doc}, + {"AcceptEx", (PyCFunction) Overlapped_AcceptEx, + METH_VARARGS, Overlapped_AcceptEx_doc}, + {"ConnectEx", (PyCFunction) Overlapped_ConnectEx, + METH_VARARGS, Overlapped_ConnectEx_doc}, + {"DisconnectEx", (PyCFunction) Overlapped_DisconnectEx, + METH_VARARGS, Overlapped_DisconnectEx_doc}, + {"ConnectNamedPipe", (PyCFunction) Overlapped_ConnectNamedPipe, + METH_VARARGS, Overlapped_ConnectNamedPipe_doc}, + {NULL} +}; + +static PyMemberDef Overlapped_members[] = { + {"error", T_ULONG, + offsetof(OverlappedObject, error), + 
READONLY, "Error from last operation"}, + {"event", T_HANDLE, + offsetof(OverlappedObject, overlapped) + offsetof(OVERLAPPED, hEvent), + READONLY, "Overlapped event handle"}, + {NULL} +}; + +static PyGetSetDef Overlapped_getsets[] = { + {"address", (getter)Overlapped_getaddress, NULL, + "Address of overlapped structure"}, + {"pending", (getter)Overlapped_getpending, NULL, + "Whether the operation is pending"}, + {NULL}, +}; + +PyTypeObject OverlappedType = { + PyVarObject_HEAD_INIT(NULL, 0) + /* tp_name */ "_overlapped.Overlapped", + /* tp_basicsize */ sizeof(OverlappedObject), + /* tp_itemsize */ 0, + /* tp_dealloc */ (destructor) Overlapped_dealloc, + /* tp_print */ 0, + /* tp_getattr */ 0, + /* tp_setattr */ 0, + /* tp_reserved */ 0, + /* tp_repr */ 0, + /* tp_as_number */ 0, + /* tp_as_sequence */ 0, + /* tp_as_mapping */ 0, + /* tp_hash */ 0, + /* tp_call */ 0, + /* tp_str */ 0, + /* tp_getattro */ 0, + /* tp_setattro */ 0, + /* tp_as_buffer */ 0, + /* tp_flags */ Py_TPFLAGS_DEFAULT, + /* tp_doc */ "OVERLAPPED structure wrapper", + /* tp_traverse */ 0, + /* tp_clear */ 0, + /* tp_richcompare */ 0, + /* tp_weaklistoffset */ 0, + /* tp_iter */ 0, + /* tp_iternext */ 0, + /* tp_methods */ Overlapped_methods, + /* tp_members */ Overlapped_members, + /* tp_getset */ Overlapped_getsets, + /* tp_base */ 0, + /* tp_dict */ 0, + /* tp_descr_get */ 0, + /* tp_descr_set */ 0, + /* tp_dictoffset */ 0, + /* tp_init */ 0, + /* tp_alloc */ 0, + /* tp_new */ Overlapped_new, +}; + +static PyMethodDef overlapped_functions[] = { + {"CreateIoCompletionPort", overlapped_CreateIoCompletionPort, + METH_VARARGS, CreateIoCompletionPort_doc}, + {"GetQueuedCompletionStatus", overlapped_GetQueuedCompletionStatus, + METH_VARARGS, GetQueuedCompletionStatus_doc}, + {"PostQueuedCompletionStatus", overlapped_PostQueuedCompletionStatus, + METH_VARARGS, PostQueuedCompletionStatus_doc}, + {"FormatMessage", overlapped_FormatMessage, + METH_VARARGS, FormatMessage_doc}, + {"BindLocal", overlapped_BindLocal, + METH_VARARGS, BindLocal_doc}, + {"RegisterWaitWithQueue", overlapped_RegisterWaitWithQueue, + METH_VARARGS, RegisterWaitWithQueue_doc}, + {"UnregisterWait", overlapped_UnregisterWait, + METH_VARARGS, UnregisterWait_doc}, + {"UnregisterWaitEx", overlapped_UnregisterWaitEx, + METH_VARARGS, UnregisterWaitEx_doc}, + {"CreateEvent", overlapped_CreateEvent, + METH_VARARGS, CreateEvent_doc}, + {"SetEvent", overlapped_SetEvent, + METH_VARARGS, SetEvent_doc}, + {"ResetEvent", overlapped_ResetEvent, + METH_VARARGS, ResetEvent_doc}, + {"ConnectPipe", + (PyCFunction) ConnectPipe, + METH_VARARGS, ConnectPipe_doc}, + {NULL} +}; + +#ifdef PYTHON3 +static struct PyModuleDef overlapped_module = { + PyModuleDef_HEAD_INIT, + "_overlapped", + NULL, + -1, + overlapped_functions, + NULL, + NULL, + NULL, + NULL +}; +#endif + +#define WINAPI_CONSTANT(fmt, con) \ + PyDict_SetItemString(d, #con, Py_BuildValue(fmt, con)) + +PyObject* +_init_overlapped(void) +{ + PyObject *m, *d; + + /* Ensure WSAStartup() called before initializing function pointers */ + m = PyImport_ImportModule("_socket"); + if (!m) + return NULL; + Py_DECREF(m); + + if (initialize_function_pointers() < 0) + return NULL; + + if (PyType_Ready(&OverlappedType) < 0) + return NULL; + +#ifdef PYTHON3 + m = PyModule_Create(&overlapped_module); +#else + m = Py_InitModule("_overlapped", overlapped_functions); +#endif + if (PyModule_AddObject(m, "Overlapped", (PyObject *)&OverlappedType) < 0) + return NULL; + + d = PyModule_GetDict(m); + + WINAPI_CONSTANT(F_DWORD, ERROR_IO_PENDING); + 
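The constants registered during module initialization here are the names the Python side relies on; together with CreateIoCompletionPort() and GetQueuedCompletionStatus() above, they are enough for a minimal completion-port poll loop. A hedged sketch of that loop (not the proactor's actual code; error handling elided, and the import path is an assumption):

    from trollius import _overlapped

    # A fresh port: no file handle yet, default concurrency.
    port = _overlapped.CreateIoCompletionPort(
        _overlapped.INVALID_HANDLE_VALUE, _overlapped.NULL, 0, 0)

    def poll_once(msecs=1000):
        # Returns None on WAIT_TIMEOUT, else (err, transferred, key, address).
        status = _overlapped.GetQueuedCompletionStatus(port, msecs)
        if status is not None:
            err, transferred, key, address = status
            # ... look up the Overlapped object registered at `address` ...
            return err, transferred, key, address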
WINAPI_CONSTANT(F_DWORD, ERROR_NETNAME_DELETED); + WINAPI_CONSTANT(F_DWORD, ERROR_SEM_TIMEOUT); + WINAPI_CONSTANT(F_DWORD, ERROR_PIPE_BUSY); + WINAPI_CONSTANT(F_DWORD, INFINITE); + WINAPI_CONSTANT(F_HANDLE, INVALID_HANDLE_VALUE); + WINAPI_CONSTANT(F_HANDLE, NULL); + WINAPI_CONSTANT(F_DWORD, SO_UPDATE_ACCEPT_CONTEXT); + WINAPI_CONSTANT(F_DWORD, SO_UPDATE_CONNECT_CONTEXT); + WINAPI_CONSTANT(F_DWORD, TF_REUSE_SOCKET); + WINAPI_CONSTANT(F_DWORD, ERROR_CONNECTION_REFUSED); + WINAPI_CONSTANT(F_DWORD, ERROR_CONNECTION_ABORTED); + + return m; +} + +#ifdef PYTHON3 +PyMODINIT_FUNC +PyInit__overlapped(void) +{ + return _init_overlapped(); +} +#else +PyMODINIT_FUNC +init_overlapped(void) +{ + _init_overlapped(); +} +#endif diff --git a/pypi.bat b/pypi.bat new file mode 100644 index 00000000..5218ace3 --- /dev/null +++ b/pypi.bat @@ -0,0 +1 @@ +c:\Python33\python.exe setup.py bdist_wheel upload diff --git a/releaser.conf b/releaser.conf new file mode 100644 index 00000000..1b45fd09 --- /dev/null +++ b/releaser.conf @@ -0,0 +1,7 @@ +# Configuration file for the tool "releaser" +# https://github.com/vstinner/misc/blob/master/bin/releaser.py + +[project] +name = trollius +debug_env_var = TROLLIUSDEBUG +python_versions = 2.7, 3.3, 3.4 diff --git a/runtests.py b/runtests.py new file mode 100755 index 00000000..7eea28c1 --- /dev/null +++ b/runtests.py @@ -0,0 +1,334 @@ +#!/usr/bin/env python +"""Run trollius unittests. + +Usage: + python3 runtests.py [flags] [pattern] ... + +Patterns are matched against the fully qualified name of the test, +including package, module, class and method, +e.g. 'tests.test_events.PolicyTests.testPolicy'. + +For full help, try --help. + +runtests.py --coverage is equivalent of: + + $(COVERAGE) run --branch runtests.py -v + $(COVERAGE) html $(list of files) + $(COVERAGE) report -m $(list of files) + +""" + +# Originally written by Beech Horn (for NDB). 
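Patterns passed on the runtests.py command line are applied with re.search() against fully qualified test ids (TestsFinder.load_tests() below does exactly this), so plain substrings and full regular expressions both work:

    import re

    test_id = 'tests.test_events.PolicyTests.testPolicy'
    for pattern in ('test_events', r'PolicyTests\.testPolicy$'):
        # The same check load_tests() performs per include/exclude pattern.
        print(pattern, '->', bool(re.search(pattern, test_id)))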
+
+from __future__ import print_function
+import optparse
+import gc
+import logging
+import os
+import random
+import re
+import sys
+import textwrap
+PY2 = (sys.version_info < (3,))
+PY33 = (sys.version_info >= (3, 3))
+if PY33:
+    import importlib.machinery
+else:
+    import imp
+try:
+    import coverage
+except ImportError:
+    coverage = None
+if PY2:
+    sys.exc_clear()
+
+try:
+    import unittest2 as unittest
+    from unittest2.signals import installHandler
+except ImportError:
+    import unittest
+    from unittest.signals import installHandler
+
+ARGS = optparse.OptionParser(
+    description="Run all unittests.",
+    usage="%prog [options] [pattern] [pattern2 ...]")
+ARGS.add_option(
+    '-v', '--verbose', type=int, dest='verbose',
+    default=0, help='verbose')
+ARGS.add_option(
+    '-x', action="store_true", dest='exclude', help='exclude tests')
+ARGS.add_option(
+    '-f', '--failfast', action="store_true", default=False,
+    dest='failfast', help='Stop on first fail or error')
+ARGS.add_option(
+    '--no-ssl', action="store_true", default=False,
+    help='Disable the SSL module')
+ARGS.add_option(
+    '--no-concurrent', action="store_true", default=False,
+    help='Disable the concurrent module')
+ARGS.add_option(
+    '-c', '--catch', action="store_true", default=False,
+    dest='catchbreak', help='Catch control-C and display results')
+ARGS.add_option(
+    '--forever', action="store_true", dest='forever', default=False,
+    help='run tests forever to catch sporadic errors')
+ARGS.add_option(
+    '--findleaks', action='store_true', dest='findleaks',
+    help='detect tests that leak memory')
+ARGS.add_option(
+    '-r', '--randomize', action='store_true',
+    help='randomize test execution order.')
+ARGS.add_option(
+    '--seed', type=int,
+    help='random seed to reproduce a previous random run')
+ARGS.add_option(
+    '-q', action="store_true", dest='quiet', help='quiet')
+ARGS.add_option(
+    '--tests', action="store", dest='testsdir', default='tests',
+    help='tests directory')
+ARGS.add_option(
+    '--coverage', action="store_true", dest='coverage',
+    help='enable html coverage report')
+
+
+if PY33:
+    def load_module(modname, sourcefile):
+        loader = importlib.machinery.SourceFileLoader(modname, sourcefile)
+        return loader.load_module()
+else:
+    def load_module(modname, sourcefile):
+        return imp.load_source(modname, sourcefile)
+
+
+def load_modules(basedir, suffix='.py'):
+    import trollius.test_utils
+
+    def list_dir(prefix, dir):
+        files = []
+
+        modpath = os.path.join(dir, '__init__.py')
+        if os.path.isfile(modpath):
+            mod = os.path.split(dir)[-1]
+            files.append(('{0}{1}'.format(prefix, mod), modpath))
+
+            prefix = '{0}{1}.'.format(prefix, mod)
+
+        for name in os.listdir(dir):
+            path = os.path.join(dir, name)
+
+            if os.path.isdir(path):
+                files.extend(list_dir('{0}{1}.'.format(prefix, name), path))
+            else:
+                if (name != '__init__.py' and
+                        name.endswith(suffix) and
+                        not name.startswith(('.', '_'))):
+                    files.append(('{0}{1}'.format(prefix, name[:-3]), path))
+
+        return files
+
+    mods = []
+    for modname, sourcefile in list_dir('', basedir):
+        if modname == 'runtests':
+            continue
+        if modname == 'test_asyncio' and not PY33:
+            print("Skipping '{0}': need at least Python 3.3".format(modname),
+                  file=sys.stderr)
+            continue
+        try:
+            mod = load_module(modname, sourcefile)
+            mods.append((mod, sourcefile))
+        except SyntaxError:
+            raise
+        except trollius.test_utils.SkipTest as err:
+            print("Skipping '{0}': {1}".format(modname, err), file=sys.stderr)
+
+    return mods
+
+
+def randomize_tests(tests, seed):
+    if seed is None:
+        seed = random.randrange(10000000)
+    random.seed(seed)
+    print("Randomize test execution order (seed: %s)" % seed)
+    random.shuffle(tests._tests)
+
+
+class TestsFinder(object):
+
+    def __init__(self, testsdir, includes=(), excludes=()):
+        self._testsdir = testsdir
+        self._includes = includes
+        self._excludes = excludes
+        self.find_available_tests()
+
+    def find_available_tests(self):
+        """
+        Find available test classes without instantiating them.
+        """
+        self._test_factories = []
+        mods = [mod for mod, _ in load_modules(self._testsdir)]
+        for mod in mods:
+            for name in set(dir(mod)):
+                if name.endswith('Tests') or name.startswith('Test'):
+                    self._test_factories.append(getattr(mod, name))
+
+    def load_tests(self):
+        """
+        Load test cases from the available test classes and apply
+        optional include / exclude filters.
+        """
+        loader = unittest.TestLoader()
+        suite = unittest.TestSuite()
+        for test_factory in self._test_factories:
+            tests = loader.loadTestsFromTestCase(test_factory)
+            if self._includes:
+                tests = [test
+                         for test in tests
+                         if any(re.search(pat, test.id())
+                                for pat in self._includes)]
+            if self._excludes:
+                tests = [test
+                         for test in tests
+                         if not any(re.search(pat, test.id())
+                                    for pat in self._excludes)]
+            suite.addTests(tests)
+        return suite
+
+
+class TestResult(unittest.TextTestResult):
+
+    def __init__(self, stream, descriptions, verbosity):
+        # Pass explicit arguments to super(): this runner must stay
+        # usable on Python 2, where bare super() is a TypeError.
+        super(TestResult, self).__init__(stream, descriptions, verbosity)
+        self.leaks = []
+
+    def startTest(self, test):
+        super(TestResult, self).startTest(test)
+        gc.collect()
+
+    def addSuccess(self, test):
+        super(TestResult, self).addSuccess(test)
+        gc.collect()
+        if gc.garbage:
+            if self.showAll:
+                self.stream.writeln(
+                    "    Warning: test created {0} uncollectable "
+                    "object(s).".format(len(gc.garbage)))
+            # move the uncollectable objects somewhere so we don't see
+            # them again
+            self.leaks.append((self.getDescription(test), gc.garbage[:]))
+            del gc.garbage[:]
+
+
+class TestRunner(unittest.TextTestRunner):
+    resultclass = TestResult
+
+    def run(self, test):
+        result = super(TestRunner, self).run(test)
+        if result.leaks:
+            self.stream.writeln("{0} tests leak:".format(len(result.leaks)))
+            for name, leaks in result.leaks:
+                self.stream.writeln(' '*4 + name + ':')
+                for leak in leaks:
+                    self.stream.writeln(' '*8 + repr(leak))
+        return result
+
+
+def _runtests(args, tests):
+    v = 0 if args.quiet else args.verbose + 1
+    runner_factory = TestRunner if args.findleaks else unittest.TextTestRunner
+    if args.randomize:
+        randomize_tests(tests, args.seed)
+    runner = runner_factory(verbosity=v, failfast=args.failfast)
+    sys.stdout.flush()
+    sys.stderr.flush()
+    return runner.run(tests)
+
+
+def runtests():
+    args, pattern = ARGS.parse_args()
+
+    if args.no_ssl:
+        sys.modules['ssl'] = None
+
+    if args.no_concurrent:
+        sys.modules['concurrent'] = None
+
+    if args.coverage and coverage is None:
+        URL = "bitbucket.org/pypa/setuptools/raw/bootstrap/ez_setup.py"
+        print(textwrap.dedent("""
+            coverage package is not installed.
+
+            To install coverage3 for Python 3, you need:
+              - Setuptools (https://pypi.python.org/pypi/setuptools)
+
+            What worked for me:
+              - download {0}
+                * curl -O https://{0}
+              - python3 ez_setup.py
+              - python3 -m easy_install coverage
+            """.format(URL)).strip())
+        sys.exit(1)
+
+    testsdir = os.path.abspath(args.testsdir)
+    if not os.path.isdir(testsdir):
+        print("Tests directory not found: {0}\n".format(testsdir))
+        ARGS.print_help()
+        return
+
+    excludes = includes = []
+    if args.exclude:
+        excludes = pattern
+    else:
+        includes = pattern
+
+    v = 0 if args.quiet else args.verbose + 1
+    failfast = args.failfast
+
+    if args.coverage:
+        # Measure the trollius package itself ('asyncio' was a leftover
+        # from the upstream tulip runner and yields an empty report here).
+        cov = coverage.coverage(branch=True,
+                                source=['trollius'],
+                                )
+        cov.start()
+
+    if v == 0:
+        level = logging.CRITICAL
+    elif v == 1:
+        level = logging.ERROR
+    elif v == 2:
+        level = logging.WARNING
+    elif v == 3:
+        level = logging.INFO
+    else:
+        level = logging.DEBUG
+    logging.basicConfig(level=level)
+
+    finder = TestsFinder(args.testsdir, includes, excludes)
+    if args.catchbreak:
+        installHandler()
+    import trollius.coroutines
+    if trollius.coroutines._DEBUG:
+        print("Run tests in debug mode")
+    else:
+        print("Run tests in release mode")
+    try:
+        tests = finder.load_tests()
+        if args.forever:
+            while True:
+                result = _runtests(args, tests)
+                if not result.wasSuccessful():
+                    sys.exit(1)
+        else:
+            result = _runtests(args, tests)
+            sys.exit(not result.wasSuccessful())
+    finally:
+        if args.coverage:
+            cov.stop()
+            cov.save()
+            cov.html_report(directory='htmlcov')
+            print("\nCoverage report:")
+            cov.report(show_missing=False)
+            here = os.path.dirname(os.path.abspath(__file__))
+            print("\nFor html report:")
+            print("open file://{0}/htmlcov/index.html".format(here))
+
+
+if __name__ == '__main__':
+    runtests()
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 00000000..e058b5a4
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,8 @@
+[bdist_wheel]
+universal = 0
+
+[zest.releaser]
+create-wheel = no
+
+[metadata]
+long_description_content_type = text/x-rst
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..997bac0f
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,45 @@
+import os
+import sys
+from setuptools import setup, Extension
+
+with open("README.rst") as fp:
+    long_description = fp.read()
+
+with open('CHANGES.rst') as fp:
+    long_description += '\n\n' + fp.read()
+
+extensions = []
+if os.name == 'nt':
+    ext = Extension(
+        'trollius._overlapped', ['overlapped.c'], libraries=['ws2_32'],
+    )
+    extensions.append(ext)
+
+requirements = ['six']
+if sys.version_info < (3,):
+    requirements.append('futures')
+
+setup(
+    name="trollius",
+    version='2.2.2.dev0',
+    license="Apache License 2.0",
+    author='Victor Stinner',
+    author_email='victor.stinner@gmail.com',
+    description="Deprecated, unmaintained port of the asyncio module "
+                "(PEP 3156) on Python 2",
+    long_description=long_description,
+    url="https://github.com/jamadden/trollius",
+    classifiers=[
+        "Programming Language :: Python",
+        "Programming Language :: Python :: 2.7",
+        "License :: OSI Approved :: Apache Software License",
+        "Development Status :: 7 - Inactive",
+    ],
+    packages=[
+        "trollius",
+    ],
+    zip_safe=False,
+    keywords="Deprecated Unmaintained asyncio backport",
+    ext_modules=extensions,
+    install_requires=requirements,
+    python_requires=">=2.7, < 3",
+)
diff --git a/tests/echo.py b/tests/echo.py
new file mode 100644
index 00000000..006364bb
--- /dev/null
+++ b/tests/echo.py
@@ -0,0 +1,8 @@
+import os
+
+if __name__ == '__main__':
+    while True:
+        buf = os.read(0, 1024)
+        if not
buf: + break + os.write(1, buf) diff --git a/tests/echo2.py b/tests/echo2.py new file mode 100644 index 00000000..e83ca09f --- /dev/null +++ b/tests/echo2.py @@ -0,0 +1,6 @@ +import os + +if __name__ == '__main__': + buf = os.read(0, 1024) + os.write(1, b'OUT:'+buf) + os.write(2, b'ERR:'+buf) diff --git a/tests/echo3.py b/tests/echo3.py new file mode 100644 index 00000000..a009ea34 --- /dev/null +++ b/tests/echo3.py @@ -0,0 +1,19 @@ +import os +import sys + +asyncio_path = os.path.join(os.path.dirname(__file__), '..') +asyncio_path = os.path.abspath(asyncio_path) + +sys.path.insert(0, asyncio_path) +from trollius.py33_exceptions import wrap_error +sys.path.remove(asyncio_path) + +if __name__ == '__main__': + while True: + buf = os.read(0, 1024) + if not buf: + break + try: + wrap_error(os.write, 1, b'OUT:'+buf) + except OSError as ex: + os.write(2, b'ERR:' + ex.__class__.__name__.encode('ascii')) diff --git a/tests/keycert3.pem b/tests/keycert3.pem new file mode 100644 index 00000000..5bfa62c4 --- /dev/null +++ b/tests/keycert3.pem @@ -0,0 +1,73 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAMLgD0kAKDb5cFyP +jbwNfR5CtewdXC+kMXAWD8DLxiTTvhMW7qVnlwOm36mZlszHKvsRf05lT4pegiFM +9z2j1OlaN+ci/X7NU22TNN6crYSiN77FjYJP464j876ndSxyD+rzys386T+1r1aZ +aggEdkj1TsSsv1zWIYKlPIjlvhuxAgMBAAECgYA0aH+T2Vf3WOPv8KdkcJg6gCRe +yJKXOWgWRcicx/CUzOEsTxmFIDPLxqAWA3k7v0B+3vjGw5Y9lycV/5XqXNoQI14j +y09iNsumds13u5AKkGdTJnZhQ7UKdoVHfuP44ZdOv/rJ5/VD6F4zWywpe90pcbK+ +AWDVtusgGQBSieEl1QJBAOyVrUG5l2yoUBtd2zr/kiGm/DYyXlIthQO/A3/LngDW +5/ydGxVsT7lAVOgCsoT+0L4efTh90PjzW8LPQrPBWVMCQQDS3h/FtYYd5lfz+FNL +9CEe1F1w9l8P749uNUD0g317zv1tatIqVCsQWHfVHNdVvfQ+vSFw38OORO00Xqs9 +1GJrAkBkoXXEkxCZoy4PteheO/8IWWLGGr6L7di6MzFl1lIqwT6D8L9oaV2vynFT +DnKop0pa09Unhjyw57KMNmSE2SUJAkEArloTEzpgRmCq4IK2/NpCeGdHS5uqRlbh +1VIa/xGps7EWQl5Mn8swQDel/YP3WGHTjfx7pgSegQfkyaRtGpZ9OQJAa9Vumj8m +JAAtI0Bnga8hgQx7BhTQY4CadDxyiRGOGYhwUzYVCqkb2sbVRH9HnwUaJT7cWBY3 +RnJdHOMXWem7/w== +-----END PRIVATE KEY----- +Certificate: + Data: + Version: 1 (0x0) + Serial Number: 12723342612721443281 (0xb09264b1f2da21d1) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=XY, O=Python Software Foundation CA, CN=our-ca-server + Validity + Not Before: Jan 4 19:47:07 2013 GMT + Not After : Nov 13 19:47:07 2022 GMT + Subject: C=XY, L=Castle Anthrax, O=Python Software Foundation, CN=localhost + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:c2:e0:0f:49:00:28:36:f9:70:5c:8f:8d:bc:0d: + 7d:1e:42:b5:ec:1d:5c:2f:a4:31:70:16:0f:c0:cb: + c6:24:d3:be:13:16:ee:a5:67:97:03:a6:df:a9:99: + 96:cc:c7:2a:fb:11:7f:4e:65:4f:8a:5e:82:21:4c: + f7:3d:a3:d4:e9:5a:37:e7:22:fd:7e:cd:53:6d:93: + 34:de:9c:ad:84:a2:37:be:c5:8d:82:4f:e3:ae:23: + f3:be:a7:75:2c:72:0f:ea:f3:ca:cd:fc:e9:3f:b5: + af:56:99:6a:08:04:76:48:f5:4e:c4:ac:bf:5c:d6: + 21:82:a5:3c:88:e5:be:1b:b1 + Exponent: 65537 (0x10001) + Signature Algorithm: sha1WithRSAEncryption + 2f:42:5f:a3:09:2c:fa:51:88:c7:37:7f:ea:0e:63:f0:a2:9a: + e5:5a:e2:c8:20:f0:3f:60:bc:c8:0f:b6:c6:76:ce:db:83:93: + f5:a3:33:67:01:8e:04:cd:00:9a:73:fd:f3:35:86:fa:d7:13: + e2:46:c6:9d:c0:29:53:d4:a9:90:b8:77:4b:e6:83:76:e4:92: + d6:9c:50:cf:43:d0:c6:01:77:61:9a:de:9b:70:f7:72:cd:59: + 00:31:69:d9:b4:ca:06:9c:6d:c3:c7:80:8c:68:e6:b5:a2:f8: + ef:1d:bb:16:9f:77:77:ef:87:62:22:9b:4d:69:a4:3a:1a:f1: + 21:5e:8c:32:ac:92:fd:15:6b:18:c2:7f:15:0d:98:30:ca:75: + 8f:1a:71:df:da:1d:b2:ef:9a:e8:2d:2e:02:fd:4a:3c:aa:96: + 0b:06:5d:35:b3:3d:24:87:4b:e0:b0:58:60:2f:45:ac:2e:48: + 
8a:b0:99:10:65:27:ff:cc:b1:d8:fd:bd:26:6b:b9:0c:05:2a: + f4:45:63:35:51:07:ed:83:85:fe:6f:69:cb:bb:40:a8:ae:b6: + 3b:56:4a:2d:a4:ed:6d:11:2c:4d:ed:17:24:fd:47:bc:d3:41: + a2:d3:06:fe:0c:90:d8:d8:94:26:c4:ff:cc:a1:d8:42:77:eb: + fc:a9:94:71 +-----BEGIN CERTIFICATE----- +MIICpDCCAYwCCQCwkmSx8toh0TANBgkqhkiG9w0BAQUFADBNMQswCQYDVQQGEwJY +WTEmMCQGA1UECgwdUHl0aG9uIFNvZnR3YXJlIEZvdW5kYXRpb24gQ0ExFjAUBgNV +BAMMDW91ci1jYS1zZXJ2ZXIwHhcNMTMwMTA0MTk0NzA3WhcNMjIxMTEzMTk0NzA3 +WjBfMQswCQYDVQQGEwJYWTEXMBUGA1UEBxMOQ2FzdGxlIEFudGhyYXgxIzAhBgNV +BAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMRIwEAYDVQQDEwlsb2NhbGhv +c3QwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMLgD0kAKDb5cFyPjbwNfR5C +tewdXC+kMXAWD8DLxiTTvhMW7qVnlwOm36mZlszHKvsRf05lT4pegiFM9z2j1Ola +N+ci/X7NU22TNN6crYSiN77FjYJP464j876ndSxyD+rzys386T+1r1aZaggEdkj1 +TsSsv1zWIYKlPIjlvhuxAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAC9CX6MJLPpR +iMc3f+oOY/CimuVa4sgg8D9gvMgPtsZ2ztuDk/WjM2cBjgTNAJpz/fM1hvrXE+JG +xp3AKVPUqZC4d0vmg3bkktacUM9D0MYBd2Ga3ptw93LNWQAxadm0ygacbcPHgIxo +5rWi+O8duxafd3fvh2Iim01ppDoa8SFejDKskv0VaxjCfxUNmDDKdY8acd/aHbLv +mugtLgL9SjyqlgsGXTWzPSSHS+CwWGAvRawuSIqwmRBlJ//Msdj9vSZruQwFKvRF +YzVRB+2Dhf5vacu7QKiutjtWSi2k7W0RLE3tFyT9R7zTQaLTBv4MkNjYlCbE/8yh +2EJ36/yplHE= +-----END CERTIFICATE----- diff --git a/tests/pycacert.pem b/tests/pycacert.pem new file mode 100644 index 00000000..09b1f3e0 --- /dev/null +++ b/tests/pycacert.pem @@ -0,0 +1,78 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 12723342612721443280 (0xb09264b1f2da21d0) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=XY, O=Python Software Foundation CA, CN=our-ca-server + Validity + Not Before: Jan 4 19:47:07 2013 GMT + Not After : Jan 2 19:47:07 2023 GMT + Subject: C=XY, O=Python Software Foundation CA, CN=our-ca-server + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:e7:de:e9:e3:0c:9f:00:b6:a1:fd:2b:5b:96:d2: + 6f:cc:e0:be:86:b9:20:5e:ec:03:7a:55:ab:ea:a4: + e9:f9:49:85:d2:66:d5:ed:c7:7a:ea:56:8e:2d:8f: + e7:42:e2:62:28:a9:9f:d6:1b:8e:eb:b5:b4:9c:9f: + 14:ab:df:e6:94:8b:76:1d:3e:6d:24:61:ed:0c:bf: + 00:8a:61:0c:df:5c:c8:36:73:16:00:cd:47:ba:6d: + a4:a4:74:88:83:23:0a:19:fc:09:a7:3c:4a:4b:d3: + e7:1d:2d:e4:ea:4c:54:21:f3:26:db:89:37:18:d4: + 02:bb:40:32:5f:a4:ff:2d:1c:f7:d4:bb:ec:8e:cf: + 5c:82:ac:e6:7c:08:6c:48:85:61:07:7f:25:e0:5c: + e0:bc:34:5f:e0:b9:04:47:75:c8:47:0b:8d:bc:d6: + c8:68:5f:33:83:62:d2:20:44:35:b1:ad:81:1a:8a: + cd:bc:35:b0:5c:8b:47:d6:18:e9:9c:18:97:cc:01: + 3c:29:cc:e8:1e:e4:e4:c1:b8:de:e7:c2:11:18:87: + 5a:93:34:d8:a6:25:f7:14:71:eb:e4:21:a2:d2:0f: + 2e:2e:d4:62:00:35:d3:d6:ef:5c:60:4b:4c:a9:14: + e2:dd:15:58:46:37:33:26:b7:e7:2e:5d:ed:42:e4: + c5:4d + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + BC:DD:62:D9:76:DA:1B:D2:54:6B:CF:E0:66:9B:1E:1E:7B:56:0C:0B + X509v3 Authority Key Identifier: + keyid:BC:DD:62:D9:76:DA:1B:D2:54:6B:CF:E0:66:9B:1E:1E:7B:56:0C:0B + + X509v3 Basic Constraints: + CA:TRUE + Signature Algorithm: sha1WithRSAEncryption + 7d:0a:f5:cb:8d:d3:5d:bd:99:8e:f8:2b:0f:ba:eb:c2:d9:a6: + 27:4f:2e:7b:2f:0e:64:d8:1c:35:50:4e:ee:fc:90:b9:8d:6d: + a8:c5:c6:06:b0:af:f3:2d:bf:3b:b8:42:07:dd:18:7d:6d:95: + 54:57:85:18:60:47:2f:eb:78:1b:f9:e8:17:fd:5a:0d:87:17: + 28:ac:4c:6a:e6:bc:29:f4:f4:55:70:29:42:de:85:ea:ab:6c: + 23:06:64:30:75:02:8e:53:bc:5e:01:33:37:cc:1e:cd:b8:a4: + fd:ca:e4:5f:65:3b:83:1c:86:f1:55:02:a0:3a:8f:db:91:b7: + 40:14:b4:e7:8d:d2:ee:73:ba:e3:e5:34:2d:bc:94:6f:4e:24: + 06:f7:5f:8b:0e:a7:8e:6b:de:5e:75:f4:32:9a:50:b1:44:33: + 
9a:d0:05:e2:78:82:ff:db:da:8a:63:eb:a9:dd:d1:bf:a0:61: + ad:e3:9e:8a:24:5d:62:0e:e7:4c:91:7f:ef:df:34:36:3b:2f: + 5d:f5:84:b2:2f:c4:6d:93:96:1a:6f:30:28:f1:da:12:9a:64: + b4:40:33:1d:bd:de:2b:53:a8:ea:be:d6:bc:4e:96:f5:44:fb: + 32:18:ae:d5:1f:f6:69:af:b6:4e:7b:1d:58:ec:3b:a9:53:a3: + 5e:58:c8:9e +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIJALCSZLHy2iHQMA0GCSqGSIb3DQEBBQUAME0xCzAJBgNV +BAYTAlhZMSYwJAYDVQQKDB1QeXRob24gU29mdHdhcmUgRm91bmRhdGlvbiBDQTEW +MBQGA1UEAwwNb3VyLWNhLXNlcnZlcjAeFw0xMzAxMDQxOTQ3MDdaFw0yMzAxMDIx +OTQ3MDdaME0xCzAJBgNVBAYTAlhZMSYwJAYDVQQKDB1QeXRob24gU29mdHdhcmUg +Rm91bmRhdGlvbiBDQTEWMBQGA1UEAwwNb3VyLWNhLXNlcnZlcjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAOfe6eMMnwC2of0rW5bSb8zgvoa5IF7sA3pV +q+qk6flJhdJm1e3HeupWji2P50LiYiipn9Ybjuu1tJyfFKvf5pSLdh0+bSRh7Qy/ +AIphDN9cyDZzFgDNR7ptpKR0iIMjChn8Cac8SkvT5x0t5OpMVCHzJtuJNxjUArtA +Ml+k/y0c99S77I7PXIKs5nwIbEiFYQd/JeBc4Lw0X+C5BEd1yEcLjbzWyGhfM4Ni +0iBENbGtgRqKzbw1sFyLR9YY6ZwYl8wBPCnM6B7k5MG43ufCERiHWpM02KYl9xRx +6+QhotIPLi7UYgA109bvXGBLTKkU4t0VWEY3Mya35y5d7ULkxU0CAwEAAaNQME4w +HQYDVR0OBBYEFLzdYtl22hvSVGvP4GabHh57VgwLMB8GA1UdIwQYMBaAFLzdYtl2 +2hvSVGvP4GabHh57VgwLMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB +AH0K9cuN0129mY74Kw+668LZpidPLnsvDmTYHDVQTu78kLmNbajFxgawr/Mtvzu4 +QgfdGH1tlVRXhRhgRy/reBv56Bf9Wg2HFyisTGrmvCn09FVwKULeheqrbCMGZDB1 +Ao5TvF4BMzfMHs24pP3K5F9lO4MchvFVAqA6j9uRt0AUtOeN0u5zuuPlNC28lG9O +JAb3X4sOp45r3l519DKaULFEM5rQBeJ4gv/b2opj66nd0b+gYa3jnookXWIO50yR +f+/fNDY7L131hLIvxG2TlhpvMCjx2hKaZLRAMx293itTqOq+1rxOlvVE+zIYrtUf +9mmvtk57HVjsO6lTo15YyJ4= +-----END CERTIFICATE----- diff --git a/tests/sample.crt b/tests/sample.crt new file mode 100644 index 00000000..6a1e3f3c --- /dev/null +++ b/tests/sample.crt @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICMzCCAZwCCQDFl4ys0fU7iTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQGEwJV +UzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuLUZyYW5jaXNjbzEi +MCAGA1UECgwZUHl0aG9uIFNvZnR3YXJlIEZvbmRhdGlvbjAeFw0xMzAzMTgyMDA3 +MjhaFw0yMzAzMTYyMDA3MjhaMF4xCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxp +Zm9ybmlhMRYwFAYDVQQHDA1TYW4tRnJhbmNpc2NvMSIwIAYDVQQKDBlQeXRob24g +U29mdHdhcmUgRm9uZGF0aW9uMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCn +t3s+J7L0xP/YdAQOacpPi9phlrzKZhcXL3XMu2LCUg2fNJpx/47Vc5TZSaO11uO7 +gdwVz3Z7Q2epAgwo59JLffLt5fia8+a/SlPweI/j4+wcIIIiqusnLfpqR8cIAavg +Z06cLYCDvb9wMlheIvSJY12skc1nnphWS2YJ0Xm6uQIDAQABMA0GCSqGSIb3DQEB +BQUAA4GBAE9PknG6pv72+5z/gsDGYy8sK5UNkbWSNr4i4e5lxVsF03+/M71H+3AB +MxVX4+A+Vlk2fmU+BrdHIIUE0r1dDcO3josQ9hc9OJpp5VLSQFP8VeuJCmzYPp9I +I8WbW93cnXnChTrYQVdgVoFdv7GE9YgU7NYkrGIM0nZl1/f/bHPB +-----END CERTIFICATE----- diff --git a/tests/sample.key b/tests/sample.key new file mode 100644 index 00000000..edfea8dc --- /dev/null +++ b/tests/sample.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQCnt3s+J7L0xP/YdAQOacpPi9phlrzKZhcXL3XMu2LCUg2fNJpx +/47Vc5TZSaO11uO7gdwVz3Z7Q2epAgwo59JLffLt5fia8+a/SlPweI/j4+wcIIIi +qusnLfpqR8cIAavgZ06cLYCDvb9wMlheIvSJY12skc1nnphWS2YJ0Xm6uQIDAQAB +AoGABfm8k19Yue3W68BecKEGS0VBV57GRTPT+MiBGvVGNIQ15gk6w3sGfMZsdD1y +bsUkQgcDb2d/4i5poBTpl/+Cd41V+c20IC/sSl5X1IEreHMKSLhy/uyjyiyfXlP1 +iXhToFCgLWwENWc8LzfUV8vuAV5WG6oL9bnudWzZxeqx8V0CQQDR7xwVj6LN70Eb +DUhSKLkusmFw5Gk9NJ/7wZ4eHg4B8c9KNVvSlLCLhcsVTQXuqYeFpOqytI45SneP +lr0vrvsDAkEAzITYiXu6ox5huDCG7imX2W9CAYuX638urLxBqBXMS7GqBzojD6RL +21Q8oPwJWJquERa3HDScq1deiQbM9uKIkwJBAIa1PLslGN216Xv3UPHPScyKD/aF +ynXIv+OnANPoiyp6RH4ksQ/18zcEGiVH8EeNpvV9tlAHhb+DZibQHgNr74sCQQC0 +zhToplu/bVKSlUQUNO0rqrI9z30FErDewKeCw5KSsIRSU1E/uM3fHr9iyq4wiL6u +GNjUtKZ0y46lsT9uW6LFAkB5eqeEQnshAdr3X5GykWHJ8DDGBXPPn6Rce1NX4RSq 
+V9khG2z1bFyfo+hMqpYnF2k32hVq3E54RS8YYnwBsVof +-----END RSA PRIVATE KEY----- diff --git a/tests/ssl_cert.pem b/tests/ssl_cert.pem new file mode 100644 index 00000000..47a7d7e3 --- /dev/null +++ b/tests/ssl_cert.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV +BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw +MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH +Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k +YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw +gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7 +6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt +pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw +FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd +BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G +lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1 +CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX +-----END CERTIFICATE----- diff --git a/tests/ssl_key.pem b/tests/ssl_key.pem new file mode 100644 index 00000000..3fd3bbd5 --- /dev/null +++ b/tests/ssl_key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm +LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0 +ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP +USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt +CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq +SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK +UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y +BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ +ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5 +oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik +eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F +0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS +x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/ +SPIXQuT8RMPDVNQ= +-----END PRIVATE KEY----- diff --git a/tests/test_asyncio.py b/tests/test_asyncio.py new file mode 100644 index 00000000..0421db04 --- /dev/null +++ b/tests/test_asyncio.py @@ -0,0 +1,141 @@ +from trollius import test_utils +from trollius import From, Return +import trollius +import trollius.coroutines +from trollius.test_utils import unittest + +try: + import asyncio +except ImportError: + from trollius.test_utils import SkipTest + raise SkipTest('need asyncio') + + +@asyncio.coroutine +def asyncio_noop(value): + yield from [] + return (value,) + +@asyncio.coroutine +def asyncio_coroutine(coro, value): + res = yield from coro + return res + (value,) + +@trollius.coroutine +def trollius_noop(value): + yield From(None) + raise Return((value,)) + +@trollius.coroutine +def trollius_coroutine(coro, value): + res = yield trollius.From(coro) + raise trollius.Return(res + (value,)) + + +class AsyncioTests(test_utils.TestCase): + def setUp(self): + policy = trollius.get_event_loop_policy() + + asyncio.set_event_loop_policy(policy) + self.addCleanup(asyncio.set_event_loop_policy, None) + + self.loop = policy.new_event_loop() + self.addCleanup(self.loop.close) + policy.set_event_loop(self.loop) + + def test_policy(self): + self.assertIs(asyncio.get_event_loop(), self.loop) + + def test_asyncio(self): + coro = asyncio_noop("asyncio") + res = 
self.loop.run_until_complete(coro) + self.assertEqual(res, ("asyncio",)) + + def test_asyncio_in_trollius(self): + coro1 = asyncio_noop(1) + coro2 = asyncio_coroutine(coro1, 2) + res = self.loop.run_until_complete(trollius_coroutine(coro2, 3)) + self.assertEqual(res, (1, 2, 3)) + + def test_trollius_in_asyncio(self): + coro1 = trollius_noop(4) + coro2 = trollius_coroutine(coro1, 5) + res = self.loop.run_until_complete(asyncio_coroutine(coro2, 6)) + self.assertEqual(res, (4, 5, 6)) + + def test_step_future(self): + old_debug = trollius.coroutines._DEBUG + try: + def step_future(): + future = asyncio.Future() + self.loop.call_soon(future.set_result, "asyncio.Future") + return (yield from future) + + # test in release mode + trollius.coroutines._DEBUG = False + result = self.loop.run_until_complete(step_future()) + self.assertEqual(result, "asyncio.Future") + + # test in debug mode + trollius.coroutines._DEBUG = True + result = self.loop.run_until_complete(step_future()) + self.assertEqual(result, "asyncio.Future") + finally: + trollius.coroutines._DEBUG = old_debug + + def test_async(self): + fut = asyncio.Future() + self.assertIs(fut._loop, self.loop) + + fut2 = trollius.ensure_future(fut) + self.assertIs(fut2, fut) + self.assertIs(fut._loop, self.loop) + + def test_wrap_future(self): + fut = asyncio.Future() + self.assertIs(trollius.wrap_future(fut), fut) + + def test_run_until_complete(self): + fut = asyncio.Future() + fut.set_result("ok") + self.assertEqual(self.loop.run_until_complete(fut), + "ok") + + def test_coroutine_decorator(self): + @trollius.coroutine + def asyncio_future(fut): + return fut + + fut = asyncio.Future() + self.loop.call_soon(fut.set_result, 'ok') + res = self.loop.run_until_complete(asyncio_future(fut)) + self.assertEqual(res, "ok") + + def test_as_completed(self): + fut = asyncio.Future() + fut.set_result("ok") + + with self.assertRaises(TypeError): + for f in trollius.as_completed(fut): + pass + + @trollius.coroutine + def get_results(fut): + results = [] + for f in trollius.as_completed([fut]): + res = yield trollius.From(f) + results.append(res) + raise trollius.Return(results) + + results = self.loop.run_until_complete(get_results(fut)) + self.assertEqual(results, ["ok"]) + + def test_gather(self): + fut = asyncio.Future() + fut.set_result("ok") + results = self.loop.run_until_complete(trollius.gather(fut)) + self.assertEqual(results, ["ok"]) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_base_events.py b/tests/test_base_events.py new file mode 100644 index 00000000..02ecfeb3 --- /dev/null +++ b/tests/test_base_events.py @@ -0,0 +1,1283 @@ +"""Tests for base_events.py""" + +import errno +import logging +import math +import socket +import sys +import threading +import time + +import trollius as asyncio +from trollius import Return, From +from trollius import base_events +from trollius import constants +from trollius import test_utils +from trollius.py33_exceptions import BlockingIOError +from trollius.test_utils import mock +from trollius.time_monotonic import time_monotonic +from trollius.test_utils import unittest +from trollius import test_support as support + + +MOCK_ANY = mock.ANY +PY34 = sys.version_info >= (3, 4) + + +class BaseEventLoopTests(test_utils.TestCase): + + def setUp(self): + self.loop = base_events.BaseEventLoop() + self.loop._selector = mock.Mock() + self.loop._selector.select.return_value = () + self.set_event_loop(self.loop) + + def test_not_implemented(self): + m = mock.Mock() + self.assertRaises( + 
NotImplementedError, + self.loop._make_socket_transport, m, m) + self.assertRaises( + NotImplementedError, + self.loop._make_ssl_transport, m, m, m, m) + self.assertRaises( + NotImplementedError, + self.loop._make_datagram_transport, m, m) + self.assertRaises( + NotImplementedError, self.loop._process_events, []) + self.assertRaises( + NotImplementedError, self.loop._write_to_self) + self.assertRaises( + NotImplementedError, + self.loop._make_read_pipe_transport, m, m) + self.assertRaises( + NotImplementedError, + self.loop._make_write_pipe_transport, m, m) + gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m) + # self.assertRaises(NotImplementedError, next, iter(gen)) + with self.assertRaises(NotImplementedError): + gen.send(None) + + def test_close(self): + self.assertFalse(self.loop.is_closed()) + self.loop.close() + self.assertTrue(self.loop.is_closed()) + + # it should be possible to call close() more than once + self.loop.close() + self.loop.close() + + # operation blocked when the loop is closed + f = asyncio.Future(loop=self.loop) + self.assertRaises(RuntimeError, self.loop.run_forever) + self.assertRaises(RuntimeError, self.loop.run_until_complete, f) + + def test__add_callback_handle(self): + h = asyncio.Handle(lambda: False, (), self.loop) + + self.loop._add_callback(h) + self.assertFalse(self.loop._scheduled) + self.assertIn(h, self.loop._ready) + + def test__add_callback_cancelled_handle(self): + h = asyncio.Handle(lambda: False, (), self.loop) + h.cancel() + + self.loop._add_callback(h) + self.assertFalse(self.loop._scheduled) + self.assertFalse(self.loop._ready) + + def test_set_default_executor(self): + executor = mock.Mock() + self.loop.set_default_executor(executor) + self.assertIs(executor, self.loop._default_executor) + + def test_getnameinfo(self): + sockaddr = mock.Mock() + self.loop.run_in_executor = mock.Mock() + self.loop.getnameinfo(sockaddr) + self.assertEqual( + (None, socket.getnameinfo, sockaddr, 0), + self.loop.run_in_executor.call_args[0]) + + def test_call_soon(self): + def cb(): + pass + + h = self.loop.call_soon(cb) + self.assertEqual(h._callback, cb) + self.assertIsInstance(h, asyncio.Handle) + self.assertIn(h, self.loop._ready) + + def test_call_later(self): + def cb(): + pass + + h = self.loop.call_later(10.0, cb) + self.assertIsInstance(h, asyncio.TimerHandle) + self.assertIn(h, self.loop._scheduled) + self.assertNotIn(h, self.loop._ready) + + def test_call_later_negative_delays(self): + calls = [] + + def cb(arg): + calls.append(arg) + + self.loop._process_events = mock.Mock() + self.loop.call_later(-1, cb, 'a') + self.loop.call_later(-2, cb, 'b') + test_utils.run_briefly(self.loop) + self.assertEqual(calls, ['b', 'a']) + + def test_time_and_call_at(self): + def cb(): + self.loop.stop() + + self.loop._process_events = mock.Mock() + delay = 0.1 + + when = self.loop.time() + delay + self.loop.call_at(when, cb) + t0 = self.loop.time() + self.loop.run_forever() + dt = self.loop.time() - t0 + + # 50 ms: maximum granularity of the event loop + self.assertGreaterEqual(dt, delay - 0.050, dt) + # tolerate a difference of +800 ms because some Python buildbots + # are really slow + self.assertLessEqual(dt, 0.9, dt) + + def check_thread(self, loop, debug): + def cb(): + pass + + loop.set_debug(debug) + if debug: + msg = ("Non-thread-safe operation invoked on an event loop other " + "than the current one") + with self.assertRaisesRegex(RuntimeError, msg): + loop.call_soon(cb) + with self.assertRaisesRegex(RuntimeError, msg): + loop.call_later(60, cb) 
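+            # call_at() must perform the same thread check as call_soon()
+            # and call_later() just above: in debug mode, scheduling on a
+            # loop from a foreign thread raises RuntimeError
+            # (call_soon_threadsafe() is the supported cross-thread API).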
+ with self.assertRaisesRegex(RuntimeError, msg): + loop.call_at(loop.time() + 60, cb) + else: + loop.call_soon(cb) + loop.call_later(60, cb) + loop.call_at(loop.time() + 60, cb) + + def test_check_thread(self): + def check_in_thread(loop, event, debug, create_loop, fut): + # wait until the event loop is running + event.wait() + + try: + if create_loop: + loop2 = base_events.BaseEventLoop() + try: + asyncio.set_event_loop(loop2) + self.check_thread(loop, debug) + finally: + asyncio.set_event_loop(None) + loop2.close() + else: + self.check_thread(loop, debug) + except Exception as exc: + loop.call_soon_threadsafe(fut.set_exception, exc) + else: + loop.call_soon_threadsafe(fut.set_result, None) + + def test_thread(loop, debug, create_loop=False): + event = threading.Event() + fut = asyncio.Future(loop=loop) + loop.call_soon(event.set) + args = (loop, event, debug, create_loop, fut) + thread = threading.Thread(target=check_in_thread, args=args) + thread.start() + loop.run_until_complete(fut) + thread.join() + + self.loop._process_events = mock.Mock() + self.loop._write_to_self = mock.Mock() + + # raise RuntimeError if the thread has no event loop + test_thread(self.loop, True) + + # check disabled if debug mode is disabled + test_thread(self.loop, False) + + # raise RuntimeError if the event loop of the thread is not the called + # event loop + test_thread(self.loop, True, create_loop=True) + + # check disabled if debug mode is disabled + test_thread(self.loop, False, create_loop=True) + + def test_run_once_in_executor_handle(self): + def cb(): + pass + + self.assertRaises( + AssertionError, self.loop.run_in_executor, + None, asyncio.Handle(cb, (), self.loop), ('',)) + self.assertRaises( + AssertionError, self.loop.run_in_executor, + None, asyncio.TimerHandle(10, cb, (), self.loop)) + + def test_run_once_in_executor_cancelled(self): + def cb(): + pass + h = asyncio.Handle(cb, (), self.loop) + h.cancel() + + f = self.loop.run_in_executor(None, h) + self.assertIsInstance(f, asyncio.Future) + self.assertTrue(f.done()) + self.assertIsNone(f.result()) + + def test_run_once_in_executor_plain(self): + def cb(): + pass + h = asyncio.Handle(cb, (), self.loop) + f = asyncio.Future(loop=self.loop) + executor = mock.Mock() + executor.submit.return_value = f + + self.loop.set_default_executor(executor) + + res = self.loop.run_in_executor(None, h) + self.assertIs(f, res) + + executor = mock.Mock() + executor.submit.return_value = f + res = self.loop.run_in_executor(executor, h) + self.assertIs(f, res) + self.assertTrue(executor.submit.called) + + f.cancel() # Don't complain about abandoned Future. + + def test__run_once(self): + h1 = asyncio.TimerHandle(time_monotonic() + 5.0, lambda: True, (), + self.loop) + h2 = asyncio.TimerHandle(time_monotonic() + 10.0, lambda: True, (), + self.loop) + + h1.cancel() + + self.loop._process_events = mock.Mock() + self.loop._scheduled.append(h1) + self.loop._scheduled.append(h2) + self.loop._run_once() + + t = self.loop._selector.select.call_args[0][0] + self.assertTrue(9.5 < t < 10.5, t) + self.assertEqual([h2], self.loop._scheduled) + self.assertTrue(self.loop._process_events.called) + + def test_set_debug(self): + self.loop.set_debug(True) + self.assertTrue(self.loop.get_debug()) + self.loop.set_debug(False) + self.assertFalse(self.loop.get_debug()) + + @mock.patch('trollius.base_events.logger') + def test__run_once_logging(self, m_logger): + def slow_select(timeout): + # Sleep a bit longer than a second to avoid timer resolution + # issues. 
+ time.sleep(1.1) + return [] + + # logging needs debug flag + self.loop.set_debug(True) + + # Log to INFO level if timeout > 1.0 sec. + self.loop._selector.select = slow_select + self.loop._process_events = mock.Mock() + self.loop._run_once() + self.assertEqual(logging.INFO, m_logger.log.call_args[0][0]) + + def fast_select(timeout): + time.sleep(0.001) + return [] + + self.loop._selector.select = fast_select + self.loop._run_once() + self.assertEqual(logging.DEBUG, m_logger.log.call_args[0][0]) + + def test__run_once_schedule_handle(self): + non_local = {'handle': None, 'processed': False} + + def cb(loop): + non_local['processed'] = True + non_local['handle'] = loop.call_soon(lambda: True) + + h = asyncio.TimerHandle(time_monotonic() - 1, cb, (self.loop,), + self.loop) + + self.loop._process_events = mock.Mock() + self.loop._scheduled.append(h) + self.loop._run_once() + + self.assertTrue(non_local['processed']) + self.assertEqual([non_local['handle']], list(self.loop._ready)) + + def test__run_once_cancelled_event_cleanup(self): + self.loop._process_events = mock.Mock() + + self.assertTrue( + 0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0) + + def cb(): + pass + + # Set up one "blocking" event that will not be cancelled to + # ensure later cancelled events do not make it to the head + # of the queue and get cleaned. + not_cancelled_count = 1 + self.loop.call_later(3000, cb) + + # Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES) + # cancelled handles, ensure they aren't removed + + cancelled_count = 2 + for x in range(2): + h = self.loop.call_later(3600, cb) + h.cancel() + + # Add some cancelled events that will be at head and removed + cancelled_count += 2 + for x in range(2): + h = self.loop.call_later(100, cb) + h.cancel() + + # This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low + self.assertLessEqual(cancelled_count + not_cancelled_count, + base_events._MIN_SCHEDULED_TIMER_HANDLES) + + self.assertEqual(self.loop._timer_cancelled_count, cancelled_count) + + self.loop._run_once() + + cancelled_count -= 2 + + self.assertEqual(self.loop._timer_cancelled_count, cancelled_count) + + self.assertEqual(len(self.loop._scheduled), + cancelled_count + not_cancelled_count) + + # Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION + # so that deletion of cancelled events will occur on next _run_once + add_cancel_count = int(math.ceil( + base_events._MIN_SCHEDULED_TIMER_HANDLES * + base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1 + + add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES - + add_cancel_count, 0) + + # Add some events that will not be cancelled + not_cancelled_count += add_not_cancel_count + for x in range(add_not_cancel_count): + self.loop.call_later(3600, cb) + + # Add enough cancelled events + cancelled_count += add_cancel_count + for x in range(add_cancel_count): + h = self.loop.call_later(3600, cb) + h.cancel() + + # Ensure all handles are still scheduled + self.assertEqual(len(self.loop._scheduled), + cancelled_count + not_cancelled_count) + + self.loop._run_once() + + # Ensure cancelled events were removed + self.assertEqual(len(self.loop._scheduled), not_cancelled_count) + + # Ensure only uncancelled events remain scheduled + self.assertTrue(all([not x._cancelled for x in self.loop._scheduled])) + + def test_run_until_complete_type_error(self): + self.assertRaises(TypeError, + self.loop.run_until_complete, 'blah') + + def test_run_until_complete_loop(self): + task = asyncio.Future(loop=self.loop) + 
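+        # The future is bound to self.loop, so running it to completion
+        # on a *different* loop must be rejected with ValueError.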
other_loop = self.new_test_loop() + self.addCleanup(other_loop.close) + self.assertRaises(ValueError, + other_loop.run_until_complete, task) + + def test_subprocess_exec_invalid_args(self): + args = [sys.executable, '-c', 'pass'] + + # missing program parameter (empty args) + self.assertRaises(TypeError, + self.loop.run_until_complete, self.loop.subprocess_exec, + asyncio.SubprocessProtocol) + + # expected multiple arguments, not a list + self.assertRaises(TypeError, + self.loop.run_until_complete, self.loop.subprocess_exec, + asyncio.SubprocessProtocol, args) + + # program arguments must be strings, not int + self.assertRaises(TypeError, + self.loop.run_until_complete, self.loop.subprocess_exec, + asyncio.SubprocessProtocol, sys.executable, 123) + + # universal_newlines, shell, bufsize must not be set + self.assertRaises(TypeError, + self.loop.run_until_complete, self.loop.subprocess_exec, + asyncio.SubprocessProtocol, *args, universal_newlines=True) + self.assertRaises(TypeError, + self.loop.run_until_complete, self.loop.subprocess_exec, + asyncio.SubprocessProtocol, *args, shell=True) + self.assertRaises(TypeError, + self.loop.run_until_complete, self.loop.subprocess_exec, + asyncio.SubprocessProtocol, *args, bufsize=4096) + + def test_subprocess_shell_invalid_args(self): + # expected a string, not an int or a list + self.assertRaises(TypeError, + self.loop.run_until_complete, self.loop.subprocess_shell, + asyncio.SubprocessProtocol, 123) + self.assertRaises(TypeError, + self.loop.run_until_complete, self.loop.subprocess_shell, + asyncio.SubprocessProtocol, [sys.executable, '-c', 'pass']) + + # universal_newlines, shell, bufsize must not be set + self.assertRaises(TypeError, + self.loop.run_until_complete, self.loop.subprocess_shell, + asyncio.SubprocessProtocol, 'exit 0', universal_newlines=True) + self.assertRaises(TypeError, + self.loop.run_until_complete, self.loop.subprocess_shell, + asyncio.SubprocessProtocol, 'exit 0', shell=True) + self.assertRaises(TypeError, + self.loop.run_until_complete, self.loop.subprocess_shell, + asyncio.SubprocessProtocol, 'exit 0', bufsize=4096) + + def test_default_exc_handler_callback(self): + self.loop._process_events = mock.Mock() + + def zero_error(fut): + fut.set_result(True) + 1/0 + + # Test call_soon (events.Handle) + with mock.patch('trollius.base_events.logger') as log: + fut = asyncio.Future(loop=self.loop) + self.loop.call_soon(zero_error, fut) + fut.add_done_callback(lambda fut: self.loop.stop()) + self.loop.run_forever() + log.error.assert_called_with( + test_utils.MockPattern('Exception in callback.*zero'), + exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY)) + + # Test call_later (events.TimerHandle) + with mock.patch('trollius.base_events.logger') as log: + fut = asyncio.Future(loop=self.loop) + self.loop.call_later(0.01, zero_error, fut) + fut.add_done_callback(lambda fut: self.loop.stop()) + self.loop.run_forever() + log.error.assert_called_with( + test_utils.MockPattern('Exception in callback.*zero'), + exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY)) + + def test_default_exc_handler_coro(self): + self.loop._process_events = mock.Mock() + self.loop.set_debug(True) + asyncio.set_event_loop(self.loop) + + @asyncio.coroutine + def zero_error_coro(): + yield From(asyncio.sleep(0.01, loop=self.loop)) + 1/0 + + # Test Future.__del__ + with mock.patch('trollius.base_events.logger') as log: + fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop) + fut.add_done_callback(lambda *args: self.loop.stop()) + self.loop.run_forever() + fut 
= None # Trigger Future.__del__ or futures._TracebackLogger + support.gc_collect() + if PY34: + # Future.__del__ in Python 3.4 logs error with + # an actual exception context + log.error.assert_called_with( + test_utils.MockPattern('.*exception was never retrieved'), + exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY)) + else: + # futures._TracebackLogger logs only textual traceback + log.error.assert_called_with( + test_utils.MockPattern( + '.*exception was never retrieved.*ZeroDiv'), + exc_info=False) + + def test_set_exc_handler_invalid(self): + with self.assertRaisesRegex(TypeError, 'A callable object or None'): + self.loop.set_exception_handler('spam') + + def test_set_exc_handler_custom(self): + def zero_error(): + 1/0 + + def run_loop(): + handle = self.loop.call_soon(zero_error) + self.loop._run_once() + return handle + + self.loop.set_debug(True) + self.loop._process_events = mock.Mock() + + mock_handler = mock.Mock() + self.loop.set_exception_handler(mock_handler) + handle = run_loop() + mock_handler.assert_called_with(self.loop, { + 'exception': MOCK_ANY, + 'message': test_utils.MockPattern( + 'Exception in callback.*zero_error'), + 'handle': handle, + 'source_traceback': handle._source_traceback, + }) + mock_handler.reset_mock() + + self.loop.set_exception_handler(None) + with mock.patch('trollius.base_events.logger') as log: + run_loop() + log.error.assert_called_with( + test_utils.MockPattern( + 'Exception in callback.*zero'), + exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY)) + + assert not mock_handler.called + + def test_set_exc_handler_broken(self): + def run_loop(): + def zero_error(): + 1/0 + self.loop.call_soon(zero_error) + self.loop._run_once() + + def handler(loop, context): + raise AttributeError('spam') + + self.loop._process_events = mock.Mock() + + self.loop.set_exception_handler(handler) + + with mock.patch('trollius.base_events.logger') as log: + run_loop() + log.error.assert_called_with( + test_utils.MockPattern( + 'Unhandled error in exception handler'), + exc_info=(AttributeError, MOCK_ANY, MOCK_ANY)) + + def test_default_exc_handler_broken(self): + contexts = [] + + class Loop(base_events.BaseEventLoop): + + _selector = mock.Mock() + _process_events = mock.Mock() + + def default_exception_handler(self, context): + contexts.append(context) + # Simulates custom buggy "default_exception_handler" + raise ValueError('spam') + + loop = Loop() + self.addCleanup(loop.close) + asyncio.set_event_loop(loop) + + def run_loop(): + def zero_error(): + 1/0 + loop.call_soon(zero_error) + loop._run_once() + + with mock.patch('trollius.base_events.logger') as log: + run_loop() + log.error.assert_called_with( + 'Exception in default exception handler', + exc_info=True) + + def custom_handler(loop, context): + raise ValueError('ham') + + del contexts[:] + loop.set_exception_handler(custom_handler) + with mock.patch('trollius.base_events.logger') as log: + run_loop() + log.error.assert_called_with( + test_utils.MockPattern('Exception in default exception.*' + 'while handling.*in custom'), + exc_info=True) + + # Check that original context was passed to default + # exception handler. 
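+        # When custom_handler raised, the loop fell back to
+        # default_exception_handler() with a wrapper dict whose 'context'
+        # key still carries the original ZeroDivisionError context: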
+ context = contexts[0] + self.assertIn('context', context) + self.assertIs(type(context['context']['exception']), + ZeroDivisionError) + + def test_set_task_factory_invalid(self): + with self.assertRaisesRegex( + TypeError, 'task factory must be a callable or None'): + + self.loop.set_task_factory(1) + + self.assertIsNone(self.loop.get_task_factory()) + + def test_set_task_factory(self): + self.loop._process_events = mock.Mock() + + class MyTask(asyncio.Task): + pass + + @asyncio.coroutine + def coro(): + pass + + factory = lambda loop, coro: MyTask(coro, loop=loop) + + self.assertIsNone(self.loop.get_task_factory()) + self.loop.set_task_factory(factory) + self.assertIs(self.loop.get_task_factory(), factory) + + task = self.loop.create_task(coro()) + self.assertTrue(isinstance(task, MyTask)) + self.loop.run_until_complete(task) + + self.loop.set_task_factory(None) + self.assertIsNone(self.loop.get_task_factory()) + + task = self.loop.create_task(coro()) + self.assertTrue(isinstance(task, asyncio.Task)) + self.assertFalse(isinstance(task, MyTask)) + self.loop.run_until_complete(task) + + def test_env_var_debug(self): + code = '\n'.join(( + 'import trollius', + 'loop = trollius.get_event_loop()', + 'print(loop.get_debug())')) + + sts, stdout, stderr = support.assert_python_ok('-c', code, + TROLLIUSDEBUG='') + self.assertEqual(stdout.rstrip(), b'False') + + sts, stdout, stderr = support.assert_python_ok('-c', code, + TROLLIUSDEBUG='1') + self.assertEqual(stdout.rstrip(), b'True') + + def test_create_task(self): + class MyTask(asyncio.Task): + pass + + @asyncio.coroutine + def test(): + pass + + class EventLoop(base_events.BaseEventLoop): + def create_task(self, coro): + return MyTask(coro, loop=loop) + + loop = EventLoop() + self.set_event_loop(loop) + + coro = test() + task = asyncio.ensure_future(coro, loop=loop) + self.assertIsInstance(task, MyTask) + + # make warnings quiet + task._log_destroy_pending = False + coro.close() + + def test_run_forever_keyboard_interrupt(self): + # Python issue #22601: ensure that the temporary task created by + # run_forever() consumes the KeyboardInterrupt and so don't log + # a warning + @asyncio.coroutine + def raise_keyboard_interrupt(): + raise KeyboardInterrupt + + self.loop._process_events = mock.Mock() + self.loop.call_exception_handler = mock.Mock() + + try: + self.loop.run_until_complete(raise_keyboard_interrupt()) + except KeyboardInterrupt: + pass + self.loop.close() + support.gc_collect() + + self.assertFalse(self.loop.call_exception_handler.called) + + def test_run_until_complete_baseexception(self): + # Python issue #22429: run_until_complete() must not schedule a pending + # call to stop() if the future raised a BaseException + @asyncio.coroutine + def raise_keyboard_interrupt(): + raise KeyboardInterrupt + + self.loop._process_events = mock.Mock() + + try: + self.loop.run_until_complete(raise_keyboard_interrupt()) + except KeyboardInterrupt: + pass + + def func(): + self.loop.stop() + func.called = True + func.called = False + try: + self.loop.call_soon(func) + self.loop.run_forever() + except KeyboardInterrupt: + pass + self.assertTrue(func.called) + + +class MyProto(asyncio.Protocol): + done = None + + def __init__(self, create_future=False): + self.state = 'INITIAL' + self.nbytes = 0 + if create_future: + self.done = asyncio.Future() + + def connection_made(self, transport): + self.transport = transport + assert self.state == 'INITIAL', self.state + self.state = 'CONNECTED' + transport.write(b'GET / HTTP/1.0\r\nHost: 
example.com\r\n\r\n') + + def data_received(self, data): + assert self.state == 'CONNECTED', self.state + self.nbytes += len(data) + + def eof_received(self): + assert self.state == 'CONNECTED', self.state + self.state = 'EOF' + + def connection_lost(self, exc): + assert self.state in ('CONNECTED', 'EOF'), self.state + self.state = 'CLOSED' + if self.done: + self.done.set_result(None) + + +class MyDatagramProto(asyncio.DatagramProtocol): + done = None + + def __init__(self, create_future=False): + self.state = 'INITIAL' + self.nbytes = 0 + if create_future: + self.done = asyncio.Future() + + def connection_made(self, transport): + self.transport = transport + assert self.state == 'INITIAL', self.state + self.state = 'INITIALIZED' + + def datagram_received(self, data, addr): + assert self.state == 'INITIALIZED', self.state + self.nbytes += len(data) + + def error_received(self, exc): + assert self.state == 'INITIALIZED', self.state + + def connection_lost(self, exc): + assert self.state == 'INITIALIZED', self.state + self.state = 'CLOSED' + if self.done: + self.done.set_result(None) + + +class BaseEventLoopWithSelectorTests(test_utils.TestCase): + + def setUp(self): + self.loop = asyncio.new_event_loop() + self.set_event_loop(self.loop) + + @mock.patch('trollius.base_events.socket') + def test_create_connection_multiple_errors(self, m_socket): + + class MyProto(asyncio.Protocol): + pass + + @asyncio.coroutine + def getaddrinfo(*args, **kw): + yield From(None) + raise Return([(2, 1, 6, '', ('107.6.106.82', 80)), + (2, 1, 6, '', ('107.6.106.82', 80))]) + + def getaddrinfo_task(*args, **kwds): + return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) + + non_local = { + 'idx': -1, + 'errors': ['err1', 'err2'], + } + + def _socket(*args, **kw): + non_local['idx'] += 1 + raise socket.error(non_local['errors'][non_local['idx']]) + + m_socket.error = socket.error + m_socket.socket = _socket + + self.loop.getaddrinfo = getaddrinfo_task + + coro = self.loop.create_connection(MyProto, 'example.com', 80) + with self.assertRaises(socket.error) as cm: + self.loop.run_until_complete(coro) + + self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2') + + @mock.patch('trollius.base_events.socket') + def test_create_connection_timeout(self, m_socket): + # Ensure that the socket is closed on timeout + sock = mock.Mock() + m_socket.socket.return_value = sock + m_socket.error = socket.error + + def getaddrinfo(*args, **kw): + fut = asyncio.Future(loop=self.loop) + addr = (socket.AF_INET, socket.SOCK_STREAM, 0, '', + ('127.0.0.1', 80)) + fut.set_result([addr]) + return fut + self.loop.getaddrinfo = getaddrinfo + + with mock.patch.object(self.loop, 'sock_connect', + side_effect=asyncio.TimeoutError): + coro = self.loop.create_connection(MyProto, '127.0.0.1', 80) + with self.assertRaises(asyncio.TimeoutError): + self.loop.run_until_complete(coro) + self.assertTrue(sock.close.called) + + def test_create_connection_host_port_sock(self): + coro = self.loop.create_connection( + MyProto, 'example.com', 80, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, coro) + + def test_create_connection_no_host_port_sock(self): + coro = self.loop.create_connection(MyProto) + self.assertRaises(ValueError, self.loop.run_until_complete, coro) + + def test_create_connection_no_getaddrinfo(self): + @asyncio.coroutine + def getaddrinfo(*args, **kw): + yield From(None) + + def getaddrinfo_task(*args, **kwds): + return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) + + 
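+        # An empty getaddrinfo() result leaves no address to try, so
+        # create_connection() must fail with socket.error: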
self.loop.getaddrinfo = getaddrinfo_task + coro = self.loop.create_connection(MyProto, 'example.com', 80) + self.assertRaises( + socket.error, self.loop.run_until_complete, coro) + + def test_create_connection_connect_err(self): + @asyncio.coroutine + def getaddrinfo(*args, **kw): + yield From(None) + raise Return([(2, 1, 6, '', ('107.6.106.82', 80))]) + + def getaddrinfo_task(*args, **kwds): + return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) + + self.loop.getaddrinfo = getaddrinfo_task + self.loop.sock_connect = mock.Mock() + self.loop.sock_connect.side_effect = socket.error + + coro = self.loop.create_connection(MyProto, 'example.com', 80) + self.assertRaises( + socket.error, self.loop.run_until_complete, coro) + + def test_create_connection_multiple(self): + @asyncio.coroutine + def getaddrinfo(*args, **kw): + return [(2, 1, 6, '', ('0.0.0.1', 80)), + (2, 1, 6, '', ('0.0.0.2', 80))] + + def getaddrinfo_task(*args, **kwds): + return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) + + self.loop.getaddrinfo = getaddrinfo_task + self.loop.sock_connect = mock.Mock() + self.loop.sock_connect.side_effect = socket.error + + coro = self.loop.create_connection( + MyProto, 'example.com', 80, family=socket.AF_INET) + with self.assertRaises(socket.error): + self.loop.run_until_complete(coro) + + @mock.patch('trollius.base_events.socket') + def test_create_connection_multiple_errors_local_addr(self, m_socket): + + def bind(addr): + if addr[0] == '0.0.0.1': + err = socket.error('Err') + err.strerror = 'Err' + raise err + + m_socket.error = socket.error + m_socket.socket.return_value.bind = bind + + @asyncio.coroutine + def getaddrinfo(*args, **kw): + return [(2, 1, 6, '', ('0.0.0.1', 80)), + (2, 1, 6, '', ('0.0.0.2', 80))] + + def getaddrinfo_task(*args, **kwds): + return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) + + self.loop.getaddrinfo = getaddrinfo_task + self.loop.sock_connect = mock.Mock() + self.loop.sock_connect.side_effect = socket.error('Err2') + + coro = self.loop.create_connection( + MyProto, 'example.com', 80, family=socket.AF_INET, + local_addr=(None, 8080)) + with self.assertRaises(socket.error) as cm: + self.loop.run_until_complete(coro) + + self.assertTrue(str(cm.exception).startswith('Multiple exceptions: ')) + self.assertTrue(m_socket.socket.return_value.close.called) + + def test_create_connection_no_local_addr(self): + @asyncio.coroutine + def getaddrinfo(host, *args, **kw): + if host == 'example.com': + return [(2, 1, 6, '', ('107.6.106.82', 80)), + (2, 1, 6, '', ('107.6.106.82', 80))] + else: + return [] + + def getaddrinfo_task(*args, **kwds): + return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) + self.loop.getaddrinfo = getaddrinfo_task + + coro = self.loop.create_connection( + MyProto, 'example.com', 80, family=socket.AF_INET, + local_addr=(None, 8080)) + self.assertRaises( + socket.error, self.loop.run_until_complete, coro) + + def test_create_connection_ssl_server_hostname_default(self): + self.loop.getaddrinfo = mock.Mock() + + def mock_getaddrinfo(*args, **kwds): + f = asyncio.Future(loop=self.loop) + f.set_result([(socket.AF_INET, socket.SOCK_STREAM, + socket.SOL_TCP, '', ('1.2.3.4', 80))]) + return f + + self.loop.getaddrinfo.side_effect = mock_getaddrinfo + self.loop.sock_connect = mock.Mock() + f = asyncio.Future(loop=self.loop) + f.set_result(()) + self.loop.sock_connect.return_value = f + self.loop._make_ssl_transport = mock.Mock() + + class _SelectorTransportMock: + _sock = None + + def get_extra_info(self, key): 
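+                # Any extra-info query just gets a fresh mock; the checks
+                # below only care about the SSL handshake arguments.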
+ return mock.Mock() + + def close(self): + self._sock.close() + + def mock_make_ssl_transport(sock, protocol, sslcontext, waiter, + **kwds): + waiter.set_result(None) + transport = _SelectorTransportMock() + transport._sock = sock + return transport + + self.loop._make_ssl_transport.side_effect = mock_make_ssl_transport + ANY = mock.ANY + # First try the default server_hostname. + self.loop._make_ssl_transport.reset_mock() + coro = self.loop.create_connection(MyProto, 'python.org', 80, ssl=True) + transport, _ = self.loop.run_until_complete(coro) + transport.close() + self.loop._make_ssl_transport.assert_called_with( + ANY, ANY, ANY, ANY, + server_side=False, + server_hostname='python.org') + # Next try an explicit server_hostname. + self.loop._make_ssl_transport.reset_mock() + coro = self.loop.create_connection(MyProto, 'python.org', 80, ssl=True, + server_hostname='perl.com') + transport, _ = self.loop.run_until_complete(coro) + transport.close() + self.loop._make_ssl_transport.assert_called_with( + ANY, ANY, ANY, ANY, + server_side=False, + server_hostname='perl.com') + # Finally try an explicit empty server_hostname. + self.loop._make_ssl_transport.reset_mock() + coro = self.loop.create_connection(MyProto, 'python.org', 80, ssl=True, + server_hostname='') + transport, _ = self.loop.run_until_complete(coro) + transport.close() + self.loop._make_ssl_transport.assert_called_with(ANY, ANY, ANY, ANY, + server_side=False, + server_hostname='') + + def test_create_connection_no_ssl_server_hostname_errors(self): + # When not using ssl, server_hostname must be None. + coro = self.loop.create_connection(MyProto, 'python.org', 80, + server_hostname='') + self.assertRaises(ValueError, self.loop.run_until_complete, coro) + coro = self.loop.create_connection(MyProto, 'python.org', 80, + server_hostname='python.org') + self.assertRaises(ValueError, self.loop.run_until_complete, coro) + + def test_create_connection_ssl_server_hostname_errors(self): + # When using ssl, server_hostname may be None if host is non-empty. 
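+        # With an empty or None host (or an explicit sock), there is
+        # nothing to infer a server hostname from, so ValueError is
+        # expected: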
+ coro = self.loop.create_connection(MyProto, '', 80, ssl=True) + self.assertRaises(ValueError, self.loop.run_until_complete, coro) + coro = self.loop.create_connection(MyProto, None, 80, ssl=True) + self.assertRaises(ValueError, self.loop.run_until_complete, coro) + sock = socket.socket() + coro = self.loop.create_connection(MyProto, None, None, + ssl=True, sock=sock) + self.addCleanup(sock.close) + self.assertRaises(ValueError, self.loop.run_until_complete, coro) + + def test_create_server_empty_host(self): + # if host is empty string use None instead + non_local = {'host': object()} + + @asyncio.coroutine + def getaddrinfo(*args, **kw): + non_local['host'] = args[0] + yield From(None) + + def getaddrinfo_task(*args, **kwds): + return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) + + self.loop.getaddrinfo = getaddrinfo_task + fut = self.loop.create_server(MyProto, '', 0) + self.assertRaises(socket.error, self.loop.run_until_complete, fut) + self.assertIsNone(non_local['host']) + + def test_create_server_host_port_sock(self): + fut = self.loop.create_server( + MyProto, '0.0.0.0', 0, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + def test_create_server_no_host_port_sock(self): + fut = self.loop.create_server(MyProto) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + def test_create_server_no_getaddrinfo(self): + @asyncio.coroutine + def getaddrinfo(*args, **kw): + raise Return([]) + + def getaddrinfo_task(*args, **kwds): + return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) + + self.loop.getaddrinfo = getaddrinfo_task + + f = self.loop.create_server(MyProto, '0.0.0.0', 0) + self.assertRaises(socket.error, self.loop.run_until_complete, f) + + @mock.patch('trollius.base_events.socket') + def test_create_server_cant_bind(self, m_socket): + + class Err(socket.error): + strerror = 'error' + + m_socket.error = socket.error + m_socket.getaddrinfo.return_value = [ + (2, 1, 6, '', ('127.0.0.1', 10100))] + m_socket.getaddrinfo._is_coroutine = False + m_sock = m_socket.socket.return_value = mock.Mock() + m_sock.bind.side_effect = Err + + fut = self.loop.create_server(MyProto, '0.0.0.0', 0) + self.assertRaises(socket.error, self.loop.run_until_complete, fut) + self.assertTrue(m_sock.close.called) + + @mock.patch('trollius.base_events.socket') + def test_create_datagram_endpoint_no_addrinfo(self, m_socket): + m_socket.error = socket.error + m_socket.getaddrinfo.return_value = [] + m_socket.getaddrinfo._is_coroutine = False + + coro = self.loop.create_datagram_endpoint( + MyDatagramProto, local_addr=('localhost', 0)) + self.assertRaises( + socket.error, self.loop.run_until_complete, coro) + + def test_create_datagram_endpoint_addr_error(self): + coro = self.loop.create_datagram_endpoint( + MyDatagramProto, local_addr='localhost') + self.assertRaises( + AssertionError, self.loop.run_until_complete, coro) + coro = self.loop.create_datagram_endpoint( + MyDatagramProto, local_addr=('localhost', 1, 2, 3)) + self.assertRaises( + AssertionError, self.loop.run_until_complete, coro) + + def test_create_datagram_endpoint_connect_err(self): + self.loop.sock_connect = mock.Mock() + self.loop.sock_connect.side_effect = socket.error + + coro = self.loop.create_datagram_endpoint( + asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0)) + self.assertRaises( + socket.error, self.loop.run_until_complete, coro) + + @mock.patch('trollius.base_events.socket') + def test_create_datagram_endpoint_socket_err(self, m_socket): + m_socket.error = 
socket.error + m_socket.getaddrinfo = socket.getaddrinfo + m_socket.socket.side_effect = socket.error + + coro = self.loop.create_datagram_endpoint( + asyncio.DatagramProtocol, family=socket.AF_INET) + self.assertRaises( + socket.error, self.loop.run_until_complete, coro) + + coro = self.loop.create_datagram_endpoint( + asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0)) + self.assertRaises( + socket.error, self.loop.run_until_complete, coro) + + @unittest.skipUnless(support.IPV6_ENABLED, + 'IPv6 not supported or enabled') + def test_create_datagram_endpoint_no_matching_family(self): + coro = self.loop.create_datagram_endpoint( + asyncio.DatagramProtocol, + remote_addr=('127.0.0.1', 0), local_addr=('::1', 0)) + self.assertRaises( + ValueError, self.loop.run_until_complete, coro) + + @mock.patch('trollius.base_events.socket') + def test_create_datagram_endpoint_setblk_err(self, m_socket): + m_socket.error = socket.error + m_socket.socket.return_value.setblocking.side_effect = socket.error + + coro = self.loop.create_datagram_endpoint( + asyncio.DatagramProtocol, family=socket.AF_INET) + self.assertRaises( + socket.error, self.loop.run_until_complete, coro) + self.assertTrue( + m_socket.socket.return_value.close.called) + + def test_create_datagram_endpoint_noaddr_nofamily(self): + coro = self.loop.create_datagram_endpoint( + asyncio.DatagramProtocol) + self.assertRaises(ValueError, self.loop.run_until_complete, coro) + + @mock.patch('trollius.base_events.socket') + def test_create_datagram_endpoint_cant_bind(self, m_socket): + class Err(socket.error): + pass + + m_socket.AF_INET6 = socket.AF_INET6 + m_socket.error = socket.error + m_socket.getaddrinfo = socket.getaddrinfo + m_sock = m_socket.socket.return_value = mock.Mock() + m_sock.bind.side_effect = Err + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, + local_addr=('127.0.0.1', 0), family=socket.AF_INET) + self.assertRaises(Err, self.loop.run_until_complete, fut) + self.assertTrue(m_sock.close.called) + + def test_accept_connection_retry(self): + sock = mock.Mock() + sock.accept.side_effect = BlockingIOError() + + self.loop._accept_connection(MyProto, sock) + self.assertFalse(sock.close.called) + + @mock.patch('trollius.base_events.logger') + def test_accept_connection_exception(self, m_log): + sock = mock.Mock() + sock.fileno.return_value = 10 + sock.accept.side_effect = socket.error(errno.EMFILE, 'Too many open files') + self.loop.remove_reader = mock.Mock() + self.loop.call_later = mock.Mock() + + self.loop._accept_connection(MyProto, sock) + self.assertTrue(m_log.error.called) + self.assertFalse(sock.close.called) + self.loop.remove_reader.assert_called_with(10) + self.loop.call_later.assert_called_with(constants.ACCEPT_RETRY_DELAY, + # self.loop._start_serving + mock.ANY, + MyProto, sock, None, None) + + def test_call_coroutine(self): + @asyncio.coroutine + def simple_coroutine(): + pass + + coro_func = simple_coroutine + coro_obj = coro_func() + self.addCleanup(coro_obj.close) + for func in (coro_func, coro_obj): + with self.assertRaises(TypeError): + self.loop.call_soon(func) + with self.assertRaises(TypeError): + self.loop.call_soon_threadsafe(func) + with self.assertRaises(TypeError): + self.loop.call_later(60, func) + with self.assertRaises(TypeError): + self.loop.call_at(self.loop.time() + 60, func) + with self.assertRaises(TypeError): + self.loop.run_in_executor(None, func) + + @mock.patch('trollius.base_events.logger') + def test_log_slow_callbacks(self, m_logger): + def stop_loop_cb(loop): + loop.stop() + 
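+        # Coroutine variant: it is wrapped in a Task below, so the
+        # slow-callback warning should name the Task rather than the Handle.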
+        @asyncio.coroutine
+        def stop_loop_coro(loop):
+            yield From(None)
+            loop.stop()
+
+        asyncio.set_event_loop(self.loop)
+        self.loop.set_debug(True)
+        self.loop.slow_callback_duration = 0.0
+
+        # slow callback
+        self.loop.call_soon(stop_loop_cb, self.loop)
+        self.loop.run_forever()
+        fmt = m_logger.warning.call_args[0][0]
+        args = m_logger.warning.call_args[0][1:]
+        self.assertRegex(fmt % tuple(args),
+                         "^Executing <Handle.*stop_loop_cb.*> "
+                         "took .* seconds$")
+
+        # slow task
+        asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
+        self.loop.run_forever()
+        fmt = m_logger.warning.call_args[0][0]
+        args = m_logger.warning.call_args[0][1:]
+        self.assertRegex(fmt % tuple(args),
+                         "^Executing <Task.*stop_loop_coro.*> "
+                         "took .* seconds$")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/test_events.py b/tests/test_events.py
new file mode 100644
index 00000000..59c25d4d
--- /dev/null
+++ b/tests/test_events.py
@@ -0,0 +1,2434 @@
+"""Tests for events.py."""
+
+import contextlib
+import functools
+import gc
+import io
+import os
+import platform
+import re
+import signal
+import six
+import socket
+import subprocess
+import sys
+import threading
+import errno
+import weakref
+
+try:
+    import ssl
+except ImportError:
+    ssl = None
+
+try:
+    import concurrent
+except ImportError:
+    concurrent = None
+
+from trollius import Return, From
+from trollius import futures
+
+import trollius as asyncio
+from trollius import compat
+from trollius import events
+from trollius import proactor_events
+from trollius import selector_events
+from trollius import sslproto
+from trollius import test_support as support
+from trollius import test_utils
+from trollius.test_utils import unittest
+from trollius.py33_exceptions import (wrap_error,
+    BlockingIOError, ConnectionRefusedError,
+    FileNotFoundError)
+from trollius.test_utils import mock
+from trollius.time_monotonic import time_monotonic
+
+
+def data_file(filename):
+    if hasattr(support, 'TEST_HOME_DIR'):
+        fullname = os.path.join(support.TEST_HOME_DIR, filename)
+        if os.path.isfile(fullname):
+            return fullname
+    fullname = os.path.join(os.path.dirname(__file__), filename)
+    if os.path.isfile(fullname):
+        return fullname
+    raise FileNotFoundError(filename)
+
+
+def osx_tiger():
+    """Return True if the platform is Mac OS 10.4 or older."""
+    if sys.platform != 'darwin':
+        return False
+    version = platform.mac_ver()[0]
+    version = tuple(map(int, version.split('.')))
+    return version < (10, 5)
+
+
+def skip_if_backported_sslcontext():
+    backported = getattr(asyncio, 'BACKPORT_SSL_CONTEXT', False)
+    return unittest.skipIf(backported, 'need ssl.SSLContext')
+
+
+ONLYCERT = data_file('ssl_cert.pem')
+ONLYKEY = data_file('ssl_key.pem')
+SIGNED_CERTFILE = data_file('keycert3.pem')
+SIGNING_CA = data_file('pycacert.pem')
+
+
+class MyBaseProto(asyncio.Protocol):
+    connected = None
+    done = None
+
+    def __init__(self, loop=None):
+        self.transport = None
+        self.state = 'INITIAL'
+        self.nbytes = 0
+        if loop is not None:
+            self.connected = asyncio.Future(loop=loop)
+            self.done = asyncio.Future(loop=loop)
+
+    def connection_made(self, transport):
+        self.transport = transport
+        assert self.state == 'INITIAL', self.state
+        self.state = 'CONNECTED'
+        if self.connected:
+            self.connected.set_result(None)
+
+    def data_received(self, data):
+        assert self.state == 'CONNECTED', self.state
+        self.nbytes += len(data)
+
+    def eof_received(self):
+        assert self.state == 'CONNECTED', self.state
+        self.state = 'EOF'
+
+    def connection_lost(self, exc):
+        assert self.state in ('CONNECTED', 'EOF'),
self.state + self.state = 'CLOSED' + if self.done: + self.done.set_result(None) + + +class MyProto(MyBaseProto): + def connection_made(self, transport): + super(MyProto, self).connection_made(transport) + transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n') + + +class MyDatagramProto(asyncio.DatagramProtocol): + done = None + + def __init__(self, loop=None): + self.state = 'INITIAL' + self.nbytes = 0 + if loop is not None: + self.done = asyncio.Future(loop=loop) + + def connection_made(self, transport): + self.transport = transport + assert self.state == 'INITIAL', self.state + self.state = 'INITIALIZED' + + def datagram_received(self, data, addr): + assert self.state == 'INITIALIZED', self.state + self.nbytes += len(data) + + def error_received(self, exc): + assert self.state == 'INITIALIZED', self.state + + def connection_lost(self, exc): + assert self.state == 'INITIALIZED', self.state + self.state = 'CLOSED' + if self.done: + self.done.set_result(None) + + +class MyReadPipeProto(asyncio.Protocol): + done = None + + def __init__(self, loop=None): + self.state = ['INITIAL'] + self.nbytes = 0 + self.transport = None + if loop is not None: + self.done = asyncio.Future(loop=loop) + + def connection_made(self, transport): + self.transport = transport + assert self.state == ['INITIAL'], self.state + self.state.append('CONNECTED') + + def data_received(self, data): + assert self.state == ['INITIAL', 'CONNECTED'], self.state + self.nbytes += len(data) + + def eof_received(self): + assert self.state == ['INITIAL', 'CONNECTED'], self.state + self.state.append('EOF') + + def connection_lost(self, exc): + if 'EOF' not in self.state: + self.state.append('EOF') # It is okay if EOF is missed. + assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state + self.state.append('CLOSED') + if self.done: + self.done.set_result(None) + + +class MyWritePipeProto(asyncio.BaseProtocol): + done = None + + def __init__(self, loop=None): + self.state = 'INITIAL' + self.transport = None + if loop is not None: + self.done = asyncio.Future(loop=loop) + + def connection_made(self, transport): + self.transport = transport + assert self.state == 'INITIAL', self.state + self.state = 'CONNECTED' + + def connection_lost(self, exc): + assert self.state == 'CONNECTED', self.state + self.state = 'CLOSED' + if self.done: + self.done.set_result(None) + + +class MySubprocessProtocol(asyncio.SubprocessProtocol): + + def __init__(self, loop): + self.state = 'INITIAL' + self.transport = None + self.connected = asyncio.Future(loop=loop) + self.completed = asyncio.Future(loop=loop) + self.disconnects = dict((fd, futures.Future(loop=loop)) for fd in range(3)) + self.data = {1: b'', 2: b''} + self.returncode = None + self.got_data = {1: asyncio.Event(loop=loop), + 2: asyncio.Event(loop=loop)} + + def connection_made(self, transport): + self.transport = transport + assert self.state == 'INITIAL', self.state + self.state = 'CONNECTED' + self.connected.set_result(None) + + def connection_lost(self, exc): + assert self.state == 'CONNECTED', self.state + self.state = 'CLOSED' + self.completed.set_result(None) + + def pipe_data_received(self, fd, data): + assert self.state == 'CONNECTED', self.state + self.data[fd] += data + self.got_data[fd].set() + + def pipe_connection_lost(self, fd, exc): + assert self.state == 'CONNECTED', self.state + if exc: + self.disconnects[fd].set_exception(exc) + else: + self.disconnects[fd].set_result(exc) + + def process_exited(self): + assert self.state == 'CONNECTED', self.state + 
self.returncode = self.transport.get_returncode() + + +class EventLoopTestsMixin(object): + + def setUp(self): + super(EventLoopTestsMixin, self).setUp() + self.loop = self.create_event_loop() + self.set_event_loop(self.loop) + + def tearDown(self): + # just in case if we have transport close callbacks + if not self.loop.is_closed(): + test_utils.run_briefly(self.loop) + + self.loop.close() + gc.collect() + super(EventLoopTestsMixin, self).tearDown() + + def test_run_until_complete_nesting(self): + @asyncio.coroutine + def coro1(): + yield From(None) + + @asyncio.coroutine + def coro2(): + self.assertTrue(self.loop.is_running()) + self.loop.run_until_complete(coro1()) + + self.assertRaises( + RuntimeError, self.loop.run_until_complete, coro2()) + + # Note: because of the default Windows timing granularity of + # 15.6 msec, we use fairly long sleep times here (~100 msec). + + def test_run_until_complete(self): + t0 = self.loop.time() + self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop)) + t1 = self.loop.time() + self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0) + + def test_run_until_complete_stopped(self): + @asyncio.coroutine + def cb(): + self.loop.stop() + yield From(asyncio.sleep(0.1, loop=self.loop)) + + task = cb() + self.assertRaises(RuntimeError, + self.loop.run_until_complete, task) + for task in asyncio.Task.all_tasks(loop=self.loop): + task._log_destroy_pending = False + + def test_call_later(self): + results = [] + + def callback(arg): + results.append(arg) + self.loop.stop() + + self.loop.call_later(0.1, callback, 'hello world') + t0 = time_monotonic() + self.loop.run_forever() + t1 = time_monotonic() + self.assertEqual(results, ['hello world']) + self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0) + + def test_call_soon(self): + results = [] + + def callback(arg1, arg2): + results.append((arg1, arg2)) + self.loop.stop() + + self.loop.call_soon(callback, 'hello', 'world') + self.loop.run_forever() + self.assertEqual(results, [('hello', 'world')]) + + def test_call_soon_threadsafe(self): + results = [] + lock = threading.Lock() + + def callback(arg): + results.append(arg) + if len(results) >= 2: + self.loop.stop() + + def run_in_thread(): + self.loop.call_soon_threadsafe(callback, 'hello') + lock.release() + + lock.acquire() + t = threading.Thread(target=run_in_thread) + t.start() + + with lock: + self.loop.call_soon(callback, 'world') + self.loop.run_forever() + t.join() + self.assertEqual(results, ['hello', 'world']) + + def test_call_soon_threadsafe_same_thread(self): + results = [] + + def callback(arg): + results.append(arg) + if len(results) >= 2: + self.loop.stop() + + self.loop.call_soon_threadsafe(callback, 'hello') + self.loop.call_soon(callback, 'world') + self.loop.run_forever() + self.assertEqual(results, ['hello', 'world']) + + @unittest.skipIf(concurrent is None, 'need concurrent.futures') + def test_run_in_executor(self): + def run(arg): + return (arg, threading.current_thread().ident) + f2 = self.loop.run_in_executor(None, run, 'yo') + res, thread_id = self.loop.run_until_complete(f2) + self.assertEqual(res, 'yo') + self.assertNotEqual(thread_id, threading.current_thread().ident) + + def test_reader_callback(self): + r, w = test_utils.socketpair() + r.setblocking(False) + bytes_read = bytearray() + + def reader(): + try: + data = r.recv(1024) + except BlockingIOError: + # Spurious readiness notifications are possible + # at least on Linux -- see man select. 
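+                # Treat it as "no data yet"; the next readiness event retries.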
+ return + if data: + bytes_read.extend(data) + else: + self.assertTrue(self.loop.remove_reader(r.fileno())) + r.close() + + self.loop.add_reader(r.fileno(), reader) + self.loop.call_soon(w.send, b'abc') + test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3) + self.loop.call_soon(w.send, b'def') + test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6) + self.loop.call_soon(w.close) + self.loop.call_soon(self.loop.stop) + self.loop.run_forever() + self.assertEqual(bytes_read, b'abcdef') + + def test_writer_callback(self): + r, w = test_utils.socketpair() + w.setblocking(False) + + def writer(data): + w.send(data) + self.loop.stop() + + data = b'x' * 1024 + self.loop.add_writer(w.fileno(), writer, data) + self.loop.run_forever() + + self.assertTrue(self.loop.remove_writer(w.fileno())) + self.assertFalse(self.loop.remove_writer(w.fileno())) + + w.close() + read = r.recv(len(data) * 2) + r.close() + self.assertEqual(read, data) + + def _basetest_sock_client_ops(self, httpd, sock): + if not isinstance(self.loop, proactor_events.BaseProactorEventLoop): + # in debug mode, socket operations must fail + # if the socket is not in blocking mode + self.loop.set_debug(True) + sock.setblocking(True) + with self.assertRaises(ValueError): + self.loop.run_until_complete( + self.loop.sock_connect(sock, httpd.address)) + with self.assertRaises(ValueError): + self.loop.run_until_complete( + self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n')) + with self.assertRaises(ValueError): + self.loop.run_until_complete( + self.loop.sock_recv(sock, 1024)) + with self.assertRaises(ValueError): + self.loop.run_until_complete( + self.loop.sock_accept(sock)) + + # test in non-blocking mode + sock.setblocking(False) + self.loop.run_until_complete( + self.loop.sock_connect(sock, httpd.address)) + self.loop.run_until_complete( + self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n')) + data = self.loop.run_until_complete( + self.loop.sock_recv(sock, 1024)) + # consume data + self.loop.run_until_complete( + self.loop.sock_recv(sock, 1024)) + sock.close() + self.assertTrue(data.startswith(b'HTTP/1.0 200 OK')) + + def test_sock_client_ops(self): + with test_utils.run_test_server() as httpd: + sock = socket.socket() + self._basetest_sock_client_ops(httpd, sock) + + @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') + def test_unix_sock_client_ops(self): + with test_utils.run_test_unix_server() as httpd: + sock = socket.socket(socket.AF_UNIX) + self._basetest_sock_client_ops(httpd, sock) + + def test_sock_client_fail(self): + # Make sure that we will get an unused port + address = None + try: + s = socket.socket() + s.bind(('127.0.0.1', 0)) + address = s.getsockname() + finally: + s.close() + + sock = socket.socket() + sock.setblocking(False) + with self.assertRaises(ConnectionRefusedError): + self.loop.run_until_complete( + self.loop.sock_connect(sock, address)) + sock.close() + + def test_sock_accept(self): + listener = socket.socket() + listener.setblocking(False) + listener.bind(('127.0.0.1', 0)) + listener.listen(1) + client = socket.socket() + client.connect(listener.getsockname()) + + f = self.loop.sock_accept(listener) + conn, addr = self.loop.run_until_complete(f) + self.assertEqual(conn.gettimeout(), 0) + self.assertEqual(addr, client.getsockname()) + self.assertEqual(client.getpeername(), listener.getsockname()) + client.close() + conn.close() + listener.close() + + @unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL') + def test_add_signal_handler(self): + non_local = 
{'caught': 0} + + def my_handler(): + non_local['caught'] += 1 + + # Check error behavior first. + self.assertRaises( + TypeError, self.loop.add_signal_handler, 'boom', my_handler) + self.assertRaises( + TypeError, self.loop.remove_signal_handler, 'boom') + self.assertRaises( + ValueError, self.loop.add_signal_handler, signal.NSIG+1, + my_handler) + self.assertRaises( + ValueError, self.loop.remove_signal_handler, signal.NSIG+1) + self.assertRaises( + ValueError, self.loop.add_signal_handler, 0, my_handler) + self.assertRaises( + ValueError, self.loop.remove_signal_handler, 0) + self.assertRaises( + ValueError, self.loop.add_signal_handler, -1, my_handler) + self.assertRaises( + ValueError, self.loop.remove_signal_handler, -1) + self.assertRaises( + RuntimeError, self.loop.add_signal_handler, signal.SIGKILL, + my_handler) + # Removing SIGKILL doesn't raise, since we don't call signal(). + self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL)) + # Now set a handler and handle it. + self.loop.add_signal_handler(signal.SIGINT, my_handler) + + os.kill(os.getpid(), signal.SIGINT) + test_utils.run_until(self.loop, lambda: non_local['caught']) + + # Removing it should restore the default handler. + self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT)) + self.assertEqual(signal.getsignal(signal.SIGINT), + signal.default_int_handler) + # Removing again returns False. + self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT)) + + @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM') + def test_signal_handling_while_selecting(self): + # Test with a signal actually arriving during a select() call. + non_local = {'caught': 0} + + def my_handler(): + non_local['caught'] += 1 + self.loop.stop() + + self.loop.add_signal_handler(signal.SIGALRM, my_handler) + + signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once. + self.loop.run_forever() + self.assertEqual(non_local['caught'], 1) + + @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM') + def test_signal_handling_args(self): + some_args = (42,) + non_local = {'caught': 0} + + def my_handler(*args): + non_local['caught'] += 1 + self.assertEqual(args, some_args) + + self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args) + + signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once. + self.loop.call_later(0.5, self.loop.stop) + self.loop.run_forever() + self.assertEqual(non_local['caught'], 1) + + def _basetest_create_connection(self, connection_fut, check_sockname=True): + tr, pr = self.loop.run_until_complete(connection_fut) + self.assertIsInstance(tr, asyncio.Transport) + self.assertIsInstance(pr, asyncio.Protocol) + self.assertIs(pr.transport, tr) + if check_sockname: + self.assertIsNotNone(tr.get_extra_info('sockname')) + self.loop.run_until_complete(pr.done) + self.assertGreater(pr.nbytes, 0) + tr.close() + + def test_create_connection(self): + with test_utils.run_test_server() as httpd: + conn_fut = self.loop.create_connection( + lambda: MyProto(loop=self.loop), *httpd.address) + self._basetest_create_connection(conn_fut) + + @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') + def test_create_unix_connection(self): + # Issue #20682: On Mac OS X Tiger, getsockname() returns a + # zero-length address for UNIX socket. 
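+        # Skip the sockname check on those systems.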
+ check_sockname = not osx_tiger() + + with test_utils.run_test_unix_server() as httpd: + conn_fut = self.loop.create_unix_connection( + lambda: MyProto(loop=self.loop), httpd.address) + self._basetest_create_connection(conn_fut, check_sockname) + + def test_create_connection_sock(self): + with test_utils.run_test_server() as httpd: + sock = None + infos = self.loop.run_until_complete( + self.loop.getaddrinfo( + *httpd.address, type=socket.SOCK_STREAM)) + for family, type, proto, cname, address in infos: + try: + sock = socket.socket(family=family, type=type, proto=proto) + sock.setblocking(False) + self.loop.run_until_complete( + self.loop.sock_connect(sock, address)) + except: + pass + else: + break + else: + assert False, 'Can not create socket.' + + f = self.loop.create_connection( + lambda: MyProto(loop=self.loop), sock=sock) + tr, pr = self.loop.run_until_complete(f) + self.assertIsInstance(tr, asyncio.Transport) + self.assertIsInstance(pr, asyncio.Protocol) + self.loop.run_until_complete(pr.done) + self.assertGreater(pr.nbytes, 0) + tr.close() + + def _basetest_create_ssl_connection(self, connection_fut, + check_sockname=True): + tr, pr = self.loop.run_until_complete(connection_fut) + self.assertIsInstance(tr, asyncio.Transport) + self.assertIsInstance(pr, asyncio.Protocol) + self.assertTrue('ssl' in tr.__class__.__name__.lower()) + if check_sockname: + self.assertIsNotNone(tr.get_extra_info('sockname')) + self.loop.run_until_complete(pr.done) + self.assertGreater(pr.nbytes, 0) + tr.close() + + def _test_create_ssl_connection(self, httpd, create_connection, + check_sockname=True): + conn_fut = create_connection(ssl=test_utils.dummy_ssl_context()) + self._basetest_create_ssl_connection(conn_fut, check_sockname) + + # ssl.Purpose was introduced in Python 3.4 + if hasattr(ssl, 'Purpose'): + def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, + cafile=None, capath=None, + cadata=None): + """ + A ssl.create_default_context() replacement that doesn't enable + cert validation. 
+ """ + self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH) + return test_utils.dummy_ssl_context() + + # With ssl=True, ssl.create_default_context() should be called + with mock.patch('ssl.create_default_context', + side_effect=_dummy_ssl_create_context) as m: + conn_fut = create_connection(ssl=True) + self._basetest_create_ssl_connection(conn_fut, check_sockname) + self.assertEqual(m.call_count, 1) + + if not asyncio.BACKPORT_SSL_CONTEXT: + # With the real ssl.create_default_context(), certificate + # validation will fail + with self.assertRaises(ssl.SSLError) as cm: + conn_fut = create_connection(ssl=True) + # Ignore the "SSL handshake failed" log in debug mode + with test_utils.disable_logger(): + self._basetest_create_ssl_connection(conn_fut, check_sockname) + + # Test for Python 3.2 + if hasattr(ssl.SSLError, 'reason'): + self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED') + + @unittest.skipIf(ssl is None, 'No ssl module') + def test_create_ssl_connection(self): + with test_utils.run_test_server(use_ssl=True) as httpd: + create_connection = functools.partial( + self.loop.create_connection, + lambda: MyProto(loop=self.loop), + *httpd.address) + self._test_create_ssl_connection(httpd, create_connection) + + def test_legacy_create_ssl_connection(self): + with test_utils.force_legacy_ssl_support(): + self.test_create_ssl_connection() + + @unittest.skipIf(ssl is None, 'No ssl module') + @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') + def test_create_ssl_unix_connection(self): + # Issue #20682: On Mac OS X Tiger, getsockname() returns a + # zero-length address for UNIX socket. + check_sockname = not osx_tiger() + + with test_utils.run_test_unix_server(use_ssl=True) as httpd: + create_connection = functools.partial( + self.loop.create_unix_connection, + lambda: MyProto(loop=self.loop), httpd.address, + server_hostname='127.0.0.1') + + self._test_create_ssl_connection(httpd, create_connection, + check_sockname) + + def test_legacy_create_ssl_unix_connection(self): + with test_utils.force_legacy_ssl_support(): + self.test_create_ssl_unix_connection() + + def test_create_connection_local_addr(self): + with test_utils.run_test_server() as httpd: + port = support.find_unused_port() + f = self.loop.create_connection( + lambda: MyProto(loop=self.loop), + *httpd.address, local_addr=(httpd.address[0], port)) + tr, pr = self.loop.run_until_complete(f) + expected = pr.transport.get_extra_info('sockname')[1] + self.assertEqual(port, expected) + tr.close() + + def test_create_connection_local_addr_in_use(self): + with test_utils.run_test_server() as httpd: + f = self.loop.create_connection( + lambda: MyProto(loop=self.loop), + *httpd.address, local_addr=httpd.address) + with self.assertRaises(socket.error) as cm: + self.loop.run_until_complete(f) + self.assertEqual(cm.exception.errno, errno.EADDRINUSE) + # FIXME: address missing from the message? 
+ #self.assertIn(str(httpd.address), cm.exception.strerror) + + def test_create_server(self): + proto = MyProto(self.loop) + f = self.loop.create_server(lambda: proto, '0.0.0.0', 0) + server = self.loop.run_until_complete(f) + self.assertEqual(len(server.sockets), 1) + sock = server.sockets[0] + host, port = sock.getsockname() + self.assertEqual(host, '0.0.0.0') + client = socket.socket() + client.connect(('127.0.0.1', port)) + client.sendall(b'xxx') + + self.loop.run_until_complete(proto.connected) + self.assertEqual('CONNECTED', proto.state) + + test_utils.run_until(self.loop, lambda: proto.nbytes > 0) + self.assertEqual(3, proto.nbytes) + + # extra info is available + self.assertIsNotNone(proto.transport.get_extra_info('sockname')) + self.assertEqual('127.0.0.1', + proto.transport.get_extra_info('peername')[0]) + + # close connection + proto.transport.close() + self.loop.run_until_complete(proto.done) + + self.assertEqual('CLOSED', proto.state) + + # the client socket must be closed after to avoid ECONNRESET upon + # recv()/send() on the serving socket + client.close() + + # close server + server.close() + + def _make_unix_server(self, factory, **kwargs): + path = test_utils.gen_unix_socket_path() + self.addCleanup(lambda: os.path.exists(path) and os.unlink(path)) + + f = self.loop.create_unix_server(factory, path, **kwargs) + server = self.loop.run_until_complete(f) + + return server, path + + @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') + def test_create_unix_server(self): + proto = MyProto(loop=self.loop) + server, path = self._make_unix_server(lambda: proto) + self.assertEqual(len(server.sockets), 1) + + client = socket.socket(socket.AF_UNIX) + client.connect(path) + client.sendall(b'xxx') + + self.loop.run_until_complete(proto.connected) + self.assertEqual('CONNECTED', proto.state) + test_utils.run_until(self.loop, lambda: proto.nbytes > 0) + self.assertEqual(3, proto.nbytes) + + # close connection + proto.transport.close() + self.loop.run_until_complete(proto.done) + + self.assertEqual('CLOSED', proto.state) + + # the client socket must be closed after to avoid ECONNRESET upon + # recv()/send() on the serving socket + client.close() + + # close server + server.close() + + @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') + def test_create_unix_server_path_socket_error(self): + proto = MyProto(loop=self.loop) + sock = socket.socket() + try: + f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock) + with self.assertRaisesRegex(ValueError, + 'path and sock can not be specified ' + 'at the same time'): + self.loop.run_until_complete(f) + finally: + sock.close() + + def _create_ssl_context(self, certfile, keyfile=None): + sslcontext = asyncio.SSLContext(ssl.PROTOCOL_SSLv23) + if not asyncio.BACKPORT_SSL_CONTEXT: + sslcontext.options |= ssl.OP_NO_SSLv2 + sslcontext.load_cert_chain(certfile, keyfile) + return sslcontext + + def _make_ssl_server(self, factory, certfile, keyfile=None): + sslcontext = self._create_ssl_context(certfile, keyfile) + + f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext) + server = self.loop.run_until_complete(f) + + sock = server.sockets[0] + host, port = sock.getsockname() + self.assertEqual(host, '127.0.0.1') + return server, host, port + + def _make_ssl_unix_server(self, factory, certfile, keyfile=None): + sslcontext = self._create_ssl_context(certfile, keyfile) + return self._make_unix_server(factory, ssl=sslcontext) + + @unittest.skipIf(ssl is None, 'No ssl module') + def 
test_create_server_ssl(self): + proto = MyProto(loop=self.loop) + server, host, port = self._make_ssl_server( + lambda: proto, ONLYCERT, ONLYKEY) + + f_c = self.loop.create_connection(MyBaseProto, host, port, + ssl=test_utils.dummy_ssl_context()) + client, pr = self.loop.run_until_complete(f_c) + + client.write(b'xxx') + self.loop.run_until_complete(proto.connected) + self.assertEqual('CONNECTED', proto.state) + + test_utils.run_until(self.loop, lambda: proto.nbytes > 0) + self.assertEqual(3, proto.nbytes) + + # extra info is available + self.assertIsNotNone(proto.transport.get_extra_info('sockname')) + self.assertEqual('127.0.0.1', + proto.transport.get_extra_info('peername')[0]) + + # close connection + proto.transport.close() + self.loop.run_until_complete(proto.done) + self.assertEqual('CLOSED', proto.state) + + # the client socket must be closed after to avoid ECONNRESET upon + # recv()/send() on the serving socket + client.close() + + # stop serving + server.close() + + def test_legacy_create_server_ssl(self): + with test_utils.force_legacy_ssl_support(): + self.test_create_server_ssl() + + @unittest.skipIf(ssl is None, 'No ssl module') + @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') + def test_create_unix_server_ssl(self): + proto = MyProto(loop=self.loop) + server, path = self._make_ssl_unix_server( + lambda: proto, ONLYCERT, ONLYKEY) + + f_c = self.loop.create_unix_connection( + MyBaseProto, path, ssl=test_utils.dummy_ssl_context(), + server_hostname='') + + client, pr = self.loop.run_until_complete(f_c) + + client.write(b'xxx') + self.loop.run_until_complete(proto.connected) + self.assertEqual('CONNECTED', proto.state) + test_utils.run_until(self.loop, lambda: proto.nbytes > 0) + self.assertEqual(3, proto.nbytes) + + # close connection + proto.transport.close() + self.loop.run_until_complete(proto.done) + self.assertEqual('CLOSED', proto.state) + + # the client socket must be closed after to avoid ECONNRESET upon + # recv()/send() on the serving socket + client.close() + + # stop serving + server.close() + + def test_legacy_create_unix_server_ssl(self): + with test_utils.force_legacy_ssl_support(): + self.test_create_unix_server_ssl() + + @unittest.skipIf(ssl is None, 'No ssl module') + @skip_if_backported_sslcontext() + def test_create_server_ssl_verify_failed(self): + proto = MyProto(loop=self.loop) + server, host, port = self._make_ssl_server( + lambda: proto, SIGNED_CERTFILE) + + sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23) + sslcontext_client.options |= ssl.OP_NO_SSLv2 + sslcontext_client.verify_mode = ssl.CERT_REQUIRED + if hasattr(sslcontext_client, 'check_hostname'): + sslcontext_client.check_hostname = True + + # no CA loaded + f_c = self.loop.create_connection(MyProto, host, port, + ssl=sslcontext_client) + with mock.patch.object(self.loop, 'call_exception_handler'): + with test_utils.disable_logger(): + with self.assertRaisesRegex(ssl.SSLError, + 'certificate verify failed'): + self.loop.run_until_complete(f_c) + + # execute the loop to log the connection error + test_utils.run_briefly(self.loop) + + # close connection + self.assertIsNone(proto.transport) + server.close() + + def test_legacy_create_server_ssl_verify_failed(self): + with test_utils.force_legacy_ssl_support(): + self.test_create_server_ssl_verify_failed() + + @unittest.skipIf(ssl is None, 'No ssl module') + @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') + @skip_if_backported_sslcontext() + def test_create_unix_server_ssl_verify_failed(self): + 
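+        # Same scenario as above, but over a UNIX socket: with no CA loaded,
+        # the client handshake must fail certificate verification.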
proto = MyProto(loop=self.loop) + server, path = self._make_ssl_unix_server( + lambda: proto, SIGNED_CERTFILE) + + sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23) + sslcontext_client.options |= ssl.OP_NO_SSLv2 + sslcontext_client.verify_mode = ssl.CERT_REQUIRED + if hasattr(sslcontext_client, 'check_hostname'): + sslcontext_client.check_hostname = True + + # no CA loaded + f_c = self.loop.create_unix_connection(MyProto, path, + ssl=sslcontext_client, + server_hostname='invalid') + with mock.patch.object(self.loop, 'call_exception_handler'): + with test_utils.disable_logger(): + with self.assertRaisesRegex(ssl.SSLError, + 'certificate verify failed'): + self.loop.run_until_complete(f_c) + + # execute the loop to log the connection error + test_utils.run_briefly(self.loop) + + # close connection + self.assertIsNone(proto.transport) + server.close() + + def test_legacy_create_unix_server_ssl_verify_failed(self): + with test_utils.force_legacy_ssl_support(): + self.test_create_unix_server_ssl_verify_failed() + + @unittest.skipIf(ssl is None, 'No ssl module') + def test_create_server_ssl_match_failed(self): + proto = MyProto(loop=self.loop) + server, host, port = self._make_ssl_server( + lambda: proto, SIGNED_CERTFILE) + + sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23) + if not asyncio.BACKPORT_SSL_CONTEXT: + sslcontext_client.options |= ssl.OP_NO_SSLv2 + sslcontext_client.verify_mode = ssl.CERT_REQUIRED + sslcontext_client.load_verify_locations( + cafile=SIGNING_CA) + if hasattr(sslcontext_client, 'check_hostname'): + sslcontext_client.check_hostname = True + + if six.PY3: + err_msg = "hostname '127.0.0.1' doesn't match 'localhost'" + else: + # http://bugs.python.org/issue22861 + err_msg = "hostname '127.0.0.1' doesn't match u'localhost'" + + # incorrect server_hostname + if not asyncio.BACKPORT_SSL_CONTEXT: + f_c = self.loop.create_connection(MyProto, host, port, + ssl=sslcontext_client) + with mock.patch.object(self.loop, 'call_exception_handler'): + with test_utils.disable_logger(): + with self.assertRaisesRegex( + ssl.CertificateError, + err_msg): + self.loop.run_until_complete(f_c) + + # close connection + proto.transport.close() + + server.close() + + def test_legacy_create_server_ssl_match_failed(self): + with test_utils.force_legacy_ssl_support(): + self.test_create_server_ssl_match_failed() + + @unittest.skipIf(ssl is None, 'No ssl module') + @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') + def test_create_unix_server_ssl_verified(self): + proto = MyProto(loop=self.loop) + server, path = self._make_ssl_unix_server( + lambda: proto, SIGNED_CERTFILE) + + sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23) + if not asyncio.BACKPORT_SSL_CONTEXT: + sslcontext_client.options |= ssl.OP_NO_SSLv2 + sslcontext_client.verify_mode = ssl.CERT_REQUIRED + sslcontext_client.load_verify_locations(cafile=SIGNING_CA) + if hasattr(sslcontext_client, 'check_hostname'): + sslcontext_client.check_hostname = True + + # Connection succeeds with correct CA and server hostname. 
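+        # SIGNED_CERTFILE is issued for 'localhost' by SIGNING_CA (loaded
+        # above), so both verification and hostname matching succeed.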
+ f_c = self.loop.create_unix_connection(MyProto, path, + ssl=sslcontext_client, + server_hostname='localhost') + client, pr = self.loop.run_until_complete(f_c) + + # close connection + proto.transport.close() + client.close() + server.close() + self.loop.run_until_complete(proto.done) + + def test_legacy_create_unix_server_ssl_verified(self): + with test_utils.force_legacy_ssl_support(): + self.test_create_unix_server_ssl_verified() + + @unittest.skipIf(ssl is None, 'No ssl module') + def test_create_server_ssl_verified(self): + proto = MyProto(loop=self.loop) + server, host, port = self._make_ssl_server( + lambda: proto, SIGNED_CERTFILE) + + sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23) + if not asyncio.BACKPORT_SSL_CONTEXT: + sslcontext_client.options |= ssl.OP_NO_SSLv2 + sslcontext_client.verify_mode = ssl.CERT_REQUIRED + sslcontext_client.load_verify_locations(cafile=SIGNING_CA) + if hasattr(sslcontext_client, 'check_hostname'): + sslcontext_client.check_hostname = True + + # Connection succeeds with correct CA and server hostname. + f_c = self.loop.create_connection(MyProto, host, port, + ssl=sslcontext_client, + server_hostname='localhost') + client, pr = self.loop.run_until_complete(f_c) + + # close connection + proto.transport.close() + client.close() + + server.close() + self.loop.run_until_complete(proto.done) + + def test_legacy_create_server_ssl_verified(self): + with test_utils.force_legacy_ssl_support(): + self.test_create_server_ssl_verified() + + def test_create_server_sock(self): + non_local = {'proto': asyncio.Future(loop=self.loop)} + + class TestMyProto(MyProto): + def connection_made(self, transport): + super(TestMyProto, self).connection_made(transport) + non_local['proto'].set_result(self) + + sock_ob = socket.socket(type=socket.SOCK_STREAM) + sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock_ob.bind(('0.0.0.0', 0)) + + f = self.loop.create_server(TestMyProto, sock=sock_ob) + server = self.loop.run_until_complete(f) + sock = server.sockets[0] + self.assertIs(sock, sock_ob) + + host, port = sock.getsockname() + self.assertEqual(host, '0.0.0.0') + client = socket.socket() + client.connect(('127.0.0.1', port)) + client.send(b'xxx') + client.close() + server.close() + + def test_create_server_addr_in_use(self): + sock_ob = socket.socket(type=socket.SOCK_STREAM) + sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock_ob.bind(('0.0.0.0', 0)) + + f = self.loop.create_server(MyProto, sock=sock_ob) + server = self.loop.run_until_complete(f) + sock = server.sockets[0] + host, port = sock.getsockname() + + f = self.loop.create_server(MyProto, host=host, port=port) + with self.assertRaises(socket.error) as cm: + self.loop.run_until_complete(f) + self.assertEqual(cm.exception.errno, errno.EADDRINUSE) + + server.close() + + @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled') + def test_create_server_dual_stack(self): + f_proto = asyncio.Future(loop=self.loop) + + class TestMyProto(MyProto): + def connection_made(self, transport): + super(TestMyProto, self).connection_made(transport) + f_proto.set_result(self) + + try_count = 0 + while True: + try: + port = support.find_unused_port() + f = self.loop.create_server(TestMyProto, host=None, port=port) + server = self.loop.run_until_complete(f) + except socket.error as ex: + if ex.errno == errno.EADDRINUSE: + try_count += 1 + self.assertGreaterEqual(5, try_count) + continue + else: + raise + else: + break + client = socket.socket() + client.connect(('127.0.0.1', 
port)) + client.send(b'xxx') + proto = self.loop.run_until_complete(f_proto) + proto.transport.close() + client.close() + + f_proto = asyncio.Future(loop=self.loop) + client = socket.socket(socket.AF_INET6) + client.connect(('::1', port)) + client.send(b'xxx') + proto = self.loop.run_until_complete(f_proto) + proto.transport.close() + client.close() + + server.close() + + def test_server_close(self): + f = self.loop.create_server(MyProto, '0.0.0.0', 0) + server = self.loop.run_until_complete(f) + sock = server.sockets[0] + host, port = sock.getsockname() + + client = socket.socket() + client.connect(('127.0.0.1', port)) + client.send(b'xxx') + client.close() + + server.close() + + client = socket.socket() + self.assertRaises( + ConnectionRefusedError, wrap_error, client.connect, + ('127.0.0.1', port)) + client.close() + + def test_create_datagram_endpoint(self): + class TestMyDatagramProto(MyDatagramProto): + def __init__(inner_self): + super(TestMyDatagramProto, inner_self).__init__(loop=self.loop) + + def datagram_received(self, data, addr): + super(TestMyDatagramProto, self).datagram_received(data, addr) + self.transport.sendto(b'resp:'+data, addr) + + coro = self.loop.create_datagram_endpoint( + TestMyDatagramProto, local_addr=('127.0.0.1', 0)) + s_transport, server = self.loop.run_until_complete(coro) + host, port = s_transport.get_extra_info('sockname') + + self.assertIsInstance(s_transport, asyncio.Transport) + self.assertIsInstance(server, TestMyDatagramProto) + self.assertEqual('INITIALIZED', server.state) + self.assertIs(server.transport, s_transport) + + coro = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(loop=self.loop), + remote_addr=(host, port)) + transport, client = self.loop.run_until_complete(coro) + + self.assertIsInstance(transport, asyncio.Transport) + self.assertIsInstance(client, MyDatagramProto) + self.assertEqual('INITIALIZED', client.state) + self.assertIs(client.transport, transport) + + transport.sendto(b'xxx') + test_utils.run_until(self.loop, lambda: server.nbytes) + self.assertEqual(3, server.nbytes) + test_utils.run_until(self.loop, lambda: client.nbytes) + + # received + self.assertEqual(8, client.nbytes) + + # extra info is available + self.assertIsNotNone(transport.get_extra_info('sockname')) + + # close connection + transport.close() + self.loop.run_until_complete(client.done) + self.assertEqual('CLOSED', client.state) + server.transport.close() + + def test_internal_fds(self): + loop = self.create_event_loop() + if not isinstance(loop, selector_events.BaseSelectorEventLoop): + loop.close() + self.skipTest('loop is not a BaseSelectorEventLoop') + + self.assertEqual(1, loop._internal_fds) + loop.close() + self.assertEqual(0, loop._internal_fds) + self.assertIsNone(loop._csock) + self.assertIsNone(loop._ssock) + + @unittest.skipUnless(sys.platform != 'win32', + "Don't support pipes for Windows") + def test_read_pipe(self): + proto = MyReadPipeProto(loop=self.loop) + + rpipe, wpipe = os.pipe() + pipeobj = io.open(rpipe, 'rb', 1024) + + @asyncio.coroutine + def connect(): + t, p = yield From(self.loop.connect_read_pipe( + lambda: proto, pipeobj)) + self.assertIs(p, proto) + self.assertIs(t, proto.transport) + self.assertEqual(['INITIAL', 'CONNECTED'], proto.state) + self.assertEqual(0, proto.nbytes) + + self.loop.run_until_complete(connect()) + + os.write(wpipe, b'1') + test_utils.run_until(self.loop, lambda: proto.nbytes >= 1) + self.assertEqual(1, proto.nbytes) + + os.write(wpipe, b'2345') + test_utils.run_until(self.loop, lambda: 
proto.nbytes >= 5) + self.assertEqual(['INITIAL', 'CONNECTED'], proto.state) + self.assertEqual(5, proto.nbytes) + + os.close(wpipe) + self.loop.run_until_complete(proto.done) + self.assertEqual( + ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state) + # extra info is available + self.assertIsNotNone(proto.transport.get_extra_info('pipe')) + + @unittest.skipUnless(sys.platform != 'win32', + "Don't support pipes for Windows") + # select, poll and kqueue don't support character devices (PTY) on Mac OS X + # older than 10.6 (Snow Leopard) + @support.requires_mac_ver(10, 6) + # Issue #20495: The test hangs on FreeBSD 7.2 but pass on FreeBSD 9 + @support.requires_freebsd_version(8) + def test_read_pty_output(self): + proto = MyReadPipeProto(loop=self.loop) + + master, slave = os.openpty() + master_read_obj = io.open(master, 'rb', 0) + + @asyncio.coroutine + def connect(): + t, p = yield From(self.loop.connect_read_pipe(lambda: proto, + master_read_obj)) + self.assertIs(p, proto) + self.assertIs(t, proto.transport) + self.assertEqual(['INITIAL', 'CONNECTED'], proto.state) + self.assertEqual(0, proto.nbytes) + + self.loop.run_until_complete(connect()) + + os.write(slave, b'1') + test_utils.run_until(self.loop, lambda: proto.nbytes) + self.assertEqual(1, proto.nbytes) + + os.write(slave, b'2345') + test_utils.run_until(self.loop, lambda: proto.nbytes >= 5) + self.assertEqual(['INITIAL', 'CONNECTED'], proto.state) + self.assertEqual(5, proto.nbytes) + + os.close(slave) + self.loop.run_until_complete(proto.done) + self.assertEqual( + ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state) + # extra info is available + self.assertIsNotNone(proto.transport.get_extra_info('pipe')) + + @unittest.skipUnless(sys.platform != 'win32', + "Don't support pipes for Windows") + def test_write_pipe(self): + rpipe, wpipe = os.pipe() + pipeobj = io.open(wpipe, 'wb', 1024) + + proto = MyWritePipeProto(loop=self.loop) + connect = self.loop.connect_write_pipe(lambda: proto, pipeobj) + transport, p = self.loop.run_until_complete(connect) + self.assertIs(p, proto) + self.assertIs(transport, proto.transport) + self.assertEqual('CONNECTED', proto.state) + + transport.write(b'1') + + data = bytearray() + def reader(data): + chunk = os.read(rpipe, 1024) + data += chunk + return len(data) + + test_utils.run_until(self.loop, lambda: reader(data) >= 1) + self.assertEqual(b'1', data) + + transport.write(b'2345') + test_utils.run_until(self.loop, lambda: reader(data) >= 5) + self.assertEqual(b'12345', data) + self.assertEqual('CONNECTED', proto.state) + + os.close(rpipe) + + # extra info is available + self.assertIsNotNone(proto.transport.get_extra_info('pipe')) + + # close connection + proto.transport.close() + self.loop.run_until_complete(proto.done) + self.assertEqual('CLOSED', proto.state) + + @unittest.skipUnless(sys.platform != 'win32', + "Don't support pipes for Windows") + def test_write_pipe_disconnect_on_close(self): + rsock, wsock = test_utils.socketpair() + rsock.setblocking(False) + if hasattr(wsock, 'detach'): + wsock_fd = wsock.detach() + else: + # Python 2 + wsock_fd = wsock.fileno() + pipeobj = io.open(wsock_fd, 'wb', 1024) + + proto = MyWritePipeProto(loop=self.loop) + connect = self.loop.connect_write_pipe(lambda: proto, pipeobj) + transport, p = self.loop.run_until_complete(connect) + self.assertIs(p, proto) + self.assertIs(transport, proto.transport) + self.assertEqual('CONNECTED', proto.state) + + transport.write(b'1') + data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024)) + 
self.assertEqual(b'1', data) + + rsock.close() + + self.loop.run_until_complete(proto.done) + self.assertEqual('CLOSED', proto.state) + + @unittest.skipUnless(sys.platform != 'win32', + "Don't support pipes for Windows") + # select, poll and kqueue don't support character devices (PTY) on Mac OS X + # older than 10.6 (Snow Leopard) + @support.requires_mac_ver(10, 6) + def test_write_pty(self): + master, slave = os.openpty() + slave_write_obj = io.open(slave, 'wb', 0) + + proto = MyWritePipeProto(loop=self.loop) + connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj) + transport, p = self.loop.run_until_complete(connect) + self.assertIs(p, proto) + self.assertIs(transport, proto.transport) + self.assertEqual('CONNECTED', proto.state) + + transport.write(b'1') + + data = bytearray() + def reader(data): + chunk = os.read(master, 1024) + data += chunk + return len(data) + + test_utils.run_until(self.loop, lambda: reader(data) >= 1, + timeout=10) + self.assertEqual(b'1', data) + + transport.write(b'2345') + test_utils.run_until(self.loop, lambda: reader(data) >= 5, + timeout=10) + self.assertEqual(b'12345', data) + self.assertEqual('CONNECTED', proto.state) + + os.close(master) + + # extra info is available + self.assertIsNotNone(proto.transport.get_extra_info('pipe')) + + # close connection + proto.transport.close() + self.loop.run_until_complete(proto.done) + self.assertEqual('CLOSED', proto.state) + + def test_prompt_cancellation(self): + r, w = test_utils.socketpair() + r.setblocking(False) + f = self.loop.sock_recv(r, 1) + ov = getattr(f, 'ov', None) + if ov is not None: + self.assertTrue(ov.pending) + + @asyncio.coroutine + def main(): + try: + self.loop.call_soon(f.cancel) + yield From(f) + except asyncio.CancelledError: + res = 'cancelled' + else: + res = None + finally: + self.loop.stop() + raise Return(res) + + start = time_monotonic() + t = asyncio.Task(main(), loop=self.loop) + self.loop.run_forever() + elapsed = time_monotonic() - start + + self.assertLess(elapsed, 0.1) + self.assertEqual(t.result(), 'cancelled') + self.assertRaises(asyncio.CancelledError, f.result) + if ov is not None: + self.assertFalse(ov.pending) + self.loop._stop_serving(r) + + r.close() + w.close() + + def test_timeout_rounding(self): + def _run_once(): + self.loop._run_once_counter += 1 + orig_run_once() + + orig_run_once = self.loop._run_once + self.loop._run_once_counter = 0 + self.loop._run_once = _run_once + + @asyncio.coroutine + def wait(): + loop = self.loop + yield From(asyncio.sleep(1e-2, loop=loop)) + yield From(asyncio.sleep(1e-4, loop=loop)) + yield From(asyncio.sleep(1e-6, loop=loop)) + yield From(asyncio.sleep(1e-8, loop=loop)) + yield From(asyncio.sleep(1e-10, loop=loop)) + + self.loop.run_until_complete(wait()) + # The ideal number of call is 22, but on some platforms, the selector + # may sleep at little bit less than timeout depending on the resolution + # of the clock used by the kernel. Tolerate a few useless calls on + # these platforms. 
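+        # The dict is passed as the assertion message so a failure reports
+        # the call count, clock resolution and selector in use.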
+ self.assertLessEqual(self.loop._run_once_counter, 30, + {'calls': self.loop._run_once_counter, + 'clock_resolution': self.loop._clock_resolution, + 'selector': self.loop._selector.__class__.__name__}) + + def test_sock_connect_address(self): + # In debug mode, sock_connect() must ensure that the address is already + # resolved (call _check_resolved_address()) + self.loop.set_debug(True) + + addresses = [(socket.AF_INET, ('www.python.org', 80))] + if support.IPV6_ENABLED: + addresses.extend(( + (socket.AF_INET6, ('www.python.org', 80)), + (socket.AF_INET6, ('www.python.org', 80, 0, 0)), + )) + + for family, address in addresses: + for sock_type in (socket.SOCK_STREAM, socket.SOCK_DGRAM): + sock = socket.socket(family, sock_type) + with contextlib.closing(sock): + sock.setblocking(False) + connect = self.loop.sock_connect(sock, address) + with self.assertRaises(ValueError) as cm: + self.loop.run_until_complete(connect) + self.assertIn('address must be resolved', + str(cm.exception)) + + def test_remove_fds_after_closing(self): + loop = self.create_event_loop() + callback = lambda: None + r, w = test_utils.socketpair() + self.addCleanup(r.close) + self.addCleanup(w.close) + loop.add_reader(r, callback) + loop.add_writer(w, callback) + loop.close() + self.assertFalse(loop.remove_reader(r)) + self.assertFalse(loop.remove_writer(w)) + + def test_add_fds_after_closing(self): + loop = self.create_event_loop() + callback = lambda: None + r, w = test_utils.socketpair() + self.addCleanup(r.close) + self.addCleanup(w.close) + loop.close() + with self.assertRaises(RuntimeError): + loop.add_reader(r, callback) + with self.assertRaises(RuntimeError): + loop.add_writer(w, callback) + + def test_close_running_event_loop(self): + @asyncio.coroutine + def close_loop(loop): + self.loop.close() + + coro = close_loop(self.loop) + with self.assertRaises(RuntimeError): + self.loop.run_until_complete(coro) + + def test_close(self): + self.loop.close() + + @asyncio.coroutine + def test(): + pass + + func = lambda: False + coro = test() + self.addCleanup(coro.close) + + # operation blocked when the loop is closed + with self.assertRaises(RuntimeError): + self.loop.run_forever() + with self.assertRaises(RuntimeError): + fut = asyncio.Future(loop=self.loop) + self.loop.run_until_complete(fut) + with self.assertRaises(RuntimeError): + self.loop.call_soon(func) + with self.assertRaises(RuntimeError): + self.loop.call_soon_threadsafe(func) + with self.assertRaises(RuntimeError): + self.loop.call_later(1.0, func) + with self.assertRaises(RuntimeError): + self.loop.call_at(self.loop.time() + .0, func) + with self.assertRaises(RuntimeError): + self.loop.run_in_executor(None, func) + with self.assertRaises(RuntimeError): + self.loop.create_task(coro) + with self.assertRaises(RuntimeError): + self.loop.add_signal_handler(signal.SIGTERM, func) + + +class SubprocessTestsMixin(object): + + def check_terminated(self, returncode): + if sys.platform == 'win32': + self.assertIsInstance(returncode, int) + # expect 1 but sometimes get 0 + else: + self.assertEqual(-signal.SIGTERM, returncode) + + def check_killed(self, returncode): + if sys.platform == 'win32': + self.assertIsInstance(returncode, int) + # expect 1 but sometimes get 0 + else: + self.assertEqual(-signal.SIGKILL, returncode) + + def test_subprocess_exec(self): + prog = os.path.join(os.path.dirname(__file__), 'echo.py') + + connect = self.loop.subprocess_exec( + functools.partial(MySubprocessProtocol, self.loop), + sys.executable, prog) + transp, proto = 
self.loop.run_until_complete(connect) + self.assertIsInstance(proto, MySubprocessProtocol) + self.loop.run_until_complete(proto.connected) + self.assertEqual('CONNECTED', proto.state) + + stdin = transp.get_pipe_transport(0) + stdin.write(b'Python The Winner') + self.loop.run_until_complete(proto.got_data[1].wait()) + with test_utils.disable_logger(): + transp.close() + self.loop.run_until_complete(proto.completed) + self.check_killed(proto.returncode) + self.assertEqual(b'Python The Winner', proto.data[1]) + + def test_subprocess_interactive(self): + prog = os.path.join(os.path.dirname(__file__), 'echo.py') + + connect = self.loop.subprocess_exec( + functools.partial(MySubprocessProtocol, self.loop), + sys.executable, prog) + transp, proto = self.loop.run_until_complete(connect) + self.assertIsInstance(proto, MySubprocessProtocol) + self.loop.run_until_complete(proto.connected) + self.assertEqual('CONNECTED', proto.state) + + stdin = transp.get_pipe_transport(0) + stdin.write(b'Python ') + self.loop.run_until_complete(proto.got_data[1].wait()) + proto.got_data[1].clear() + self.assertEqual(b'Python ', proto.data[1]) + + stdin.write(b'The Winner') + self.loop.run_until_complete(proto.got_data[1].wait()) + self.assertEqual(b'Python The Winner', proto.data[1]) + + with test_utils.disable_logger(): + transp.close() + self.loop.run_until_complete(proto.completed) + self.check_killed(proto.returncode) + + def test_subprocess_shell(self): + connect = self.loop.subprocess_shell( + functools.partial(MySubprocessProtocol, self.loop), + 'echo Python') + transp, proto = self.loop.run_until_complete(connect) + self.assertIsInstance(proto, MySubprocessProtocol) + self.loop.run_until_complete(proto.connected) + + transp.get_pipe_transport(0).close() + self.loop.run_until_complete(proto.completed) + self.assertEqual(0, proto.returncode) + self.assertTrue(all(f.done() for f in proto.disconnects.values())) + self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python') + self.assertEqual(proto.data[2], b'') + transp.close() + + def test_subprocess_exitcode(self): + connect = self.loop.subprocess_shell( + functools.partial(MySubprocessProtocol, self.loop), + 'exit 7', stdin=None, stdout=None, stderr=None) + transp, proto = self.loop.run_until_complete(connect) + self.assertIsInstance(proto, MySubprocessProtocol) + self.loop.run_until_complete(proto.completed) + self.assertEqual(7, proto.returncode) + transp.close() + + def test_subprocess_close_after_finish(self): + connect = self.loop.subprocess_shell( + functools.partial(MySubprocessProtocol, self.loop), + 'exit 7', stdin=None, stdout=None, stderr=None) + transp, proto = self.loop.run_until_complete(connect) + self.assertIsInstance(proto, MySubprocessProtocol) + self.assertIsNone(transp.get_pipe_transport(0)) + self.assertIsNone(transp.get_pipe_transport(1)) + self.assertIsNone(transp.get_pipe_transport(2)) + self.loop.run_until_complete(proto.completed) + self.assertEqual(7, proto.returncode) + self.assertIsNone(transp.close()) + + def test_subprocess_kill(self): + prog = os.path.join(os.path.dirname(__file__), 'echo.py') + + connect = self.loop.subprocess_exec( + functools.partial(MySubprocessProtocol, self.loop), + sys.executable, prog) + transp, proto = self.loop.run_until_complete(connect) + self.assertIsInstance(proto, MySubprocessProtocol) + self.loop.run_until_complete(proto.connected) + + transp.kill() + self.loop.run_until_complete(proto.completed) + self.check_killed(proto.returncode) + transp.close() + + def test_subprocess_terminate(self): + 
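+        # terminate() sends SIGTERM on POSIX; check_terminated() expects a
+        # -SIGTERM returncode there (and a plain int on Windows).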
prog = os.path.join(os.path.dirname(__file__), 'echo.py') + + connect = self.loop.subprocess_exec( + functools.partial(MySubprocessProtocol, self.loop), + sys.executable, prog) + transp, proto = self.loop.run_until_complete(connect) + self.assertIsInstance(proto, MySubprocessProtocol) + self.loop.run_until_complete(proto.connected) + + transp.terminate() + self.loop.run_until_complete(proto.completed) + self.check_terminated(proto.returncode) + transp.close() + + @unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP") + def test_subprocess_send_signal(self): + prog = os.path.join(os.path.dirname(__file__), 'echo.py') + + connect = self.loop.subprocess_exec( + functools.partial(MySubprocessProtocol, self.loop), + sys.executable, prog) + transp, proto = self.loop.run_until_complete(connect) + self.assertIsInstance(proto, MySubprocessProtocol) + self.loop.run_until_complete(proto.connected) + + transp.send_signal(signal.SIGHUP) + self.loop.run_until_complete(proto.completed) + self.assertEqual(-signal.SIGHUP, proto.returncode) + transp.close() + + def test_subprocess_stderr(self): + prog = os.path.join(os.path.dirname(__file__), 'echo2.py') + + connect = self.loop.subprocess_exec( + functools.partial(MySubprocessProtocol, self.loop), + sys.executable, prog) + transp, proto = self.loop.run_until_complete(connect) + self.assertIsInstance(proto, MySubprocessProtocol) + self.loop.run_until_complete(proto.connected) + + stdin = transp.get_pipe_transport(0) + stdin.write(b'test') + + self.loop.run_until_complete(proto.completed) + + transp.close() + self.assertEqual(b'OUT:test', proto.data[1]) + self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2]) + self.assertEqual(0, proto.returncode) + + def test_subprocess_stderr_redirect_to_stdout(self): + prog = os.path.join(os.path.dirname(__file__), 'echo2.py') + + connect = self.loop.subprocess_exec( + functools.partial(MySubprocessProtocol, self.loop), + sys.executable, prog, stderr=subprocess.STDOUT) + transp, proto = self.loop.run_until_complete(connect) + self.assertIsInstance(proto, MySubprocessProtocol) + self.loop.run_until_complete(proto.connected) + + stdin = transp.get_pipe_transport(0) + self.assertIsNotNone(transp.get_pipe_transport(1)) + self.assertIsNone(transp.get_pipe_transport(2)) + + stdin.write(b'test') + self.loop.run_until_complete(proto.completed) + self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'), + proto.data[1]) + self.assertEqual(b'', proto.data[2]) + + transp.close() + self.assertEqual(0, proto.returncode) + + def test_subprocess_close_client_stream(self): + prog = os.path.join(os.path.dirname(__file__), 'echo3.py') + + connect = self.loop.subprocess_exec( + functools.partial(MySubprocessProtocol, self.loop), + sys.executable, prog) + transp, proto = self.loop.run_until_complete(connect) + self.assertIsInstance(proto, MySubprocessProtocol) + self.loop.run_until_complete(proto.connected) + + stdin = transp.get_pipe_transport(0) + stdout = transp.get_pipe_transport(1) + stdin.write(b'test') + self.loop.run_until_complete(proto.got_data[1].wait()) + self.assertEqual(b'OUT:test', proto.data[1]) + + stdout.close() + self.loop.run_until_complete(proto.disconnects[1]) + stdin.write(b'xxx') + self.loop.run_until_complete(proto.got_data[2].wait()) + if sys.platform != 'win32': + self.assertEqual(b'ERR:BrokenPipeError', proto.data[2]) + else: + # After closing the read-end of a pipe, writing to the + # write-end using os.write() fails with errno==EINVAL and + # GetLastError()==ERROR_INVALID_NAME on 
Windows!?!  (Using
+            # WriteFile() we get ERROR_BROKEN_PIPE as expected.)
+            self.assertEqual(b'ERR:OSError', proto.data[2])
+        with test_utils.disable_logger():
+            transp.close()
+        self.loop.run_until_complete(proto.completed)
+        self.check_killed(proto.returncode)
+
+    @unittest.skipUnless(hasattr(os, 'setsid'), "need os.setsid()")
+    def test_subprocess_wait_no_same_group(self):
+        # start the new process in a new session
+        connect = self.loop.subprocess_shell(
+            functools.partial(MySubprocessProtocol, self.loop),
+            'exit 7', stdin=None, stdout=None, stderr=None,
+            start_new_session=True)
+        _, proto = self.loop.run_until_complete(connect)
+        self.assertIsInstance(proto, MySubprocessProtocol)
+        self.loop.run_until_complete(proto.completed)
+        self.assertEqual(7, proto.returncode)
+
+    def test_subprocess_exec_invalid_args(self):
+        @asyncio.coroutine
+        def connect(**kwds):
+            yield From(self.loop.subprocess_exec(
+                asyncio.SubprocessProtocol,
+                'pwd', **kwds))
+
+        with self.assertRaises(ValueError):
+            self.loop.run_until_complete(connect(universal_newlines=True))
+        with self.assertRaises(ValueError):
+            self.loop.run_until_complete(connect(bufsize=4096))
+        with self.assertRaises(ValueError):
+            self.loop.run_until_complete(connect(shell=True))
+
+    def test_subprocess_shell_invalid_args(self):
+        @asyncio.coroutine
+        def connect(cmd=None, **kwds):
+            if not cmd:
+                cmd = 'pwd'
+            yield From(self.loop.subprocess_shell(
+                asyncio.SubprocessProtocol,
+                cmd, **kwds))
+
+        with self.assertRaises(ValueError):
+            self.loop.run_until_complete(connect(['ls', '-l']))
+        with self.assertRaises(ValueError):
+            self.loop.run_until_complete(connect(universal_newlines=True))
+        with self.assertRaises(ValueError):
+            self.loop.run_until_complete(connect(bufsize=4096))
+        with self.assertRaises(ValueError):
+            self.loop.run_until_complete(connect(shell=False))
+
+
+if sys.platform == 'win32':
+
+    class SelectEventLoopTests(EventLoopTestsMixin, test_utils.TestCase):
+
+        def create_event_loop(self):
+            return asyncio.SelectorEventLoop()
+
+    class ProactorEventLoopTests(EventLoopTestsMixin,
+                                 SubprocessTestsMixin,
+                                 test_utils.TestCase):
+
+        def create_event_loop(self):
+            return asyncio.ProactorEventLoop()
+
+        if not sslproto._is_sslproto_available():
+            def test_create_ssl_connection(self):
+                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
+
+            def test_create_server_ssl(self):
+                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
+
+            def test_create_server_ssl_verify_failed(self):
+                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
+
+            def test_create_server_ssl_match_failed(self):
+                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
+
+            def test_create_server_ssl_verified(self):
+                raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
+
+        def test_legacy_create_ssl_connection(self):
+            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
+
+        def test_legacy_create_server_ssl(self):
+            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
+
+        def test_legacy_create_server_ssl_verify_failed(self):
+            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
+
+        def test_legacy_create_server_ssl_match_failed(self):
+            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
+
+        def test_legacy_create_server_ssl_verified(self):
+            raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
+
+        def test_reader_callback(self):
+            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
+
+        def test_reader_callback_cancel(self):
+            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
+
+        def test_writer_callback(self):
+            raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
+
+        def test_writer_callback_cancel(self):
+            raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
+
+        def test_create_datagram_endpoint(self):
+            raise unittest.SkipTest(
+                "IocpEventLoop does not have create_datagram_endpoint()")
+
+        def test_remove_fds_after_closing(self):
+            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
+else:
+    from trollius import selectors
+
+    class UnixEventLoopTestsMixin(EventLoopTestsMixin):
+        def setUp(self):
+            super(UnixEventLoopTestsMixin, self).setUp()
+            watcher = asyncio.SafeChildWatcher()
+            watcher.attach_loop(self.loop)
+            asyncio.set_child_watcher(watcher)
+
+        def tearDown(self):
+            asyncio.set_child_watcher(None)
+            super(UnixEventLoopTestsMixin, self).tearDown()
+
+    if hasattr(selectors, 'KqueueSelector'):
+        class KqueueEventLoopTests(UnixEventLoopTestsMixin,
+                                   SubprocessTestsMixin,
+                                   test_utils.TestCase):
+
+            def create_event_loop(self):
+                return asyncio.SelectorEventLoop(
+                    selectors.KqueueSelector())
+
+            # kqueue doesn't support character devices (PTY) on Mac OS X older
+            # than 10.9 (Mavericks)
+            @support.requires_mac_ver(10, 9)
+            # Issue #20667: KqueueEventLoopTests.test_read_pty_output()
+            # hangs on OpenBSD 5.5
+            @unittest.skipIf(sys.platform.startswith('openbsd'),
+                             'test hangs on OpenBSD')
+            def test_read_pty_output(self):
+                super(KqueueEventLoopTests, self).test_read_pty_output()
+
+            # kqueue doesn't support character devices (PTY) on Mac OS X older
+            # than 10.9 (Mavericks)
+            @support.requires_mac_ver(10, 9)
+            def test_write_pty(self):
+                super(KqueueEventLoopTests, self).test_write_pty()
+
+    if hasattr(selectors, 'EpollSelector'):
+        class EPollEventLoopTests(UnixEventLoopTestsMixin,
+                                  SubprocessTestsMixin,
+                                  test_utils.TestCase):
+
+            def create_event_loop(self):
+                return asyncio.SelectorEventLoop(selectors.EpollSelector())
+
+    if hasattr(selectors, 'PollSelector'):
+        class PollEventLoopTests(UnixEventLoopTestsMixin,
+                                 SubprocessTestsMixin,
+                                 test_utils.TestCase):
+
+            def create_event_loop(self):
+                return asyncio.SelectorEventLoop(selectors.PollSelector())
+
+    # Should always exist.
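+    # select() is available on every supported platform, so this selector
+    # variant, unlike the kqueue/epoll/poll ones above, needs no hasattr()
+    # guard.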
+    class SelectEventLoopTests(UnixEventLoopTestsMixin,
+                               SubprocessTestsMixin,
+                               test_utils.TestCase):
+
+        def create_event_loop(self):
+            return asyncio.SelectorEventLoop(selectors.SelectSelector())
+
+
+def noop(*args):
+    pass
+
+
+class HandleTests(test_utils.TestCase):
+
+    def setUp(self):
+        self.loop = mock.Mock()
+        self.loop.get_debug.return_value = True
+
+    def test_handle(self):
+        def callback(*args):
+            return args
+
+        args = ()
+        h = asyncio.Handle(callback, args, self.loop)
+        self.assertIs(h._callback, callback)
+        self.assertIs(h._args, args)
+        self.assertFalse(h._cancelled)
+
+        h.cancel()
+        self.assertTrue(h._cancelled)
+
+    def test_handle_from_handle(self):
+        def callback(*args):
+            return args
+        h1 = asyncio.Handle(callback, (), loop=self.loop)
+        self.assertRaises(
+            AssertionError, asyncio.Handle, h1, (), self.loop)
+
+    def test_callback_with_exception(self):
+        def callback():
+            raise ValueError()
+
+        self.loop = mock.Mock()
+        self.loop.call_exception_handler = mock.Mock()
+
+        h = asyncio.Handle(callback, (), self.loop)
+        h._run()
+
+        self.loop.call_exception_handler.assert_called_with({
+            'message': test_utils.MockPattern('Exception in callback.*'),
+            'exception': mock.ANY,
+            'handle': h,
+            'source_traceback': h._source_traceback,
+        })
+
+    def test_handle_weakref(self):
+        wd = weakref.WeakValueDictionary()
+        h = asyncio.Handle(lambda: None, (), self.loop)
+        wd['h'] = h  # Would fail without __weakref__ slot.
+
+    def test_handle_repr(self):
+        self.loop.get_debug.return_value = False
+
+        # simple function
+        h = asyncio.Handle(noop, (1, 2), self.loop)
+        filename, lineno = test_utils.get_function_source(noop)
+        self.assertEqual(repr(h),
+                         '<Handle noop(1, 2) at %s:%s>'
+                         % (filename, lineno))
+
+        # cancelled handle
+        h.cancel()
+        self.assertEqual(repr(h),
+                         '<Handle cancelled>')
+
+        # decorated function
+        cb = asyncio.coroutine(noop)
+        h = asyncio.Handle(cb, (), self.loop)
+        self.assertEqual(repr(h),
+                         '<Handle noop() at %s:%s>'
+                         % (filename, lineno))
+
+        # partial function
+        cb = functools.partial(noop, 1, 2)
+        h = asyncio.Handle(cb, (3,), self.loop)
+        regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
+                 % (re.escape(filename), lineno))
+        self.assertRegex(repr(h), regex)
+
+        # partial method
+        if sys.version_info >= (3, 4):
+            method = HandleTests.test_handle_repr
+            cb = functools.partialmethod(method)
+            filename, lineno = test_utils.get_function_source(method)
+            h = asyncio.Handle(cb, (), self.loop)
+
+            cb_regex = r'<function HandleTests.test_handle_repr .*>'
+            cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
+            regex = (r'^<Handle %s at %s:%s>$'
+                     % (cb_regex, re.escape(filename), lineno))
+            self.assertRegex(repr(h), regex)
+
+    def test_handle_repr_debug(self):
+        self.loop.get_debug.return_value = True
+
+        # simple function
+        create_filename = sys._getframe().f_code.co_filename
+        create_lineno = sys._getframe().f_lineno + 1
+        h = asyncio.Handle(noop, (1, 2), self.loop)
+        filename, lineno = test_utils.get_function_source(noop)
+        self.assertEqual(repr(h),
+                         '<Handle noop(1, 2) at %s:%s created at %s:%s>'
+                         % (filename, lineno, create_filename, create_lineno))
+
+        # cancelled handle
+        h.cancel()
+        self.assertEqual(
+            repr(h),
+            '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
+            % (filename, lineno, create_filename, create_lineno))
+
+        # double cancellation won't overwrite _repr
+        h.cancel()
+        self.assertEqual(
+            repr(h),
+            '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
+            % (filename, lineno, create_filename, create_lineno))
+
+    def test_handle_source_traceback(self):
+        loop = asyncio.get_event_loop_policy().new_event_loop()
+        loop.set_debug(True)
+        self.set_event_loop(loop)
+
+        def check_source_traceback(h):
+            lineno = sys._getframe(1).f_lineno - 1
+            self.assertIsInstance(h._source_traceback, list)
+            filename = sys._getframe().f_code.co_filename
+            self.assertEqual(h._source_traceback[-1][:3],
+                             (filename,
+                              lineno,
+                              'test_handle_source_traceback'))
+
+        # call_soon
+        h = loop.call_soon(noop)
+        check_source_traceback(h)
+
+        # call_soon_threadsafe
+        h = loop.call_soon_threadsafe(noop)
+        check_source_traceback(h)
+
+        # call_later
+        h = loop.call_later(0, noop)
+        check_source_traceback(h)
+
+        # call_at
+        h = loop.call_at(loop.time(), noop)
+        check_source_traceback(h)
+
+
+class TimerTests(test_utils.TestCase):
+
+    def setUp(self):
+        self.loop = mock.Mock()
+
+    def test_hash(self):
+        when = time_monotonic()
+        h = asyncio.TimerHandle(when, lambda: False, (),
+                                mock.Mock())
+        self.assertEqual(hash(h), hash(when))
+
+    def test_timer(self):
+        def callback(*args):
+            return args
+
+        args = (1, 2, 3)
+        when = time_monotonic()
+        h = asyncio.TimerHandle(when, callback, args, mock.Mock())
+        self.assertIs(h._callback, callback)
+        self.assertIs(h._args, args)
+        self.assertFalse(h._cancelled)
+
+        # cancel
+        h.cancel()
+        self.assertTrue(h._cancelled)
+        self.assertIsNone(h._callback)
+        self.assertIsNone(h._args)
+
+        # when cannot be None
+        self.assertRaises(AssertionError,
+                          asyncio.TimerHandle, None, callback, args,
+                          self.loop)
+
+    def test_timer_repr(self):
+        self.loop.get_debug.return_value = False
+
+        # simple function
+        h = asyncio.TimerHandle(123, noop, (), self.loop)
+        src = test_utils.get_function_source(noop)
+        self.assertEqual(repr(h),
+                         '<TimerHandle when=123 noop() at %s:%s>' % src)
+
+        # cancelled handle
+        h.cancel()
+        self.assertEqual(repr(h),
+                         '<TimerHandle cancelled when=123>')
+
+    def test_timer_repr_debug(self):
+        self.loop.get_debug.return_value = True
+
+        # simple function
+        create_filename = sys._getframe().f_code.co_filename
+        create_lineno = sys._getframe().f_lineno + 1
+        h = asyncio.TimerHandle(123, noop, (), self.loop)
+        filename, lineno = test_utils.get_function_source(noop)
+        self.assertEqual(repr(h),
+                         '<TimerHandle when=123 noop() at %s:%s created at %s:%s>'
+                         % (filename, lineno, create_filename, create_lineno))
+
+        # cancelled handle
+        h.cancel()
+        self.assertEqual(repr(h),
+                         '<TimerHandle cancelled when=123 noop() at %s:%s created at %s:%s>'
+                         % (filename, lineno, create_filename, create_lineno))
+
+
+    def test_timer_comparison(self):
+        def callback(*args):
+            return args
+
+        when = time_monotonic()
+
+        h1 = asyncio.TimerHandle(when, callback, (), self.loop)
+        h2 = asyncio.TimerHandle(when, callback, (), self.loop)
+        # TODO: Use assertLess etc.
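+        # Equal deadlines: the two handles compare equal, and neither
+        # orders strictly before the other.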
+ self.assertFalse(h1 < h2) + self.assertFalse(h2 < h1) + self.assertTrue(h1 <= h2) + self.assertTrue(h2 <= h1) + self.assertFalse(h1 > h2) + self.assertFalse(h2 > h1) + self.assertTrue(h1 >= h2) + self.assertTrue(h2 >= h1) + self.assertTrue(h1 == h2) + self.assertFalse(h1 != h2) + + h2.cancel() + self.assertFalse(h1 == h2) + + h1 = asyncio.TimerHandle(when, callback, (), self.loop) + h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop) + self.assertTrue(h1 < h2) + self.assertFalse(h2 < h1) + self.assertTrue(h1 <= h2) + self.assertFalse(h2 <= h1) + self.assertFalse(h1 > h2) + self.assertTrue(h2 > h1) + self.assertFalse(h1 >= h2) + self.assertTrue(h2 >= h1) + self.assertFalse(h1 == h2) + self.assertTrue(h1 != h2) + + h3 = asyncio.Handle(callback, (), self.loop) + self.assertIs(NotImplemented, h1.__eq__(h3)) + self.assertIs(NotImplemented, h1.__ne__(h3)) + + +class AbstractEventLoopTests(test_utils.TestCase): + + def test_not_implemented(self): + f = mock.Mock() + loop = asyncio.AbstractEventLoop() + self.assertRaises( + NotImplementedError, loop.run_forever) + self.assertRaises( + NotImplementedError, loop.run_until_complete, None) + self.assertRaises( + NotImplementedError, loop.stop) + self.assertRaises( + NotImplementedError, loop.is_running) + # skip some tests if the AbstractEventLoop class comes from asyncio + # and the asyncio version (python version in fact) is older than 3.4.2 + if events.asyncio is None or sys.version_info >= (3, 4, 2): + self.assertRaises( + NotImplementedError, loop.is_closed) + self.assertRaises( + NotImplementedError, loop.create_task, None) + self.assertRaises( + NotImplementedError, loop.close) + self.assertRaises( + NotImplementedError, loop.call_later, None, None) + self.assertRaises( + NotImplementedError, loop.call_at, f, f) + self.assertRaises( + NotImplementedError, loop.call_soon, None) + self.assertRaises( + NotImplementedError, loop.time) + self.assertRaises( + NotImplementedError, loop.call_soon_threadsafe, None) + self.assertRaises( + NotImplementedError, loop.run_in_executor, f, f) + self.assertRaises( + NotImplementedError, loop.set_default_executor, f) + self.assertRaises( + NotImplementedError, loop.getaddrinfo, 'localhost', 8080) + self.assertRaises( + NotImplementedError, loop.getnameinfo, ('localhost', 8080)) + self.assertRaises( + NotImplementedError, loop.create_connection, f) + self.assertRaises( + NotImplementedError, loop.create_server, f) + self.assertRaises( + NotImplementedError, loop.create_datagram_endpoint, f) + self.assertRaises( + NotImplementedError, loop.add_reader, 1, f) + self.assertRaises( + NotImplementedError, loop.remove_reader, 1) + self.assertRaises( + NotImplementedError, loop.add_writer, 1, f) + self.assertRaises( + NotImplementedError, loop.remove_writer, 1) + self.assertRaises( + NotImplementedError, loop.sock_recv, f, 10) + self.assertRaises( + NotImplementedError, loop.sock_sendall, f, 10) + self.assertRaises( + NotImplementedError, loop.sock_connect, f, f) + self.assertRaises( + NotImplementedError, loop.sock_accept, f) + self.assertRaises( + NotImplementedError, loop.add_signal_handler, 1, f) + self.assertRaises( + NotImplementedError, loop.remove_signal_handler, 1) + self.assertRaises( + NotImplementedError, loop.remove_signal_handler, 1) + self.assertRaises( + NotImplementedError, loop.connect_read_pipe, f, + mock.sentinel.pipe) + self.assertRaises( + NotImplementedError, loop.connect_write_pipe, f, + mock.sentinel.pipe) + self.assertRaises( + NotImplementedError, loop.subprocess_shell, f, + 
mock.sentinel) + self.assertRaises( + NotImplementedError, loop.subprocess_exec, f) + self.assertRaises( + NotImplementedError, loop.set_exception_handler, f) + self.assertRaises( + NotImplementedError, loop.default_exception_handler, f) + self.assertRaises( + NotImplementedError, loop.call_exception_handler, f) + self.assertRaises( + NotImplementedError, loop.get_debug) + self.assertRaises( + NotImplementedError, loop.set_debug, f) + + +class ProtocolsAbsTests(test_utils.TestCase): + + def test_empty(self): + f = mock.Mock() + p = asyncio.Protocol() + self.assertIsNone(p.connection_made(f)) + self.assertIsNone(p.connection_lost(f)) + self.assertIsNone(p.data_received(f)) + self.assertIsNone(p.eof_received()) + + dp = asyncio.DatagramProtocol() + self.assertIsNone(dp.connection_made(f)) + self.assertIsNone(dp.connection_lost(f)) + self.assertIsNone(dp.error_received(f)) + self.assertIsNone(dp.datagram_received(f, f)) + + sp = asyncio.SubprocessProtocol() + self.assertIsNone(sp.connection_made(f)) + self.assertIsNone(sp.connection_lost(f)) + self.assertIsNone(sp.pipe_data_received(1, f)) + self.assertIsNone(sp.pipe_connection_lost(1, f)) + self.assertIsNone(sp.process_exited()) + + +class PolicyTests(test_utils.TestCase): + + def test_event_loop_policy(self): + policy = asyncio.AbstractEventLoopPolicy() + self.assertRaises(NotImplementedError, policy.get_event_loop) + self.assertRaises(NotImplementedError, policy.set_event_loop, object()) + self.assertRaises(NotImplementedError, policy.new_event_loop) + self.assertRaises(NotImplementedError, policy.get_child_watcher) + self.assertRaises(NotImplementedError, policy.set_child_watcher, + object()) + + def test_get_event_loop(self): + policy = asyncio.DefaultEventLoopPolicy() + self.assertIsNone(policy._local._loop) + + loop = policy.get_event_loop() + self.assertIsInstance(loop, asyncio.AbstractEventLoop) + + self.assertIs(policy._local._loop, loop) + self.assertIs(loop, policy.get_event_loop()) + loop.close() + + def test_get_event_loop_calls_set_event_loop(self): + policy = asyncio.DefaultEventLoopPolicy() + + with mock.patch.object( + policy, "set_event_loop", + wraps=policy.set_event_loop) as m_set_event_loop: + + loop = policy.get_event_loop() + + # policy._local._loop must be set through .set_event_loop() + # (the unix DefaultEventLoopPolicy needs this call to attach + # the child watcher correctly) + m_set_event_loop.assert_called_with(loop) + + loop.close() + + def test_get_event_loop_after_set_none(self): + policy = asyncio.DefaultEventLoopPolicy() + policy.set_event_loop(None) + self.assertRaises(RuntimeError, policy.get_event_loop) + + @mock.patch('trollius.events.threading.current_thread') + def test_get_event_loop_thread(self, m_current_thread): + + def f(): + policy = asyncio.DefaultEventLoopPolicy() + self.assertRaises(RuntimeError, policy.get_event_loop) + + th = threading.Thread(target=f) + th.start() + th.join() + + def test_new_event_loop(self): + policy = asyncio.DefaultEventLoopPolicy() + + loop = policy.new_event_loop() + self.assertIsInstance(loop, asyncio.AbstractEventLoop) + loop.close() + + def test_set_event_loop(self): + policy = asyncio.DefaultEventLoopPolicy() + old_loop = policy.get_event_loop() + + self.assertRaises(AssertionError, policy.set_event_loop, object()) + + loop = policy.new_event_loop() + policy.set_event_loop(loop) + self.assertIs(loop, policy.get_event_loop()) + self.assertIsNot(old_loop, policy.get_event_loop()) + loop.close() + old_loop.close() + + def test_get_event_loop_policy(self): + policy = 
asyncio.get_event_loop_policy()
+        self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
+        self.assertIs(policy, asyncio.get_event_loop_policy())
+
+    def test_set_event_loop_policy(self):
+        self.assertRaises(
+            AssertionError, asyncio.set_event_loop_policy, object())
+
+        old_policy = asyncio.get_event_loop_policy()
+
+        policy = asyncio.DefaultEventLoopPolicy()
+        asyncio.set_event_loop_policy(policy)
+        self.assertIs(policy, asyncio.get_event_loop_policy())
+        self.assertIsNot(policy, old_policy)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/test_futures.py b/tests/test_futures.py
new file mode 100644
index 00000000..78a097b2
--- /dev/null
+++ b/tests/test_futures.py
@@ -0,0 +1,458 @@
+"""Tests for futures.py."""
+
+try:
+    import concurrent.futures
+except ImportError:
+    concurrent = None
+import re
+import six
+import sys
+import threading
+
+import trollius as asyncio
+from trollius import From
+from trollius import compat
+from trollius import test_support as support
+from trollius import test_utils
+from trollius.test_utils import mock
+from trollius.test_utils import unittest
+
+
+def get_thread_ident():
+    return threading.current_thread().ident
+
+def _fakefunc(f):
+    return f
+
+def first_cb():
+    pass
+
+def last_cb():
+    pass
+
+
+class FutureTests(test_utils.TestCase):
+
+    def setUp(self):
+        self.loop = self.new_test_loop()
+        self.addCleanup(self.loop.close)
+
+    def test_initial_state(self):
+        f = asyncio.Future(loop=self.loop)
+        self.assertFalse(f.cancelled())
+        self.assertFalse(f.done())
+        f.cancel()
+        self.assertTrue(f.cancelled())
+
+    def test_init_constructor_default_loop(self):
+        asyncio.set_event_loop(self.loop)
+        f = asyncio.Future()
+        self.assertIs(f._loop, self.loop)
+
+    def test_cancel(self):
+        f = asyncio.Future(loop=self.loop)
+        self.assertTrue(f.cancel())
+        self.assertTrue(f.cancelled())
+        self.assertTrue(f.done())
+        self.assertRaises(asyncio.CancelledError, f.result)
+        self.assertRaises(asyncio.CancelledError, f.exception)
+        self.assertRaises(asyncio.InvalidStateError, f.set_result, None)
+        self.assertRaises(asyncio.InvalidStateError, f.set_exception, None)
+        self.assertFalse(f.cancel())
+
+    def test_result(self):
+        f = asyncio.Future(loop=self.loop)
+        self.assertRaises(asyncio.InvalidStateError, f.result)
+
+        f.set_result(42)
+        self.assertFalse(f.cancelled())
+        self.assertTrue(f.done())
+        self.assertEqual(f.result(), 42)
+        self.assertEqual(f.exception(), None)
+        self.assertRaises(asyncio.InvalidStateError, f.set_result, None)
+        self.assertRaises(asyncio.InvalidStateError, f.set_exception, None)
+        self.assertFalse(f.cancel())
+
+    def test_exception(self):
+        exc = RuntimeError()
+        f = asyncio.Future(loop=self.loop)
+        self.assertRaises(asyncio.InvalidStateError, f.exception)
+
+        f.set_exception(exc)
+        self.assertFalse(f.cancelled())
+        self.assertTrue(f.done())
+        self.assertRaises(RuntimeError, f.result)
+        self.assertEqual(f.exception(), exc)
+        self.assertRaises(asyncio.InvalidStateError, f.set_result, None)
+        self.assertRaises(asyncio.InvalidStateError, f.set_exception, None)
+        self.assertFalse(f.cancel())
+
+    def test_exception_class(self):
+        f = asyncio.Future(loop=self.loop)
+        f.set_exception(RuntimeError)
+        self.assertIsInstance(f.exception(), RuntimeError)
+
+    def test_future_repr(self):
+        self.loop.set_debug(True)
+        f_pending_debug = asyncio.Future(loop=self.loop)
+        frame = f_pending_debug._source_traceback[-1]
+        self.assertEqual(repr(f_pending_debug),
+                         '<Future pending created at %s:%s>'
+                         % (frame[0], frame[1]))
+        f_pending_debug.cancel()
+
+        self.loop.set_debug(False)
+        f_pending = asyncio.Future(loop=self.loop)
+        self.assertEqual(repr(f_pending), '<Future pending>')
+        f_pending.cancel()
+
+        f_cancelled = asyncio.Future(loop=self.loop)
+        f_cancelled.cancel()
+        self.assertEqual(repr(f_cancelled), '<Future cancelled>')
+
+        f_result = asyncio.Future(loop=self.loop)
+        f_result.set_result(4)
+        self.assertEqual(repr(f_result), '<Future finished result=4>')
+        self.assertEqual(f_result.result(), 4)
+
+        exc = RuntimeError()
+        f_exception = asyncio.Future(loop=self.loop)
+        f_exception.set_exception(exc)
+        self.assertEqual(repr(f_exception),
+                         '<Future finished exception=RuntimeError()>')
+        self.assertIs(f_exception.exception(), exc)
+
+        def func_repr(func):
+            filename, lineno = test_utils.get_function_source(func)
+            func_name = getattr(func, '__qualname__', func.__name__)
+            text = '%s() at %s:%s' % (func_name, filename, lineno)
+            return re.escape(text)
+
+        f_one_callbacks = asyncio.Future(loop=self.loop)
+        f_one_callbacks.add_done_callback(_fakefunc)
+        fake_repr = func_repr(_fakefunc)
+        self.assertRegex(repr(f_one_callbacks),
+                         r'<Future pending cb=\[%s\]>' % fake_repr)
+        f_one_callbacks.cancel()
+        self.assertEqual(repr(f_one_callbacks),
+                         '<Future cancelled>')
+
+        f_two_callbacks = asyncio.Future(loop=self.loop)
+        f_two_callbacks.add_done_callback(first_cb)
+        f_two_callbacks.add_done_callback(last_cb)
+        first_repr = func_repr(first_cb)
+        last_repr = func_repr(last_cb)
+        self.assertRegex(repr(f_two_callbacks),
+                         r'<Future pending cb=\[%s, %s\]>'
+                         % (first_repr, last_repr))
+
+        f_many_callbacks = asyncio.Future(loop=self.loop)
+        f_many_callbacks.add_done_callback(first_cb)
+        for i in range(8):
+            f_many_callbacks.add_done_callback(_fakefunc)
+        f_many_callbacks.add_done_callback(last_cb)
+        cb_regex = r'%s, <8 more>, %s' % (first_repr, last_repr)
+        self.assertRegex(repr(f_many_callbacks),
+                         r'<Future pending cb=\[%s\]>' % cb_regex)
+        f_many_callbacks.cancel()
+        self.assertEqual(repr(f_many_callbacks),
+                         '<Future cancelled>')
+
+    def test_copy_state(self):
+        # Test the internal _copy_state method since it's being directly
+        # invoked in other modules.
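+        # A done source future hands its result, exception or cancellation
+        # over to the destination future; the three cases below cover each.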
+ f = asyncio.Future(loop=self.loop) + f.set_result(10) + + newf = asyncio.Future(loop=self.loop) + newf._copy_state(f) + self.assertTrue(newf.done()) + self.assertEqual(newf.result(), 10) + + f_exception = asyncio.Future(loop=self.loop) + f_exception.set_exception(RuntimeError()) + + newf_exception = asyncio.Future(loop=self.loop) + newf_exception._copy_state(f_exception) + self.assertTrue(newf_exception.done()) + self.assertRaises(RuntimeError, newf_exception.result) + + f_cancelled = asyncio.Future(loop=self.loop) + f_cancelled.cancel() + + newf_cancelled = asyncio.Future(loop=self.loop) + newf_cancelled._copy_state(f_cancelled) + self.assertTrue(newf_cancelled.cancelled()) + + @mock.patch('trollius.base_events.logger') + def test_tb_logger_abandoned(self, m_log): + fut = asyncio.Future(loop=self.loop) + del fut + self.assertFalse(m_log.error.called) + + @mock.patch('trollius.base_events.logger') + def test_tb_logger_result_unretrieved(self, m_log): + fut = asyncio.Future(loop=self.loop) + fut.set_result(42) + del fut + self.assertFalse(m_log.error.called) + + @mock.patch('trollius.base_events.logger') + def test_tb_logger_result_retrieved(self, m_log): + fut = asyncio.Future(loop=self.loop) + fut.set_result(42) + fut.result() + del fut + self.assertFalse(m_log.error.called) + + @mock.patch('trollius.base_events.logger') + def test_tb_logger_exception_unretrieved(self, m_log): + self.loop.set_debug(True) + asyncio.set_event_loop(self.loop) + fut = asyncio.Future(loop=self.loop) + fut.set_exception(RuntimeError('boom')) + del fut + test_utils.run_briefly(self.loop) + support.gc_collect() + self.assertTrue(m_log.error.called) + + @mock.patch('trollius.base_events.logger') + def test_tb_logger_exception_retrieved(self, m_log): + fut = asyncio.Future(loop=self.loop) + fut.set_exception(RuntimeError('boom')) + fut.exception() + del fut + self.assertFalse(m_log.error.called) + + @mock.patch('trollius.base_events.logger') + def test_tb_logger_exception_result_retrieved(self, m_log): + fut = asyncio.Future(loop=self.loop) + fut.set_exception(RuntimeError('boom')) + self.assertRaises(RuntimeError, fut.result) + del fut + self.assertFalse(m_log.error.called) + + @unittest.skipIf(concurrent is None, 'need concurrent.futures') + def test_wrap_future(self): + + def run(arg): + return (arg, get_thread_ident()) + ex = concurrent.futures.ThreadPoolExecutor(1) + f1 = ex.submit(run, 'oi') + f2 = asyncio.wrap_future(f1, loop=self.loop) + res, ident = self.loop.run_until_complete(f2) + self.assertIsInstance(f2, asyncio.Future) + self.assertEqual(res, 'oi') + self.assertNotEqual(ident, get_thread_ident()) + + def test_wrap_future_future(self): + f1 = asyncio.Future(loop=self.loop) + f2 = asyncio.wrap_future(f1) + self.assertIs(f1, f2) + + @unittest.skipIf(concurrent is None, 'need concurrent.futures') + @mock.patch('trollius.futures.events') + def test_wrap_future_use_global_loop(self, m_events): + def run(arg): + return (arg, get_thread_ident()) + ex = concurrent.futures.ThreadPoolExecutor(1) + f1 = ex.submit(run, 'oi') + f2 = asyncio.wrap_future(f1) + self.assertIs(m_events.get_event_loop.return_value, f2._loop) + + @unittest.skipIf(concurrent is None, 'need concurrent.futures') + def test_wrap_future_cancel(self): + f1 = concurrent.futures.Future() + f2 = asyncio.wrap_future(f1, loop=self.loop) + f2.cancel() + test_utils.run_briefly(self.loop) + self.assertTrue(f1.cancelled()) + self.assertTrue(f2.cancelled()) + + @unittest.skipIf(concurrent is None, 'need concurrent.futures') + def 
test_wrap_future_cancel2(self):
+        f1 = concurrent.futures.Future()
+        f2 = asyncio.wrap_future(f1, loop=self.loop)
+        f1.set_result(42)
+        f2.cancel()
+        test_utils.run_briefly(self.loop)
+        self.assertFalse(f1.cancelled())
+        self.assertEqual(f1.result(), 42)
+        self.assertTrue(f2.cancelled())
+
+    def test_future_source_traceback(self):
+        self.loop.set_debug(True)
+
+        future = asyncio.Future(loop=self.loop)
+        lineno = sys._getframe().f_lineno - 1
+        self.assertIsInstance(future._source_traceback, list)
+        filename = sys._getframe().f_code.co_filename
+        self.assertEqual(future._source_traceback[-1][:3],
+                         (filename,
+                          lineno,
+                          'test_future_source_traceback'))
+
+    @mock.patch('trollius.base_events.logger')
+    def check_future_exception_never_retrieved(self, debug, m_log):
+        self.loop.set_debug(debug)
+
+        def memory_error():
+            try:
+                raise MemoryError()
+            except BaseException as exc:
+                return exc
+        exc = memory_error()
+
+        future = asyncio.Future(loop=self.loop)
+        if debug:
+            source_traceback = future._source_traceback
+        future.set_exception(exc)
+        future = None
+        test_utils.run_briefly(self.loop)
+        support.gc_collect()
+
+        if sys.version_info >= (3, 4):
+            if debug:
+                frame = source_traceback[-1]
+                regex = (r'^Future exception was never retrieved\n'
+                         r'future: <Future finished exception=MemoryError\(\)>\n'
+                         r'source_traceback: Object '
+                         r'created at \(most recent call last\):\n'
+                         r'  File'
+                         r'.*\n'
+                         r'  File "{filename}", line {lineno}, '
+                         r'in check_future_exception_never_retrieved\n'
+                         r'    future = asyncio\.Future\(loop=self\.loop\)$'
+                         ).format(filename=re.escape(frame[0]),
+                                  lineno=frame[1])
+            else:
+                regex = (r'^Future exception was never retrieved\n'
+                         r'future: <Future finished '
+                         r'exception=MemoryError\(\)>$'
+                         )
+            exc_info = (type(exc), exc, exc.__traceback__)
+            m_log.error.assert_called_once_with(mock.ANY, exc_info=exc_info)
+        else:
+            if debug:
+                frame = source_traceback[-1]
+                regex = (r'^Future/Task exception was never retrieved\n'
+                         r'Future/Task created at \(most recent call last\):\n'
+                         r'  File'
+                         r'.*\n'
+                         r'  File "{filename}", line {lineno}, '
+                         r'in check_future_exception_never_retrieved\n'
+                         r'    future = asyncio\.Future\(loop=self\.loop\)\n'
+                         r'Traceback \(most recent call last\):\n'
+                         r'.*\n'
+                         r'MemoryError$'
+                         ).format(filename=re.escape(frame[0]),
+                                  lineno=frame[1])
+            elif six.PY3:
+                regex = (r'^Future/Task exception was never retrieved\n'
+                         r'Traceback \(most recent call last\):\n'
+                         r'.*\n'
+                         r'MemoryError$'
+                         )
+            else:
+                regex = (r'^Future/Task exception was never retrieved\n'
+                         r'MemoryError$'
+                         )
+            m_log.error.assert_called_once_with(mock.ANY, exc_info=False)
+        message = m_log.error.call_args[0][0]
+        self.assertRegex(message, re.compile(regex, re.DOTALL))
+
+    def test_future_exception_never_retrieved(self):
+        self.check_future_exception_never_retrieved(False)
+
+    def test_future_exception_never_retrieved_debug(self):
+        self.check_future_exception_never_retrieved(True)
+
+    def test_set_result_unless_cancelled(self):
+        fut = asyncio.Future(loop=self.loop)
+        fut.cancel()
+        fut._set_result_unless_cancelled(2)
+        self.assertTrue(fut.cancelled())
+
+
+class FutureDoneCallbackTests(test_utils.TestCase):
+
+    def setUp(self):
+        self.loop = self.new_test_loop()
+
+    def run_briefly(self):
+        test_utils.run_briefly(self.loop)
+
+    def _make_callback(self, bag, thing):
+        # Create a callback function that appends thing to bag.
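+        # The done future passed to the callback is deliberately ignored;
+        # the tests below only care about which callbacks ran, and in what
+        # order.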
+        def bag_appender(future):
+            bag.append(thing)
+        return bag_appender
+
+    def _new_future(self):
+        return asyncio.Future(loop=self.loop)
+
+    def test_callbacks_invoked_on_set_result(self):
+        bag = []
+        f = self._new_future()
+        f.add_done_callback(self._make_callback(bag, 42))
+        f.add_done_callback(self._make_callback(bag, 17))
+
+        self.assertEqual(bag, [])
+        f.set_result('foo')
+
+        self.run_briefly()
+
+        self.assertEqual(bag, [42, 17])
+        self.assertEqual(f.result(), 'foo')
+
+    def test_callbacks_invoked_on_set_exception(self):
+        bag = []
+        f = self._new_future()
+        f.add_done_callback(self._make_callback(bag, 100))
+
+        self.assertEqual(bag, [])
+        exc = RuntimeError()
+        f.set_exception(exc)
+
+        self.run_briefly()
+
+        self.assertEqual(bag, [100])
+        self.assertEqual(f.exception(), exc)
+
+    def test_remove_done_callback(self):
+        bag = []
+        f = self._new_future()
+        cb1 = self._make_callback(bag, 1)
+        cb2 = self._make_callback(bag, 2)
+        cb3 = self._make_callback(bag, 3)
+
+        # Add one cb1 and one cb2.
+        f.add_done_callback(cb1)
+        f.add_done_callback(cb2)
+
+        # One instance of cb2 removed. Now there's only one cb1.
+        self.assertEqual(f.remove_done_callback(cb2), 1)
+
+        # Never had any cb3 in there.
+        self.assertEqual(f.remove_done_callback(cb3), 0)
+
+        # After this there will be 6 instances of cb1 and one of cb2.
+        f.add_done_callback(cb2)
+        for i in range(5):
+            f.add_done_callback(cb1)
+
+        # Remove all instances of cb1. One cb2 remains.
+        self.assertEqual(f.remove_done_callback(cb1), 6)
+
+        self.assertEqual(bag, [])
+        f.set_result('foo')
+
+        self.run_briefly()
+
+        self.assertEqual(bag, [2])
+        self.assertEqual(f.result(), 'foo')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/test_locks.py b/tests/test_locks.py
new file mode 100644
index 00000000..71a6cb36
--- /dev/null
+++ b/tests/test_locks.py
@@ -0,0 +1,865 @@
+"""Tests for lock.py"""
+
+import re
+
+import trollius as asyncio
+from trollius import From, Return
+from trollius import test_utils
+from trollius.test_utils import mock
+from trollius.test_utils import unittest
+
+
+STR_RGX_REPR = (
+    r'^<(?P<class>.*?) object at (?P<address>
.*?)'
+    r'\[(?P<extras>'
+    r'(set|unset|locked|unlocked)(,value:\d)?(,waiters:\d+)?'
+    r')\]>\Z'
+)
+RGX_REPR = re.compile(STR_RGX_REPR)
+
+
+class LockTests(test_utils.TestCase):
+
+    def setUp(self):
+        self.loop = self.new_test_loop()
+
+    def test_ctor_loop(self):
+        loop = mock.Mock()
+        lock = asyncio.Lock(loop=loop)
+        self.assertIs(lock._loop, loop)
+
+        lock = asyncio.Lock(loop=self.loop)
+        self.assertIs(lock._loop, self.loop)
+
+    def test_ctor_noloop(self):
+        asyncio.set_event_loop(self.loop)
+        lock = asyncio.Lock()
+        self.assertIs(lock._loop, self.loop)
+
+    def test_repr(self):
+        lock = asyncio.Lock(loop=self.loop)
+        self.assertTrue(repr(lock).endswith('[unlocked]>'))
+        self.assertTrue(RGX_REPR.match(repr(lock)))
+
+        @asyncio.coroutine
+        def acquire_lock():
+            yield From(lock.acquire())
+
+        self.loop.run_until_complete(acquire_lock())
+        self.assertTrue(repr(lock).endswith('[locked]>'))
+        self.assertTrue(RGX_REPR.match(repr(lock)))
+
+    def test_lock(self):
+        lock = asyncio.Lock(loop=self.loop)
+
+        @asyncio.coroutine
+        def acquire_lock():
+            yield From(lock.acquire())
+            raise Return(lock)
+
+        res = self.loop.run_until_complete(acquire_lock())
+
+        self.assertTrue(res)
+        self.assertTrue(lock.locked())
+
+        lock.release()
+        self.assertFalse(lock.locked())
+
+    def test_acquire(self):
+        lock = asyncio.Lock(loop=self.loop)
+        result = []
+
+        self.assertTrue(self.loop.run_until_complete(lock.acquire()))
+
+        @asyncio.coroutine
+        def c1(result):
+            if (yield From(lock.acquire())):
+                result.append(1)
+                raise Return(True)
+
+        @asyncio.coroutine
+        def c2(result):
+            if (yield From(lock.acquire())):
+                result.append(2)
+                raise Return(True)
+
+        @asyncio.coroutine
+        def c3(result):
+            if (yield From(lock.acquire())):
+                result.append(3)
+                raise Return(True)
+
+        t1 = asyncio.Task(c1(result), loop=self.loop)
+        t2 = asyncio.Task(c2(result), loop=self.loop)
+
+        test_utils.run_briefly(self.loop)
+        self.assertEqual([], result)
+
+        lock.release()
+        test_utils.run_briefly(self.loop)
+        self.assertEqual([1], result)
+
+        test_utils.run_briefly(self.loop)
+        self.assertEqual([1], result)
+
+        t3 = asyncio.Task(c3(result), loop=self.loop)
+
+        lock.release()
+        test_utils.run_briefly(self.loop)
+        self.assertEqual([1, 2], result)
+
+        lock.release()
+        test_utils.run_briefly(self.loop)
+        self.assertEqual([1, 2, 3], result)
+
+        self.assertTrue(t1.done())
+        self.assertTrue(t1.result())
+        self.assertTrue(t2.done())
+        self.assertTrue(t2.result())
+        self.assertTrue(t3.done())
+        self.assertTrue(t3.result())
+
+    def test_acquire_cancel(self):
+        lock = asyncio.Lock(loop=self.loop)
+        self.assertTrue(self.loop.run_until_complete(lock.acquire()))
+
+        task = asyncio.Task(lock.acquire(), loop=self.loop)
+        self.loop.call_soon(task.cancel)
+        self.assertRaises(
+            asyncio.CancelledError,
+            self.loop.run_until_complete, task)
+        self.assertFalse(lock._waiters)
+
+    def test_cancel_race(self):
+        # Several tasks:
+        # - A acquires the lock
+        # - B is blocked in acquire()
+        # - C is blocked in acquire()
+        #
+        # Now, concurrently:
+        # - B is cancelled
+        # - A releases the lock
+        #
+        # If B's waiter is marked cancelled but not yet removed from
+        # _waiters, A's release() call will crash when trying to set
+        # B's waiter; instead, it should move on to C's waiter.
+
+        # Setup: A has the lock, B and C are waiting.
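+        # Note: each lockit() coroutine below needs two brief runs of the
+        # event loop to reach its blocking point, hence run_briefly(..., 2).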
+ lock = asyncio.Lock(loop=self.loop) + + @asyncio.coroutine + def lockit(name, blocker): + yield From(lock.acquire()) + try: + if blocker is not None: + yield From(blocker) + finally: + lock.release() + + fa = asyncio.Future(loop=self.loop) + ta = asyncio.Task(lockit('A', fa), loop=self.loop) + test_utils.run_briefly(self.loop, 2) + self.assertTrue(lock.locked()) + tb = asyncio.Task(lockit('B', None), loop=self.loop) + test_utils.run_briefly(self.loop, 2) + self.assertEqual(len(lock._waiters), 1) + tc = asyncio.Task(lockit('C', None), loop=self.loop) + test_utils.run_briefly(self.loop, 2) + self.assertEqual(len(lock._waiters), 2) + + # Create the race and check. + # Without the fix this failed at the last assert. + fa.set_result(None) + tb.cancel() + self.assertTrue(lock._waiters[0].cancelled()) + test_utils.run_briefly(self.loop, 2) + self.assertFalse(lock.locked()) + self.assertTrue(ta.done()) + self.assertTrue(tb.cancelled()) + self.assertTrue(tc.done()) + + def test_release_not_acquired(self): + lock = asyncio.Lock(loop=self.loop) + + self.assertRaises(RuntimeError, lock.release) + + def test_release_no_waiters(self): + lock = asyncio.Lock(loop=self.loop) + self.loop.run_until_complete(lock.acquire()) + self.assertTrue(lock.locked()) + + lock.release() + self.assertFalse(lock.locked()) + + def test_context_manager(self): + lock = asyncio.Lock(loop=self.loop) + + @asyncio.coroutine + def acquire_lock(): + raise Return((yield From(lock))) + + with self.loop.run_until_complete(acquire_lock()): + self.assertTrue(lock.locked()) + + self.assertFalse(lock.locked()) + + def test_context_manager_cant_reuse(self): + lock = asyncio.Lock(loop=self.loop) + + @asyncio.coroutine + def acquire_lock(): + raise Return((yield From(lock))) + + # This spells "yield From(lock)" outside a generator. 
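+        # The context manager object is single-use: exiting it releases the
+        # lock and clears the manager's reference to it, so the second
+        # "with cm" block below fails with AttributeError.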
+ cm = self.loop.run_until_complete(acquire_lock()) + with cm: + self.assertTrue(lock.locked()) + + self.assertFalse(lock.locked()) + + with self.assertRaises(AttributeError): + with cm: + pass + + def test_context_manager_no_yield(self): + lock = asyncio.Lock(loop=self.loop) + + try: + with lock: + self.fail('RuntimeError is not raised in with expression') + except RuntimeError as err: + self.assertEqual( + str(err), + '"yield From" should be used as context manager expression') + + self.assertFalse(lock.locked()) + + +class EventTests(test_utils.TestCase): + + def setUp(self): + self.loop = self.new_test_loop() + + def test_ctor_loop(self): + loop = mock.Mock() + ev = asyncio.Event(loop=loop) + self.assertIs(ev._loop, loop) + + ev = asyncio.Event(loop=self.loop) + self.assertIs(ev._loop, self.loop) + + def test_ctor_noloop(self): + asyncio.set_event_loop(self.loop) + ev = asyncio.Event() + self.assertIs(ev._loop, self.loop) + + def test_repr(self): + ev = asyncio.Event(loop=self.loop) + self.assertTrue(repr(ev).endswith('[unset]>')) + match = RGX_REPR.match(repr(ev)) + self.assertEqual(match.group('extras'), 'unset') + + ev.set() + self.assertTrue(repr(ev).endswith('[set]>')) + self.assertTrue(RGX_REPR.match(repr(ev))) + + ev._waiters.append(mock.Mock()) + self.assertTrue('waiters:1' in repr(ev)) + self.assertTrue(RGX_REPR.match(repr(ev))) + + def test_wait(self): + ev = asyncio.Event(loop=self.loop) + self.assertFalse(ev.is_set()) + + result = [] + + @asyncio.coroutine + def c1(result): + if (yield From(ev.wait())): + result.append(1) + + @asyncio.coroutine + def c2(result): + if (yield From(ev.wait())): + result.append(2) + + @asyncio.coroutine + def c3(result): + if (yield From(ev.wait())): + result.append(3) + + t1 = asyncio.Task(c1(result), loop=self.loop) + t2 = asyncio.Task(c2(result), loop=self.loop) + + test_utils.run_briefly(self.loop, 2) + self.assertEqual([], result) + + t3 = asyncio.Task(c3(result), loop=self.loop) + + ev.set() + test_utils.run_briefly(self.loop, 2) + self.assertEqual([1, 2, 3], result) + + self.assertTrue(t1.done()) + self.assertIsNone(t1.result()) + self.assertTrue(t2.done()) + self.assertIsNone(t2.result()) + self.assertTrue(t3.done()) + self.assertIsNone(t3.result()) + + def test_wait_on_set(self): + ev = asyncio.Event(loop=self.loop) + ev.set() + + res = self.loop.run_until_complete(ev.wait()) + self.assertTrue(res) + + def test_wait_cancel(self): + ev = asyncio.Event(loop=self.loop) + + wait = asyncio.Task(ev.wait(), loop=self.loop) + self.loop.call_soon(wait.cancel) + self.assertRaises( + asyncio.CancelledError, + self.loop.run_until_complete, wait) + self.assertFalse(ev._waiters) + + def test_clear(self): + ev = asyncio.Event(loop=self.loop) + self.assertFalse(ev.is_set()) + + ev.set() + self.assertTrue(ev.is_set()) + + ev.clear() + self.assertFalse(ev.is_set()) + + def test_clear_with_waiters(self): + ev = asyncio.Event(loop=self.loop) + result = [] + + @asyncio.coroutine + def c1(result): + if (yield From(ev.wait())): + result.append(1) + raise Return(True) + + t = asyncio.Task(c1(result), loop=self.loop) + test_utils.run_briefly(self.loop) + self.assertEqual([], result) + + ev.set() + ev.clear() + self.assertFalse(ev.is_set()) + + ev.set() + ev.set() + self.assertEqual(1, len(ev._waiters)) + + test_utils.run_briefly(self.loop) + self.assertEqual([1], result) + self.assertEqual(0, len(ev._waiters)) + + self.assertTrue(t.done()) + self.assertTrue(t.result()) + + +class ConditionTests(test_utils.TestCase): + + def setUp(self): + self.loop = 
self.new_test_loop() + + def test_ctor_loop(self): + loop = mock.Mock() + cond = asyncio.Condition(loop=loop) + self.assertIs(cond._loop, loop) + + cond = asyncio.Condition(loop=self.loop) + self.assertIs(cond._loop, self.loop) + + def test_ctor_noloop(self): + asyncio.set_event_loop(self.loop) + cond = asyncio.Condition() + self.assertIs(cond._loop, self.loop) + + def test_wait(self): + cond = asyncio.Condition(loop=self.loop) + result = [] + + @asyncio.coroutine + def c1(result): + yield From(cond.acquire()) + if (yield From(cond.wait())): + result.append(1) + raise Return(True) + + @asyncio.coroutine + def c2(result): + yield From(cond.acquire()) + if (yield From(cond.wait())): + result.append(2) + raise Return(True) + + @asyncio.coroutine + def c3(result): + yield From(cond.acquire()) + if (yield From(cond.wait())): + result.append(3) + raise Return(True) + + t1 = asyncio.Task(c1(result), loop=self.loop) + t2 = asyncio.Task(c2(result), loop=self.loop) + t3 = asyncio.Task(c3(result), loop=self.loop) + + test_utils.run_briefly(self.loop, 2) + self.assertEqual([], result) + self.assertFalse(cond.locked()) + + self.assertTrue(self.loop.run_until_complete(cond.acquire())) + cond.notify() + test_utils.run_briefly(self.loop, 2) + self.assertEqual([], result) + self.assertTrue(cond.locked()) + + cond.release() + test_utils.run_briefly(self.loop, 2) + self.assertEqual([1], result) + self.assertTrue(cond.locked()) + + cond.notify(2) + test_utils.run_briefly(self.loop, 2) + self.assertEqual([1], result) + self.assertTrue(cond.locked()) + + cond.release() + test_utils.run_briefly(self.loop, 2) + self.assertEqual([1, 2], result) + self.assertTrue(cond.locked()) + + cond.release() + test_utils.run_briefly(self.loop, 2) + self.assertEqual([1, 2, 3], result) + self.assertTrue(cond.locked()) + + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + self.assertTrue(t3.done()) + self.assertTrue(t3.result()) + + def test_wait_cancel(self): + cond = asyncio.Condition(loop=self.loop) + self.loop.run_until_complete(cond.acquire()) + + wait = asyncio.Task(cond.wait(), loop=self.loop) + self.loop.call_soon(wait.cancel) + self.assertRaises( + asyncio.CancelledError, + self.loop.run_until_complete, wait) + self.assertFalse(cond._waiters) + self.assertTrue(cond.locked()) + + def test_wait_unacquired(self): + cond = asyncio.Condition(loop=self.loop) + self.assertRaises( + RuntimeError, + self.loop.run_until_complete, cond.wait()) + + def test_wait_for(self): + cond = asyncio.Condition(loop=self.loop) + presult = False + + def predicate(): + return presult + + result = [] + + @asyncio.coroutine + def c1(result): + yield From(cond.acquire()) + if (yield From(cond.wait_for(predicate))): + result.append(1) + cond.release() + raise Return(True) + + t = asyncio.Task(c1(result), loop=self.loop) + + test_utils.run_briefly(self.loop) + self.assertEqual([], result) + + self.loop.run_until_complete(cond.acquire()) + cond.notify() + cond.release() + test_utils.run_briefly(self.loop) + self.assertEqual([], result) + + presult = True + self.loop.run_until_complete(cond.acquire()) + cond.notify() + cond.release() + test_utils.run_briefly(self.loop) + self.assertEqual([1], result) + + self.assertTrue(t.done()) + self.assertTrue(t.result()) + + def test_wait_for_unacquired(self): + cond = asyncio.Condition(loop=self.loop) + + # predicate can return true immediately + res = self.loop.run_until_complete(cond.wait_for(lambda: [1, 2, 3])) + self.assertEqual([1, 2, 3], 
res) + + self.assertRaises( + RuntimeError, + self.loop.run_until_complete, + cond.wait_for(lambda: False)) + + def test_notify(self): + cond = asyncio.Condition(loop=self.loop) + result = [] + + @asyncio.coroutine + def c1(result): + yield From(cond.acquire()) + if (yield From(cond.wait())): + result.append(1) + cond.release() + raise Return(True) + + @asyncio.coroutine + def c2(result): + yield From(cond.acquire()) + if (yield From(cond.wait())): + result.append(2) + cond.release() + raise Return(True) + + @asyncio.coroutine + def c3(result): + yield From(cond.acquire()) + if (yield From(cond.wait())): + result.append(3) + cond.release() + raise Return(True) + + t1 = asyncio.Task(c1(result), loop=self.loop) + t2 = asyncio.Task(c2(result), loop=self.loop) + t3 = asyncio.Task(c3(result), loop=self.loop) + + test_utils.run_briefly(self.loop) + self.assertEqual([], result) + + self.loop.run_until_complete(cond.acquire()) + cond.notify(1) + cond.release() + # each coroutine requires 2 runs of the event loop + test_utils.run_briefly(self.loop, 2) + self.assertEqual([1], result) + + self.loop.run_until_complete(cond.acquire()) + cond.notify(1) + cond.notify(2048) + cond.release() + # each coroutine requires 2 runs of the event loop + test_utils.run_briefly(self.loop, 4) + self.assertEqual([1, 2, 3], result) + + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + self.assertTrue(t3.done()) + self.assertTrue(t3.result()) + + def test_notify_all(self): + cond = asyncio.Condition(loop=self.loop) + + result = [] + + @asyncio.coroutine + def c1(result): + yield From(cond.acquire()) + if (yield From(cond.wait())): + result.append(1) + cond.release() + raise Return(True) + + @asyncio.coroutine + def c2(result): + yield From(cond.acquire()) + if (yield From(cond.wait())): + result.append(2) + cond.release() + raise Return(True) + + t1 = asyncio.Task(c1(result), loop=self.loop) + t2 = asyncio.Task(c2(result), loop=self.loop) + + test_utils.run_briefly(self.loop) + self.assertEqual([], result) + + self.loop.run_until_complete(cond.acquire()) + cond.notify_all() + cond.release() + # each coroutine requires 2 runs of the event loop + test_utils.run_briefly(self.loop, 4) + self.assertEqual([1, 2], result) + + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + + def test_notify_unacquired(self): + cond = asyncio.Condition(loop=self.loop) + self.assertRaises(RuntimeError, cond.notify) + + def test_notify_all_unacquired(self): + cond = asyncio.Condition(loop=self.loop) + self.assertRaises(RuntimeError, cond.notify_all) + + def test_repr(self): + cond = asyncio.Condition(loop=self.loop) + self.assertTrue('unlocked' in repr(cond)) + self.assertTrue(RGX_REPR.match(repr(cond))) + + self.loop.run_until_complete(cond.acquire()) + self.assertTrue('locked' in repr(cond)) + + cond._waiters.append(mock.Mock()) + self.assertTrue('waiters:1' in repr(cond)) + self.assertTrue(RGX_REPR.match(repr(cond))) + + cond._waiters.append(mock.Mock()) + self.assertTrue('waiters:2' in repr(cond)) + self.assertTrue(RGX_REPR.match(repr(cond))) + + def test_context_manager(self): + cond = asyncio.Condition(loop=self.loop) + + @asyncio.coroutine + def acquire_cond(): + raise Return((yield From(cond))) + + with self.loop.run_until_complete(acquire_cond()): + self.assertTrue(cond.locked()) + + self.assertFalse(cond.locked()) + + def test_context_manager_no_yield(self): + cond = 
asyncio.Condition(loop=self.loop) + + try: + with cond: + self.fail('RuntimeError is not raised in with expression') + except RuntimeError as err: + self.assertEqual( + str(err), + '"yield From" should be used as context manager expression') + + self.assertFalse(cond.locked()) + + def test_explicit_lock(self): + lock = asyncio.Lock(loop=self.loop) + cond = asyncio.Condition(lock, loop=self.loop) + + self.assertIs(cond._lock, lock) + self.assertIs(cond._loop, lock._loop) + + def test_ambiguous_loops(self): + loop = self.new_test_loop() + self.addCleanup(loop.close) + + lock = asyncio.Lock(loop=self.loop) + with self.assertRaises(ValueError): + asyncio.Condition(lock, loop=loop) + + +class SemaphoreTests(test_utils.TestCase): + + def setUp(self): + self.loop = self.new_test_loop() + + def test_ctor_loop(self): + loop = mock.Mock() + sem = asyncio.Semaphore(loop=loop) + self.assertIs(sem._loop, loop) + + sem = asyncio.Semaphore(loop=self.loop) + self.assertIs(sem._loop, self.loop) + + def test_ctor_noloop(self): + asyncio.set_event_loop(self.loop) + sem = asyncio.Semaphore() + self.assertIs(sem._loop, self.loop) + + def test_initial_value_zero(self): + sem = asyncio.Semaphore(0, loop=self.loop) + self.assertTrue(sem.locked()) + + def test_repr(self): + sem = asyncio.Semaphore(loop=self.loop) + self.assertTrue(repr(sem).endswith('[unlocked,value:1]>')) + self.assertTrue(RGX_REPR.match(repr(sem))) + + self.loop.run_until_complete(sem.acquire()) + self.assertTrue(repr(sem).endswith('[locked]>')) + self.assertTrue('waiters' not in repr(sem)) + self.assertTrue(RGX_REPR.match(repr(sem))) + + sem._waiters.append(mock.Mock()) + self.assertTrue('waiters:1' in repr(sem)) + self.assertTrue(RGX_REPR.match(repr(sem))) + + sem._waiters.append(mock.Mock()) + self.assertTrue('waiters:2' in repr(sem)) + self.assertTrue(RGX_REPR.match(repr(sem))) + + def test_semaphore(self): + sem = asyncio.Semaphore(loop=self.loop) + self.assertEqual(1, sem._value) + + @asyncio.coroutine + def acquire_lock(): + yield From(sem.acquire()) + raise Return(sem) + + res = self.loop.run_until_complete(acquire_lock()) + + self.assertTrue(res) + self.assertTrue(sem.locked()) + self.assertEqual(0, sem._value) + + sem.release() + self.assertFalse(sem.locked()) + self.assertEqual(1, sem._value) + + def test_semaphore_value(self): + self.assertRaises(ValueError, asyncio.Semaphore, -1) + + def test_acquire(self): + sem = asyncio.Semaphore(3, loop=self.loop) + result = [] + + self.assertTrue(self.loop.run_until_complete(sem.acquire())) + self.assertTrue(self.loop.run_until_complete(sem.acquire())) + self.assertFalse(sem.locked()) + + @asyncio.coroutine + def c1(result): + yield From(sem.acquire()) + result.append(1) + raise Return(True) + + @asyncio.coroutine + def c2(result): + yield From(sem.acquire()) + result.append(2) + raise Return(True) + + @asyncio.coroutine + def c3(result): + yield From(sem.acquire()) + result.append(3) + raise Return(True) + + @asyncio.coroutine + def c4(result): + yield From(sem.acquire()) + result.append(4) + raise Return(True) + + t1 = asyncio.Task(c1(result), loop=self.loop) + t2 = asyncio.Task(c2(result), loop=self.loop) + t3 = asyncio.Task(c3(result), loop=self.loop) + + # each coroutine requires 2 runs of the event loop + test_utils.run_briefly(self.loop, 2) + self.assertEqual([1], result) + self.assertTrue(sem.locked()) + self.assertEqual(2, len(sem._waiters)) + self.assertEqual(0, sem._value) + + t4 = asyncio.Task(c4(result), loop=self.loop) + + sem.release() + sem.release() + self.assertEqual(2, 
sem._value) + + test_utils.run_briefly(self.loop) + self.assertEqual(0, sem._value) + self.assertEqual([1, 2, 3], result) + self.assertTrue(sem.locked()) + self.assertEqual(1, len(sem._waiters)) + self.assertEqual(0, sem._value) + + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + self.assertTrue(t3.done()) + self.assertTrue(t3.result()) + self.assertFalse(t4.done()) + + # cleanup locked semaphore + sem.release() + self.loop.run_until_complete(t4) + + def test_acquire_cancel(self): + sem = asyncio.Semaphore(loop=self.loop) + self.loop.run_until_complete(sem.acquire()) + + acquire = asyncio.Task(sem.acquire(), loop=self.loop) + self.loop.call_soon(acquire.cancel) + self.assertRaises( + asyncio.CancelledError, + self.loop.run_until_complete, acquire) + self.assertFalse(sem._waiters) + + def test_release_not_acquired(self): + sem = asyncio.BoundedSemaphore(loop=self.loop) + + self.assertRaises(ValueError, sem.release) + + def test_release_no_waiters(self): + sem = asyncio.Semaphore(loop=self.loop) + self.loop.run_until_complete(sem.acquire()) + self.assertTrue(sem.locked()) + + sem.release() + self.assertFalse(sem.locked()) + + def test_context_manager(self): + sem = asyncio.Semaphore(2, loop=self.loop) + + @asyncio.coroutine + def acquire_lock(): + raise Return((yield From(sem))) + + with self.loop.run_until_complete(acquire_lock()): + self.assertFalse(sem.locked()) + self.assertEqual(1, sem._value) + + with self.loop.run_until_complete(acquire_lock()): + self.assertTrue(sem.locked()) + + self.assertEqual(2, sem._value) + + def test_context_manager_no_yield(self): + sem = asyncio.Semaphore(2, loop=self.loop) + + try: + with sem: + self.fail('RuntimeError is not raised in with expression') + except RuntimeError as err: + self.assertEqual( + str(err), + '"yield From" should be used as context manager expression') + + self.assertEqual(2, sem._value) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_proactor_events.py b/tests/test_proactor_events.py new file mode 100644 index 00000000..ceb28e2b --- /dev/null +++ b/tests/test_proactor_events.py @@ -0,0 +1,592 @@ +"""Tests for proactor_events.py""" + +import socket + +from trollius import test_utils +from trollius.proactor_events import BaseProactorEventLoop +from trollius.proactor_events import _ProactorDuplexPipeTransport +from trollius.proactor_events import _ProactorSocketTransport +from trollius.proactor_events import _ProactorWritePipeTransport +from trollius.py33_exceptions import ConnectionAbortedError, ConnectionResetError +from trollius.test_utils import mock +from trollius.test_utils import unittest +import trollius as asyncio + + +def close_transport(transport): + # Don't call transport.close() because the event loop and the IOCP proactor + # are mocked + if transport._sock is None: + return + transport._sock.close() + transport._sock = None + + +class ProactorSocketTransportTests(test_utils.TestCase): + + def setUp(self): + self.loop = self.new_test_loop() + self.addCleanup(self.loop.close) + self.proactor = mock.Mock() + self.loop._proactor = self.proactor + self.protocol = test_utils.make_test_protocol(asyncio.Protocol) + self.sock = mock.Mock(socket.socket) + + def socket_transport(self, waiter=None): + transport = _ProactorSocketTransport(self.loop, self.sock, + self.protocol, waiter=waiter) + self.addCleanup(close_transport, transport) + return transport + + def test_ctor(self): + fut = asyncio.Future(loop=self.loop) + tr = 
self.socket_transport(waiter=fut) + test_utils.run_briefly(self.loop) + self.assertIsNone(fut.result()) + self.protocol.connection_made(tr) + self.proactor.recv.assert_called_with(self.sock, 4096) + + def test_loop_reading(self): + tr = self.socket_transport() + tr._loop_reading() + self.loop._proactor.recv.assert_called_with(self.sock, 4096) + self.assertFalse(self.protocol.data_received.called) + self.assertFalse(self.protocol.eof_received.called) + + def test_loop_reading_data(self): + res = asyncio.Future(loop=self.loop) + res.set_result(b'data') + + tr = self.socket_transport() + tr._read_fut = res + tr._loop_reading(res) + self.loop._proactor.recv.assert_called_with(self.sock, 4096) + self.protocol.data_received.assert_called_with(b'data') + + def test_loop_reading_no_data(self): + res = asyncio.Future(loop=self.loop) + res.set_result(b'') + + tr = self.socket_transport() + self.assertRaises(AssertionError, tr._loop_reading, res) + + tr.close = mock.Mock() + tr._read_fut = res + tr._loop_reading(res) + self.assertFalse(self.loop._proactor.recv.called) + self.assertTrue(self.protocol.eof_received.called) + self.assertTrue(tr.close.called) + + def test_loop_reading_aborted(self): + err = self.loop._proactor.recv.side_effect = ConnectionAbortedError() + + tr = self.socket_transport() + tr._fatal_error = mock.Mock() + tr._loop_reading() + tr._fatal_error.assert_called_with( + err, + 'Fatal read error on pipe transport') + + def test_loop_reading_aborted_closing(self): + self.loop._proactor.recv.side_effect = ConnectionAbortedError() + + tr = self.socket_transport() + tr._closing = True + tr._fatal_error = mock.Mock() + tr._loop_reading() + self.assertFalse(tr._fatal_error.called) + + def test_loop_reading_aborted_is_fatal(self): + self.loop._proactor.recv.side_effect = ConnectionAbortedError() + tr = self.socket_transport() + tr._closing = False + tr._fatal_error = mock.Mock() + tr._loop_reading() + self.assertTrue(tr._fatal_error.called) + + def test_loop_reading_conn_reset_lost(self): + err = self.loop._proactor.recv.side_effect = ConnectionResetError() + + tr = self.socket_transport() + tr._closing = False + tr._fatal_error = mock.Mock() + tr._force_close = mock.Mock() + tr._loop_reading() + self.assertFalse(tr._fatal_error.called) + tr._force_close.assert_called_with(err) + + def test_loop_reading_exception(self): + err = self.loop._proactor.recv.side_effect = (OSError()) + + tr = self.socket_transport() + tr._fatal_error = mock.Mock() + tr._loop_reading() + tr._fatal_error.assert_called_with( + err, + 'Fatal read error on pipe transport') + + def test_write(self): + tr = self.socket_transport() + tr._loop_writing = mock.Mock() + tr.write(b'data') + self.assertEqual(tr._buffer, None) + tr._loop_writing.assert_called_with(data=b'data') + + def test_write_no_data(self): + tr = self.socket_transport() + tr.write(b'') + self.assertFalse(tr._buffer) + + def test_write_more(self): + tr = self.socket_transport() + tr._write_fut = mock.Mock() + tr._loop_writing = mock.Mock() + tr.write(b'data') + self.assertEqual(tr._buffer, b'data') + self.assertFalse(tr._loop_writing.called) + + def test_loop_writing(self): + tr = self.socket_transport() + tr._buffer = bytearray(b'data') + tr._loop_writing() + self.loop._proactor.send.assert_called_with(self.sock, b'data') + self.loop._proactor.send.return_value.add_done_callback.\ + assert_called_with(tr._loop_writing) + + @mock.patch('trollius.proactor_events.logger') + def test_loop_writing_err(self, m_log): + err = self.loop._proactor.send.side_effect 
= OSError() + tr = self.socket_transport() + tr._fatal_error = mock.Mock() + tr._buffer = [b'da', b'ta'] + tr._loop_writing() + tr._fatal_error.assert_called_with( + err, + 'Fatal write error on pipe transport') + tr._conn_lost = 1 + + tr.write(b'data') + tr.write(b'data') + tr.write(b'data') + tr.write(b'data') + tr.write(b'data') + self.assertEqual(tr._buffer, None) + m_log.warning.assert_called_with('socket.send() raised exception.') + + def test_loop_writing_stop(self): + fut = asyncio.Future(loop=self.loop) + fut.set_result(b'data') + + tr = self.socket_transport() + tr._write_fut = fut + tr._loop_writing(fut) + self.assertIsNone(tr._write_fut) + + def test_loop_writing_closing(self): + fut = asyncio.Future(loop=self.loop) + fut.set_result(1) + + tr = self.socket_transport() + tr._write_fut = fut + tr.close() + tr._loop_writing(fut) + self.assertIsNone(tr._write_fut) + test_utils.run_briefly(self.loop) + self.protocol.connection_lost.assert_called_with(None) + + def test_abort(self): + tr = self.socket_transport() + tr._force_close = mock.Mock() + tr.abort() + tr._force_close.assert_called_with(None) + + def test_close(self): + tr = self.socket_transport() + tr.close() + test_utils.run_briefly(self.loop) + self.protocol.connection_lost.assert_called_with(None) + self.assertTrue(tr._closing) + self.assertEqual(tr._conn_lost, 1) + + self.protocol.connection_lost.reset_mock() + tr.close() + test_utils.run_briefly(self.loop) + self.assertFalse(self.protocol.connection_lost.called) + + def test_close_write_fut(self): + tr = self.socket_transport() + tr._write_fut = mock.Mock() + tr.close() + test_utils.run_briefly(self.loop) + self.assertFalse(self.protocol.connection_lost.called) + + def test_close_buffer(self): + tr = self.socket_transport() + tr._buffer = [b'data'] + tr.close() + test_utils.run_briefly(self.loop) + self.assertFalse(self.protocol.connection_lost.called) + + @mock.patch('trollius.base_events.logger') + def test_fatal_error(self, m_logging): + tr = self.socket_transport() + tr._force_close = mock.Mock() + tr._fatal_error(None) + self.assertTrue(tr._force_close.called) + self.assertTrue(m_logging.error.called) + + def test_force_close(self): + tr = self.socket_transport() + tr._buffer = [b'data'] + read_fut = tr._read_fut = mock.Mock() + write_fut = tr._write_fut = mock.Mock() + tr._force_close(None) + + read_fut.cancel.assert_called_with() + write_fut.cancel.assert_called_with() + test_utils.run_briefly(self.loop) + self.protocol.connection_lost.assert_called_with(None) + self.assertEqual(None, tr._buffer) + self.assertEqual(tr._conn_lost, 1) + + def test_force_close_idempotent(self): + tr = self.socket_transport() + tr._closing = True + tr._force_close(None) + test_utils.run_briefly(self.loop) + self.assertFalse(self.protocol.connection_lost.called) + + def test_fatal_error_2(self): + tr = self.socket_transport() + tr._buffer = [b'data'] + tr._force_close(None) + + test_utils.run_briefly(self.loop) + self.protocol.connection_lost.assert_called_with(None) + self.assertEqual(None, tr._buffer) + + def test_call_connection_lost(self): + tr = self.socket_transport() + tr._call_connection_lost(None) + self.assertTrue(self.protocol.connection_lost.called) + self.assertTrue(self.sock.close.called) + + def test_write_eof(self): + tr = self.socket_transport() + self.assertTrue(tr.can_write_eof()) + tr.write_eof() + self.sock.shutdown.assert_called_with(socket.SHUT_WR) + tr.write_eof() + self.assertEqual(self.sock.shutdown.call_count, 1) + tr.close() + + def 
test_write_eof_buffer(self): + tr = self.socket_transport() + f = asyncio.Future(loop=self.loop) + tr._loop._proactor.send.return_value = f + tr.write(b'data') + tr.write_eof() + self.assertTrue(tr._eof_written) + self.assertFalse(self.sock.shutdown.called) + tr._loop._proactor.send.assert_called_with(self.sock, b'data') + f.set_result(4) + self.loop._run_once() + self.sock.shutdown.assert_called_with(socket.SHUT_WR) + tr.close() + + def test_write_eof_write_pipe(self): + tr = _ProactorWritePipeTransport( + self.loop, self.sock, self.protocol) + self.assertTrue(tr.can_write_eof()) + tr.write_eof() + self.assertTrue(tr._closing) + self.loop._run_once() + self.assertTrue(self.sock.close.called) + tr.close() + + def test_write_eof_buffer_write_pipe(self): + tr = _ProactorWritePipeTransport(self.loop, self.sock, self.protocol) + f = asyncio.Future(loop=self.loop) + tr._loop._proactor.send.return_value = f + tr.write(b'data') + tr.write_eof() + self.assertTrue(tr._closing) + self.assertFalse(self.sock.shutdown.called) + tr._loop._proactor.send.assert_called_with(self.sock, b'data') + f.set_result(4) + self.loop._run_once() + self.loop._run_once() + self.assertTrue(self.sock.close.called) + tr.close() + + def test_write_eof_duplex_pipe(self): + tr = _ProactorDuplexPipeTransport( + self.loop, self.sock, self.protocol) + self.assertFalse(tr.can_write_eof()) + with self.assertRaises(NotImplementedError): + tr.write_eof() + close_transport(tr) + + def test_pause_resume_reading(self): + tr = self.socket_transport() + futures = [] + for msg in [b'data1', b'data2', b'data3', b'data4', b'']: + f = asyncio.Future(loop=self.loop) + f.set_result(msg) + futures.append(f) + self.loop._proactor.recv.side_effect = futures + self.loop._run_once() + self.assertFalse(tr._paused) + self.loop._run_once() + self.protocol.data_received.assert_called_with(b'data1') + self.loop._run_once() + self.protocol.data_received.assert_called_with(b'data2') + tr.pause_reading() + self.assertTrue(tr._paused) + for i in range(10): + self.loop._run_once() + self.protocol.data_received.assert_called_with(b'data2') + tr.resume_reading() + self.assertFalse(tr._paused) + self.loop._run_once() + self.protocol.data_received.assert_called_with(b'data3') + self.loop._run_once() + self.protocol.data_received.assert_called_with(b'data4') + tr.close() + + + def pause_writing_transport(self, high): + tr = self.socket_transport() + tr.set_write_buffer_limits(high=high) + + self.assertEqual(tr.get_write_buffer_size(), 0) + self.assertFalse(self.protocol.pause_writing.called) + self.assertFalse(self.protocol.resume_writing.called) + return tr + + def test_pause_resume_writing(self): + tr = self.pause_writing_transport(high=4) + + # write a large chunk, must pause writing + fut = asyncio.Future(loop=self.loop) + self.loop._proactor.send.return_value = fut + tr.write(b'large data') + self.loop._run_once() + self.assertTrue(self.protocol.pause_writing.called) + + # flush the buffer + fut.set_result(None) + self.loop._run_once() + self.assertEqual(tr.get_write_buffer_size(), 0) + self.assertTrue(self.protocol.resume_writing.called) + + def test_pause_writing_2write(self): + tr = self.pause_writing_transport(high=4) + + # first short write, the buffer is not full (3 <= 4) + fut1 = asyncio.Future(loop=self.loop) + self.loop._proactor.send.return_value = fut1 + tr.write(b'123') + self.loop._run_once() + self.assertEqual(tr.get_write_buffer_size(), 3) + self.assertFalse(self.protocol.pause_writing.called) + + # fill the buffer, must pause writing (6 > 4) 
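+        # (the protocol is paused only when the total buffered size crosses
+        # the high-water mark of 4 bytes configured above)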
+        tr.write(b'abc')
+        self.loop._run_once()
+        self.assertEqual(tr.get_write_buffer_size(), 6)
+        self.assertTrue(self.protocol.pause_writing.called)
+
+    def test_pause_writing_3write(self):
+        tr = self.pause_writing_transport(high=4)
+
+        # first short write, the buffer is not full (1 <= 4)
+        fut = asyncio.Future(loop=self.loop)
+        self.loop._proactor.send.return_value = fut
+        tr.write(b'1')
+        self.loop._run_once()
+        self.assertEqual(tr.get_write_buffer_size(), 1)
+        self.assertFalse(self.protocol.pause_writing.called)
+
+        # second short write, the buffer is not full (3 <= 4)
+        tr.write(b'23')
+        self.loop._run_once()
+        self.assertEqual(tr.get_write_buffer_size(), 3)
+        self.assertFalse(self.protocol.pause_writing.called)
+
+        # fill the buffer, must pause writing (6 > 4)
+        tr.write(b'abc')
+        self.loop._run_once()
+        self.assertEqual(tr.get_write_buffer_size(), 6)
+        self.assertTrue(self.protocol.pause_writing.called)
+
+    def test_dont_pause_writing(self):
+        tr = self.pause_writing_transport(high=4)
+
+        # write a large chunk which completes immediately,
+        # it should not pause writing
+        fut = asyncio.Future(loop=self.loop)
+        fut.set_result(None)
+        self.loop._proactor.send.return_value = fut
+        tr.write(b'very large data')
+        self.loop._run_once()
+        self.assertEqual(tr.get_write_buffer_size(), 0)
+        self.assertFalse(self.protocol.pause_writing.called)
+
+
+class BaseProactorEventLoopTests(test_utils.TestCase):
+
+    def setUp(self):
+        self.sock = mock.Mock(socket.socket)
+        self.proactor = mock.Mock()
+
+        self.ssock, self.csock = mock.Mock(), mock.Mock()
+
+        class EventLoop(BaseProactorEventLoop):
+            def _socketpair(s):
+                return (self.ssock, self.csock)
+
+        self.loop = EventLoop(self.proactor)
+        self.set_event_loop(self.loop)
+
+    @mock.patch.object(BaseProactorEventLoop, 'call_soon')
+    @mock.patch.object(BaseProactorEventLoop, '_socketpair')
+    def test_ctor(self, socketpair, call_soon):
+        ssock, csock = socketpair.return_value = (
+            mock.Mock(), mock.Mock())
+        loop = BaseProactorEventLoop(self.proactor)
+        self.assertIs(loop._ssock, ssock)
+        self.assertIs(loop._csock, csock)
+        self.assertEqual(loop._internal_fds, 1)
+        call_soon.assert_called_with(loop._loop_self_reading)
+        loop.close()
+
+    def test_close_self_pipe(self):
+        self.loop._close_self_pipe()
+        self.assertEqual(self.loop._internal_fds, 0)
+        self.assertTrue(self.ssock.close.called)
+        self.assertTrue(self.csock.close.called)
+        self.assertIsNone(self.loop._ssock)
+        self.assertIsNone(self.loop._csock)
+
+        # Don't call close(): _close_self_pipe() cannot be called twice
+        self.loop._closed = True
+
+    def test_close(self):
+        self.loop._close_self_pipe = mock.Mock()
+        self.loop.close()
+        self.assertTrue(self.loop._close_self_pipe.called)
+        self.assertTrue(self.proactor.close.called)
+        self.assertIsNone(self.loop._proactor)
+
+        self.loop._close_self_pipe.reset_mock()
+        self.loop.close()
+        self.assertFalse(self.loop._close_self_pipe.called)
+
+    def test_sock_recv(self):
+        self.loop.sock_recv(self.sock, 1024)
+        self.proactor.recv.assert_called_with(self.sock, 1024)
+
+    def test_sock_sendall(self):
+        self.loop.sock_sendall(self.sock, b'data')
+        self.proactor.send.assert_called_with(self.sock, b'data')
+
+    def test_sock_connect(self):
+        self.loop.sock_connect(self.sock, 123)
+        self.proactor.connect.assert_called_with(self.sock, 123)
+
+    def test_sock_accept(self):
+        self.loop.sock_accept(self.sock)
+        self.proactor.accept.assert_called_with(self.sock)
+
+    def test_socketpair(self):
+        class EventLoop(BaseProactorEventLoop):
+            # override the destructor to not log a 
ResourceWarning + def __del__(self): + pass + self.assertRaises( + NotImplementedError, EventLoop, self.proactor) + + def test_make_socket_transport(self): + tr = self.loop._make_socket_transport(self.sock, asyncio.Protocol()) + self.assertIsInstance(tr, _ProactorSocketTransport) + close_transport(tr) + + def test_loop_self_reading(self): + self.loop._loop_self_reading() + self.proactor.recv.assert_called_with(self.ssock, 4096) + self.proactor.recv.return_value.add_done_callback.assert_called_with( + self.loop._loop_self_reading) + + def test_loop_self_reading_fut(self): + fut = mock.Mock() + self.loop._loop_self_reading(fut) + self.assertTrue(fut.result.called) + self.proactor.recv.assert_called_with(self.ssock, 4096) + self.proactor.recv.return_value.add_done_callback.assert_called_with( + self.loop._loop_self_reading) + + def test_loop_self_reading_exception(self): + self.loop.close = mock.Mock() + self.loop.call_exception_handler = mock.Mock() + self.proactor.recv.side_effect = OSError() + self.loop._loop_self_reading() + self.assertTrue(self.loop.call_exception_handler.called) + + def test_write_to_self(self): + self.loop._write_to_self() + self.csock.send.assert_called_with(b'\0') + + def test_process_events(self): + self.loop._process_events([]) + + @mock.patch('trollius.base_events.logger') + def test_create_server(self, m_log): + pf = mock.Mock() + call_soon = self.loop.call_soon = mock.Mock() + + self.loop._start_serving(pf, self.sock) + self.assertTrue(call_soon.called) + + # callback + loop = call_soon.call_args[0][0] + loop() + self.proactor.accept.assert_called_with(self.sock) + + # conn + fut = mock.Mock() + fut.result.return_value = (mock.Mock(), mock.Mock()) + + make_tr = self.loop._make_socket_transport = mock.Mock() + loop(fut) + self.assertTrue(fut.result.called) + self.assertTrue(make_tr.called) + + # exception + fut.result.side_effect = OSError() + loop(fut) + self.assertTrue(self.sock.close.called) + self.assertTrue(m_log.error.called) + + def test_create_server_cancel(self): + pf = mock.Mock() + call_soon = self.loop.call_soon = mock.Mock() + + self.loop._start_serving(pf, self.sock) + loop = call_soon.call_args[0][0] + + # cancelled + fut = asyncio.Future(loop=self.loop) + fut.cancel() + loop(fut) + self.assertTrue(self.sock.close.called) + + def test_stop_serving(self): + sock = mock.Mock() + self.loop._stop_serving(sock) + self.assertTrue(sock.close.called) + self.proactor._stop_serving.assert_called_with(sock) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_py33_exceptions.py b/tests/test_py33_exceptions.py new file mode 100644 index 00000000..42fb4e23 --- /dev/null +++ b/tests/test_py33_exceptions.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +""" +Tests for py33_exceptions. 
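+
+py33_exceptions backports the Python 3.3 OSError hierarchy to Python 2.
+wrap_error(func, *args) calls func and re-raises errors such as
+socket.error through that hierarchy, e.g.:
+
+    py33_exceptions.wrap_error(sock.send, b'abc')  # socket.error -> OSError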
+ +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import unittest +from trollius import py33_exceptions + +class TestWrapErrors(unittest.TestCase): + + def test_ebadf_wrapped_to_OSError(self): + # https://github.com/jamadden/trollius/issues/17 + import socket + import os + import errno + s = socket.socket() + os.close(s.fileno()) + + with self.assertRaises(socket.error) as exc: + s.send(b'abc') + + self.assertEqual(exc.exception.errno, errno.EBADF) + + with self.assertRaises(OSError) as exc: + py33_exceptions.wrap_error(s.send, b'abc') + + self.assertEqual(exc.exception.errno, errno.EBADF) + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_queues.py b/tests/test_queues.py new file mode 100644 index 00000000..75ef9889 --- /dev/null +++ b/tests/test_queues.py @@ -0,0 +1,587 @@ +"""Tests for queues.py""" + +import trollius as asyncio +from trollius import Return, From +from trollius import test_utils +from trollius.test_utils import mock +from trollius.test_utils import unittest + + +class _QueueTestBase(test_utils.TestCase): + + def setUp(self): + self.loop = self.new_test_loop() + + +class QueueBasicTests(_QueueTestBase): + + def _test_repr_or_str(self, fn, expect_id): + """Test Queue's repr or str. + + fn is repr or str. expect_id is True if we expect the Queue's id to + appear in fn(Queue()). + """ + def gen(): + when = yield + self.assertAlmostEqual(0.1, when) + when = yield 0.1 + self.assertAlmostEqual(0.2, when) + yield 0.1 + + loop = self.new_test_loop(gen) + + q = asyncio.Queue(loop=loop) + self.assertTrue(fn(q).startswith('= (3,): + UNICODE_STR = 'unicode' +else: + UNICODE_STR = unicode('unicode') + try: + memoryview + except NameError: + # Python 2.6 + memoryview = buffer + +MOCK_ANY = mock.ANY + + +class TestBaseSelectorEventLoop(BaseSelectorEventLoop): + + def close(self): + # Don't call the close() method of the parent class, because the + # selector is mocked + self._closed = True + + def _make_self_pipe(self): + self._ssock = mock.Mock() + self._csock = mock.Mock() + self._internal_fds += 1 + + +def list_to_buffer(l=()): + return bytearray().join(l) + + +def close_transport(transport): + # Don't call transport.close() because the event loop and the selector + # are mocked + if transport._sock is None: + return + transport._sock.close() + transport._sock = None + + +class BaseSelectorEventLoopTests(test_utils.TestCase): + + def setUp(self): + self.selector = mock.Mock() + self.selector.select.return_value = [] + self.loop = TestBaseSelectorEventLoop(self.selector) + self.set_event_loop(self.loop) + + def test_make_socket_transport(self): + m = mock.Mock() + self.loop.add_reader = mock.Mock() + self.loop.add_reader._is_coroutine = False + transport = self.loop._make_socket_transport(m, asyncio.Protocol()) + self.assertIsInstance(transport, _SelectorSocketTransport) + + # Calling repr() must not fail when the event loop is closed + self.loop.close() + repr(transport) + + close_transport(transport) + + @unittest.skipIf(ssl is None, 'No ssl module') + def test_make_ssl_transport(self): + m = mock.Mock() + self.loop.add_reader = mock.Mock() + self.loop.add_reader._is_coroutine = False + self.loop.add_writer = mock.Mock() + self.loop.remove_reader = mock.Mock() + self.loop.remove_writer = mock.Mock() + waiter = asyncio.Future(loop=self.loop) + with test_utils.disable_logger(): + transport = self.loop._make_ssl_transport( + m, asyncio.Protocol(), m, waiter) + # execute the handshake while 
the logger is disabled + # to ignore SSL handshake failure + test_utils.run_briefly(self.loop) + + # Sanity check + class_name = transport.__class__.__name__ + self.assertIn("ssl", class_name.lower()) + self.assertIn("transport", class_name.lower()) + + transport.close() + # execute pending callbacks to close the socket transport + test_utils.run_briefly(self.loop) + + @mock.patch('trollius.selector_events.ssl', None) + @mock.patch('trollius.sslproto.ssl', None) + def test_make_ssl_transport_without_ssl_error(self): + m = mock.Mock() + self.loop.add_reader = mock.Mock() + self.loop.add_writer = mock.Mock() + self.loop.remove_reader = mock.Mock() + self.loop.remove_writer = mock.Mock() + with self.assertRaises(RuntimeError): + self.loop._make_ssl_transport(m, m, m, m) + + def test_close(self): + class EventLoop(BaseSelectorEventLoop): + def _make_self_pipe(self): + self._ssock = mock.Mock() + self._csock = mock.Mock() + self._internal_fds += 1 + + self.loop = EventLoop(self.selector) + self.set_event_loop(self.loop) + + ssock = self.loop._ssock + ssock.fileno.return_value = 7 + csock = self.loop._csock + csock.fileno.return_value = 1 + remove_reader = self.loop.remove_reader = mock.Mock() + + self.loop._selector.close() + self.loop._selector = selector = mock.Mock() + self.assertFalse(self.loop.is_closed()) + + self.loop.close() + self.assertTrue(self.loop.is_closed()) + self.assertIsNone(self.loop._selector) + self.assertIsNone(self.loop._csock) + self.assertIsNone(self.loop._ssock) + selector.close.assert_called_with() + ssock.close.assert_called_with() + csock.close.assert_called_with() + remove_reader.assert_called_with(7) + + # it should be possible to call close() more than once + self.loop.close() + self.loop.close() + + # operation blocked when the loop is closed + f = asyncio.Future(loop=self.loop) + self.assertRaises(RuntimeError, self.loop.run_forever) + self.assertRaises(RuntimeError, self.loop.run_until_complete, f) + fd = 0 + def callback(): + pass + self.assertRaises(RuntimeError, self.loop.add_reader, fd, callback) + self.assertRaises(RuntimeError, self.loop.add_writer, fd, callback) + + def test_close_no_selector(self): + self.loop.remove_reader = mock.Mock() + self.loop._selector.close() + self.loop._selector = None + self.loop.close() + self.assertIsNone(self.loop._selector) + + def test_socketpair(self): + self.assertRaises(NotImplementedError, self.loop._socketpair) + + def test_read_from_self_tryagain(self): + self.loop._ssock.recv.side_effect = BlockingIOError + self.assertIsNone(self.loop._read_from_self()) + + def test_read_from_self_exception(self): + self.loop._ssock.recv.side_effect = OSError + self.assertRaises(OSError, self.loop._read_from_self) + + def test_write_to_self_tryagain(self): + self.loop._csock.send.side_effect = BlockingIOError + with test_utils.disable_logger(): + self.assertIsNone(self.loop._write_to_self()) + + def test_write_to_self_exception(self): + # _write_to_self() swallows OSError + self.loop._csock.send.side_effect = RuntimeError() + self.assertRaises(RuntimeError, self.loop._write_to_self) + + def test_sock_recv(self): + sock = test_utils.mock_nonblocking_socket() + self.loop._sock_recv = mock.Mock() + + f = self.loop.sock_recv(sock, 1024) + self.assertIsInstance(f, asyncio.Future) + self.loop._sock_recv.assert_called_with(f, False, sock, 1024) + + def test__sock_recv_canceled_fut(self): + sock = mock.Mock() + + f = asyncio.Future(loop=self.loop) + f.cancel() + + self.loop._sock_recv(f, False, sock, 1024) + 
self.assertFalse(sock.recv.called) + + def test__sock_recv_unregister(self): + sock = mock.Mock() + sock.fileno.return_value = 10 + + f = asyncio.Future(loop=self.loop) + f.cancel() + + self.loop.remove_reader = mock.Mock() + self.loop._sock_recv(f, True, sock, 1024) + self.assertEqual((10,), self.loop.remove_reader.call_args[0]) + + def test__sock_recv_tryagain(self): + f = asyncio.Future(loop=self.loop) + sock = mock.Mock() + sock.fileno.return_value = 10 + sock.recv.side_effect = BlockingIOError + + self.loop.add_reader = mock.Mock() + self.loop._sock_recv(f, False, sock, 1024) + self.assertEqual((10, self.loop._sock_recv, f, True, sock, 1024), + self.loop.add_reader.call_args[0]) + + def test__sock_recv_exception(self): + f = asyncio.Future(loop=self.loop) + sock = mock.Mock() + sock.fileno.return_value = 10 + err = sock.recv.side_effect = OSError() + + self.loop._sock_recv(f, False, sock, 1024) + self.assertIs(err, f.exception()) + + def test_sock_sendall(self): + sock = test_utils.mock_nonblocking_socket() + self.loop._sock_sendall = mock.Mock() + + f = self.loop.sock_sendall(sock, b'data') + self.assertIsInstance(f, asyncio.Future) + self.assertEqual( + (f, False, sock, b'data'), + self.loop._sock_sendall.call_args[0]) + + def test_sock_sendall_nodata(self): + sock = test_utils.mock_nonblocking_socket() + self.loop._sock_sendall = mock.Mock() + + f = self.loop.sock_sendall(sock, b'') + self.assertIsInstance(f, asyncio.Future) + self.assertTrue(f.done()) + self.assertIsNone(f.result()) + self.assertFalse(self.loop._sock_sendall.called) + + def test__sock_sendall_canceled_fut(self): + sock = mock.Mock() + + f = asyncio.Future(loop=self.loop) + f.cancel() + + self.loop._sock_sendall(f, False, sock, b'data') + self.assertFalse(sock.send.called) + + def test__sock_sendall_unregister(self): + sock = mock.Mock() + sock.fileno.return_value = 10 + + f = asyncio.Future(loop=self.loop) + f.cancel() + + self.loop.remove_writer = mock.Mock() + self.loop._sock_sendall(f, True, sock, b'data') + self.assertEqual((10,), self.loop.remove_writer.call_args[0]) + + def test__sock_sendall_tryagain(self): + f = asyncio.Future(loop=self.loop) + sock = mock.Mock() + sock.fileno.return_value = 10 + sock.send.side_effect = BlockingIOError + + self.loop.add_writer = mock.Mock() + self.loop._sock_sendall(f, False, sock, b'data') + self.assertEqual( + (10, self.loop._sock_sendall, f, True, sock, b'data'), + self.loop.add_writer.call_args[0]) + + def test__sock_sendall_interrupted(self): + f = asyncio.Future(loop=self.loop) + sock = mock.Mock() + sock.fileno.return_value = 10 + sock.send.side_effect = InterruptedError + + self.loop.add_writer = mock.Mock() + self.loop._sock_sendall(f, False, sock, b'data') + self.assertEqual( + (10, self.loop._sock_sendall, f, True, sock, b'data'), + self.loop.add_writer.call_args[0]) + + def test__sock_sendall_exception(self): + f = asyncio.Future(loop=self.loop) + sock = mock.Mock() + sock.fileno.return_value = 10 + err = sock.send.side_effect = OSError() + + self.loop._sock_sendall(f, False, sock, b'data') + self.assertIs(f.exception(), err) + + def test__sock_sendall(self): + sock = mock.Mock() + + f = asyncio.Future(loop=self.loop) + sock.fileno.return_value = 10 + sock.send.return_value = 4 + + self.loop._sock_sendall(f, False, sock, b'data') + self.assertTrue(f.done()) + self.assertIsNone(f.result()) + + def test__sock_sendall_partial(self): + sock = mock.Mock() + + f = asyncio.Future(loop=self.loop) + sock.fileno.return_value = 10 + sock.send.return_value = 2 + + 
self.loop.add_writer = mock.Mock() + self.loop._sock_sendall(f, False, sock, b'data') + self.assertFalse(f.done()) + self.assertEqual( + (10, self.loop._sock_sendall, f, True, sock, b'ta'), + self.loop.add_writer.call_args[0]) + + def test__sock_sendall_none(self): + sock = mock.Mock() + + f = asyncio.Future(loop=self.loop) + sock.fileno.return_value = 10 + sock.send.return_value = 0 + + self.loop.add_writer = mock.Mock() + self.loop._sock_sendall(f, False, sock, b'data') + self.assertFalse(f.done()) + self.assertEqual( + (10, self.loop._sock_sendall, f, True, sock, b'data'), + self.loop.add_writer.call_args[0]) + + def test_sock_connect(self): + sock = test_utils.mock_nonblocking_socket() + self.loop._sock_connect = mock.Mock() + + f = self.loop.sock_connect(sock, ('127.0.0.1', 8080)) + self.assertIsInstance(f, asyncio.Future) + self.assertEqual( + (f, sock, ('127.0.0.1', 8080)), + self.loop._sock_connect.call_args[0]) + + def test_sock_connect_timeout(self): + # asyncio issue #205: sock_connect() must unregister the socket on + # timeout error + + # prepare mocks + self.loop.add_writer = mock.Mock() + self.loop.remove_writer = mock.Mock() + sock = test_utils.mock_nonblocking_socket() + sock.connect.side_effect = BlockingIOError + + # first call to sock_connect() registers the socket + fut = self.loop.sock_connect(sock, ('127.0.0.1', 80)) + self.assertTrue(sock.connect.called) + self.assertTrue(self.loop.add_writer.called) + self.assertEqual(len(fut._callbacks), 1) + + # on timeout, the socket must be unregistered + sock.connect.reset_mock() + fut.set_exception(asyncio.TimeoutError) + with self.assertRaises(asyncio.TimeoutError): + self.loop.run_until_complete(fut) + self.assertTrue(self.loop.remove_writer.called) + + def test__sock_connect(self): + f = asyncio.Future(loop=self.loop) + + sock = mock.Mock() + sock.fileno.return_value = 10 + + self.loop._sock_connect(f, sock, ('127.0.0.1', 8080)) + self.assertTrue(f.done()) + self.assertIsNone(f.result()) + self.assertTrue(sock.connect.called) + + def test__sock_connect_cb_cancelled_fut(self): + sock = mock.Mock() + self.loop.remove_writer = mock.Mock() + + f = asyncio.Future(loop=self.loop) + f.cancel() + + self.loop._sock_connect_cb(f, sock, ('127.0.0.1', 8080)) + self.assertFalse(sock.getsockopt.called) + + def test__sock_connect_writer(self): + # check that the fd is registered and then unregistered + self.loop._process_events = mock.Mock() + self.loop.add_writer = mock.Mock() + self.loop.remove_writer = mock.Mock() + + sock = mock.Mock() + sock.fileno.return_value = 10 + sock.connect.side_effect = BlockingIOError + sock.getsockopt.return_value = 0 + address = ('127.0.0.1', 8080) + + f = asyncio.Future(loop=self.loop) + self.loop._sock_connect(f, sock, address) + self.assertTrue(self.loop.add_writer.called) + self.assertEqual(10, self.loop.add_writer.call_args[0][0]) + + self.loop._sock_connect_cb(f, sock, address) + # need to run the event loop to execute _sock_connect_done() callback + self.loop.run_until_complete(f) + self.assertEqual((10,), self.loop.remove_writer.call_args[0]) + + def test__sock_connect_cb_tryagain(self): + f = asyncio.Future(loop=self.loop) + sock = mock.Mock() + sock.fileno.return_value = 10 + sock.getsockopt.return_value = errno.EAGAIN + + # check that the exception is handled + self.loop._sock_connect_cb(f, sock, ('127.0.0.1', 8080)) + + def test__sock_connect_cb_exception(self): + f = asyncio.Future(loop=self.loop) + sock = mock.Mock() + sock.fileno.return_value = 10 + sock.getsockopt.return_value = 
errno.ENOTCONN + + self.loop.remove_writer = mock.Mock() + self.loop._sock_connect_cb(f, sock, ('127.0.0.1', 8080)) + self.assertIsInstance(f.exception(), OSError) + + def test_sock_accept(self): + sock = test_utils.mock_nonblocking_socket() + self.loop._sock_accept = mock.Mock() + + f = self.loop.sock_accept(sock) + self.assertIsInstance(f, asyncio.Future) + self.assertEqual( + (f, False, sock), self.loop._sock_accept.call_args[0]) + + def test__sock_accept(self): + f = asyncio.Future(loop=self.loop) + + conn = mock.Mock() + + sock = mock.Mock() + sock.fileno.return_value = 10 + sock.accept.return_value = conn, ('127.0.0.1', 1000) + + self.loop._sock_accept(f, False, sock) + self.assertTrue(f.done()) + self.assertEqual((conn, ('127.0.0.1', 1000)), f.result()) + self.assertEqual((False,), conn.setblocking.call_args[0]) + + def test__sock_accept_canceled_fut(self): + sock = mock.Mock() + + f = asyncio.Future(loop=self.loop) + f.cancel() + + self.loop._sock_accept(f, False, sock) + self.assertFalse(sock.accept.called) + + def test__sock_accept_unregister(self): + sock = mock.Mock() + sock.fileno.return_value = 10 + + f = asyncio.Future(loop=self.loop) + f.cancel() + + self.loop.remove_reader = mock.Mock() + self.loop._sock_accept(f, True, sock) + self.assertEqual((10,), self.loop.remove_reader.call_args[0]) + + def test__sock_accept_tryagain(self): + f = asyncio.Future(loop=self.loop) + sock = mock.Mock() + sock.fileno.return_value = 10 + sock.accept.side_effect = BlockingIOError + + self.loop.add_reader = mock.Mock() + self.loop._sock_accept(f, False, sock) + self.assertEqual( + (10, self.loop._sock_accept, f, True, sock), + self.loop.add_reader.call_args[0]) + + def test__sock_accept_exception(self): + f = asyncio.Future(loop=self.loop) + sock = mock.Mock() + sock.fileno.return_value = 10 + err = sock.accept.side_effect = OSError() + + self.loop._sock_accept(f, False, sock) + self.assertIs(err, f.exception()) + + def test_add_reader(self): + self.loop._selector.get_key.side_effect = KeyError + cb = lambda: True + self.loop.add_reader(1, cb) + + self.assertTrue(self.loop._selector.register.called) + fd, mask, (r, w) = self.loop._selector.register.call_args[0] + self.assertEqual(1, fd) + self.assertEqual(selectors.EVENT_READ, mask) + self.assertEqual(cb, r._callback) + self.assertIsNone(w) + + def test_add_reader_existing(self): + reader = mock.Mock() + writer = mock.Mock() + self.loop._selector.get_key.return_value = selectors.SelectorKey( + 1, 1, selectors.EVENT_WRITE, (reader, writer)) + cb = lambda: True + self.loop.add_reader(1, cb) + + self.assertTrue(reader.cancel.called) + self.assertFalse(self.loop._selector.register.called) + self.assertTrue(self.loop._selector.modify.called) + fd, mask, (r, w) = self.loop._selector.modify.call_args[0] + self.assertEqual(1, fd) + self.assertEqual(selectors.EVENT_WRITE | selectors.EVENT_READ, mask) + self.assertEqual(cb, r._callback) + self.assertEqual(writer, w) + + def test_add_reader_existing_writer(self): + writer = mock.Mock() + self.loop._selector.get_key.return_value = selectors.SelectorKey( + 1, 1, selectors.EVENT_WRITE, (None, writer)) + cb = lambda: True + self.loop.add_reader(1, cb) + + self.assertFalse(self.loop._selector.register.called) + self.assertTrue(self.loop._selector.modify.called) + fd, mask, (r, w) = self.loop._selector.modify.call_args[0] + self.assertEqual(1, fd) + self.assertEqual(selectors.EVENT_WRITE | selectors.EVENT_READ, mask) + self.assertEqual(cb, r._callback) + self.assertEqual(writer, w) + + def 
test_remove_reader(self): + self.loop._selector.get_key.return_value = selectors.SelectorKey( + 1, 1, selectors.EVENT_READ, (None, None)) + self.assertFalse(self.loop.remove_reader(1)) + + self.assertTrue(self.loop._selector.unregister.called) + + def test_remove_reader_read_write(self): + reader = mock.Mock() + writer = mock.Mock() + self.loop._selector.get_key.return_value = selectors.SelectorKey( + 1, 1, selectors.EVENT_READ | selectors.EVENT_WRITE, + (reader, writer)) + self.assertTrue( + self.loop.remove_reader(1)) + + self.assertFalse(self.loop._selector.unregister.called) + self.assertEqual( + (1, selectors.EVENT_WRITE, (None, writer)), + self.loop._selector.modify.call_args[0]) + + def test_remove_reader_unknown(self): + self.loop._selector.get_key.side_effect = KeyError + self.assertFalse( + self.loop.remove_reader(1)) + + def test_add_writer(self): + self.loop._selector.get_key.side_effect = KeyError + cb = lambda: True + self.loop.add_writer(1, cb) + + self.assertTrue(self.loop._selector.register.called) + fd, mask, (r, w) = self.loop._selector.register.call_args[0] + self.assertEqual(1, fd) + self.assertEqual(selectors.EVENT_WRITE, mask) + self.assertIsNone(r) + self.assertEqual(cb, w._callback) + + def test_add_writer_existing(self): + reader = mock.Mock() + writer = mock.Mock() + self.loop._selector.get_key.return_value = selectors.SelectorKey( + 1, 1, selectors.EVENT_READ, (reader, writer)) + cb = lambda: True + self.loop.add_writer(1, cb) + + self.assertTrue(writer.cancel.called) + self.assertFalse(self.loop._selector.register.called) + self.assertTrue(self.loop._selector.modify.called) + fd, mask, (r, w) = self.loop._selector.modify.call_args[0] + self.assertEqual(1, fd) + self.assertEqual(selectors.EVENT_WRITE | selectors.EVENT_READ, mask) + self.assertEqual(reader, r) + self.assertEqual(cb, w._callback) + + def test_remove_writer(self): + self.loop._selector.get_key.return_value = selectors.SelectorKey( + 1, 1, selectors.EVENT_WRITE, (None, None)) + self.assertFalse(self.loop.remove_writer(1)) + + self.assertTrue(self.loop._selector.unregister.called) + + def test_remove_writer_read_write(self): + reader = mock.Mock() + writer = mock.Mock() + self.loop._selector.get_key.return_value = selectors.SelectorKey( + 1, 1, selectors.EVENT_READ | selectors.EVENT_WRITE, + (reader, writer)) + self.assertTrue( + self.loop.remove_writer(1)) + + self.assertFalse(self.loop._selector.unregister.called) + self.assertEqual( + (1, selectors.EVENT_READ, (reader, None)), + self.loop._selector.modify.call_args[0]) + + def test_remove_writer_unknown(self): + self.loop._selector.get_key.side_effect = KeyError + self.assertFalse( + self.loop.remove_writer(1)) + + def test_process_events_read(self): + reader = mock.Mock() + reader._cancelled = False + + self.loop._add_callback = mock.Mock() + self.loop._process_events( + [(selectors.SelectorKey( + 1, 1, selectors.EVENT_READ, (reader, None)), + selectors.EVENT_READ)]) + self.assertTrue(self.loop._add_callback.called) + self.loop._add_callback.assert_called_with(reader) + + def test_process_events_read_cancelled(self): + reader = mock.Mock() + reader.cancelled = True + + self.loop.remove_reader = mock.Mock() + self.loop._process_events( + [(selectors.SelectorKey( + 1, 1, selectors.EVENT_READ, (reader, None)), + selectors.EVENT_READ)]) + self.loop.remove_reader.assert_called_with(1) + + def test_process_events_write(self): + writer = mock.Mock() + writer._cancelled = False + + self.loop._add_callback = mock.Mock() + self.loop._process_events( + 
[(selectors.SelectorKey(1, 1, selectors.EVENT_WRITE, + (None, writer)), + selectors.EVENT_WRITE)]) + self.loop._add_callback.assert_called_with(writer) + + def test_process_events_write_cancelled(self): + writer = mock.Mock() + writer.cancelled = True + self.loop.remove_writer = mock.Mock() + + self.loop._process_events( + [(selectors.SelectorKey(1, 1, selectors.EVENT_WRITE, + (None, writer)), + selectors.EVENT_WRITE)]) + self.loop.remove_writer.assert_called_with(1) + + +class SelectorTransportTests(test_utils.TestCase): + + def setUp(self): + self.loop = self.new_test_loop() + self.protocol = test_utils.make_test_protocol(asyncio.Protocol) + self.sock = mock.Mock(socket.socket) + self.sock.fileno.return_value = 7 + + def create_transport(self): + transport = _SelectorTransport(self.loop, self.sock, self.protocol, + None) + self.addCleanup(close_transport, transport) + return transport + + def test_ctor(self): + tr = self.create_transport() + self.assertIs(tr._loop, self.loop) + self.assertIs(tr._sock, self.sock) + self.assertIs(tr._sock_fd, 7) + + def test_abort(self): + tr = self.create_transport() + tr._force_close = mock.Mock() + + tr.abort() + tr._force_close.assert_called_with(None) + + def test_close(self): + tr = self.create_transport() + tr.close() + + self.assertTrue(tr._closing) + self.assertEqual(1, self.loop.remove_reader_count[7]) + self.protocol.connection_lost(None) + self.assertEqual(tr._conn_lost, 1) + + tr.close() + self.assertEqual(tr._conn_lost, 1) + self.assertEqual(1, self.loop.remove_reader_count[7]) + + def test_close_write_buffer(self): + tr = self.create_transport() + tr._buffer.extend(b'data') + tr.close() + + self.assertFalse(self.loop.readers) + test_utils.run_briefly(self.loop) + self.assertFalse(self.protocol.connection_lost.called) + + def test_force_close(self): + tr = self.create_transport() + tr._buffer.extend(b'1') + self.loop.add_reader(7, mock.sentinel) + self.loop.add_writer(7, mock.sentinel) + tr._force_close(None) + + self.assertTrue(tr._closing) + self.assertEqual(tr._buffer, list_to_buffer()) + self.assertFalse(self.loop.readers) + self.assertFalse(self.loop.writers) + + # second close should not remove reader + tr._force_close(None) + self.assertFalse(self.loop.readers) + self.assertEqual(1, self.loop.remove_reader_count[7]) + + @mock.patch('trollius.log.logger.error') + def test_fatal_error(self, m_exc): + exc = OSError() + tr = self.create_transport() + tr._force_close = mock.Mock() + tr._fatal_error(exc) + + m_exc.assert_called_with( + test_utils.MockPattern( + 'Fatal error on transport\nprotocol:.*\ntransport:.*'), + exc_info=(OSError, MOCK_ANY, MOCK_ANY)) + + tr._force_close.assert_called_with(exc) + + def test_connection_lost(self): + exc = OSError() + tr = self.create_transport() + self.assertIsNotNone(tr._protocol) + self.assertIsNotNone(tr._loop) + tr._call_connection_lost(exc) + + self.protocol.connection_lost.assert_called_with(exc) + self.sock.close.assert_called_with() + self.assertIsNone(tr._sock) + + self.assertIsNone(tr._protocol) + self.assertIsNone(tr._loop) + + +class SelectorSocketTransportTests(test_utils.TestCase): + + def setUp(self): + self.loop = self.new_test_loop() + self.protocol = test_utils.make_test_protocol(asyncio.Protocol) + self.sock = mock.Mock(socket.socket) + self.sock_fd = self.sock.fileno.return_value = 7 + + def socket_transport(self, waiter=None): + transport = _SelectorSocketTransport(self.loop, self.sock, + self.protocol, waiter=waiter) + self.addCleanup(close_transport, transport) + return transport 
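+
+    # Note: these tests exercise the transport by calling its internal
+    # callbacks (e.g. _read_ready() and _write_ready()) directly against the
+    # mocked socket, instead of running the event loop.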
+ + def test_ctor(self): + waiter = asyncio.Future(loop=self.loop) + tr = self.socket_transport(waiter=waiter) + self.loop.run_until_complete(waiter) + + self.loop.assert_reader(7, tr._read_ready) + test_utils.run_briefly(self.loop) + self.protocol.connection_made.assert_called_with(tr) + + def test_ctor_with_waiter(self): + waiter = asyncio.Future(loop=self.loop) + self.socket_transport(waiter=waiter) + self.loop.run_until_complete(waiter) + + self.assertIsNone(waiter.result()) + + def test_pause_resume_reading(self): + tr = self.socket_transport() + test_utils.run_briefly(self.loop) + self.assertFalse(tr._paused) + self.loop.assert_reader(7, tr._read_ready) + tr.pause_reading() + self.assertTrue(tr._paused) + self.assertFalse(7 in self.loop.readers) + tr.resume_reading() + self.assertFalse(tr._paused) + self.loop.assert_reader(7, tr._read_ready) + with self.assertRaises(RuntimeError): + tr.resume_reading() + + def test_read_ready(self): + transport = self.socket_transport() + + self.sock.recv.return_value = b'data' + transport._read_ready() + + self.protocol.data_received.assert_called_with(b'data') + + def test_read_ready_eof(self): + transport = self.socket_transport() + transport.close = mock.Mock() + + self.sock.recv.return_value = b'' + transport._read_ready() + + self.protocol.eof_received.assert_called_with() + transport.close.assert_called_with() + + def test_read_ready_eof_keep_open(self): + transport = self.socket_transport() + transport.close = mock.Mock() + + self.sock.recv.return_value = b'' + self.protocol.eof_received.return_value = True + transport._read_ready() + + self.protocol.eof_received.assert_called_with() + self.assertFalse(transport.close.called) + + @mock.patch('logging.exception') + def test_read_ready_tryagain(self, m_exc): + self.sock.recv.side_effect = BlockingIOError + + transport = self.socket_transport() + transport._fatal_error = mock.Mock() + transport._read_ready() + + self.assertFalse(transport._fatal_error.called) + + @mock.patch('logging.exception') + def test_read_ready_tryagain_interrupted(self, m_exc): + self.sock.recv.side_effect = InterruptedError + + transport = self.socket_transport() + transport._fatal_error = mock.Mock() + transport._read_ready() + + self.assertFalse(transport._fatal_error.called) + + @mock.patch('logging.exception') + def test_read_ready_conn_reset(self, m_exc): + err = self.sock.recv.side_effect = ConnectionResetError() + + transport = self.socket_transport() + transport._force_close = mock.Mock() + with test_utils.disable_logger(): + transport._read_ready() + transport._force_close.assert_called_with(err) + + @mock.patch('logging.exception') + def test_read_ready_err(self, m_exc): + err = self.sock.recv.side_effect = OSError() + + transport = self.socket_transport() + transport._fatal_error = mock.Mock() + transport._read_ready() + + transport._fatal_error.assert_called_with( + err, + 'Fatal read error on socket transport') + + def test_write(self): + data = b'data' + self.sock.send.return_value = len(data) + + transport = self.socket_transport() + transport.write(data) + self.sock.send.assert_called_with(data) + + def test_write_bytearray(self): + data = bytearray(b'data') + self.sock.send.return_value = len(data) + + transport = self.socket_transport() + transport.write(data) + self.sock.send.assert_called_with(data) + self.assertEqual(data, bytearray(b'data')) # Hasn't been mutated. 
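+
+    # write() accepts any bytes-compatible object (bytes, bytearray,
+    # memoryview); the assertion above verifies that the caller's bytearray
+    # is not mutated by the transport.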
+ + def test_write_memoryview(self): + data = memoryview(b'data') + self.sock.send.return_value = len(data) + + transport = self.socket_transport() + transport.write(data) + self.sock.send.assert_called_with(b'data') + + def test_write_no_data(self): + transport = self.socket_transport() + transport._buffer.extend(b'data') + transport.write(b'') + self.assertFalse(self.sock.send.called) + self.assertEqual(list_to_buffer([b'data']), transport._buffer) + + def test_write_buffer(self): + transport = self.socket_transport() + transport._buffer.extend(b'data1') + transport.write(b'data2') + self.assertFalse(self.sock.send.called) + self.assertEqual(list_to_buffer([b'data1', b'data2']), + transport._buffer) + + def test_write_partial(self): + data = b'data' + self.sock.send.return_value = 2 + + transport = self.socket_transport() + transport.write(data) + + self.loop.assert_writer(7, transport._write_ready) + self.assertEqual(list_to_buffer([b'ta']), transport._buffer) + + def test_write_partial_bytearray(self): + data = bytearray(b'data') + self.sock.send.return_value = 2 + + transport = self.socket_transport() + transport.write(data) + + self.loop.assert_writer(7, transport._write_ready) + self.assertEqual(list_to_buffer([b'ta']), transport._buffer) + self.assertEqual(data, bytearray(b'data')) # Hasn't been mutated. + + def test_write_partial_memoryview(self): + data = memoryview(b'data') + self.sock.send.return_value = 2 + + transport = self.socket_transport() + transport.write(data) + + self.loop.assert_writer(7, transport._write_ready) + self.assertEqual(list_to_buffer([b'ta']), transport._buffer) + + def test_write_partial_none(self): + data = b'data' + self.sock.send.return_value = 0 + self.sock.fileno.return_value = 7 + + transport = self.socket_transport() + transport.write(data) + + self.loop.assert_writer(7, transport._write_ready) + self.assertEqual(list_to_buffer([b'data']), transport._buffer) + + def test_write_tryagain(self): + self.sock.send.side_effect = BlockingIOError + + data = b'data' + transport = self.socket_transport() + transport.write(data) + + self.loop.assert_writer(7, transport._write_ready) + self.assertEqual(list_to_buffer([b'data']), transport._buffer) + + @mock.patch('trollius.selector_events.logger') + def test_write_exception(self, m_log): + err = self.sock.send.side_effect = OSError() + + data = b'data' + transport = self.socket_transport() + transport._fatal_error = mock.Mock() + transport.write(data) + transport._fatal_error.assert_called_with( + err, + 'Fatal write error on socket transport') + transport._conn_lost = 1 + + self.sock.reset_mock() + transport.write(data) + self.assertFalse(self.sock.send.called) + self.assertEqual(transport._conn_lost, 2) + transport.write(data) + transport.write(data) + transport.write(data) + transport.write(data) + m_log.warning.assert_called_with('socket.send() raised exception.') + + def test_write_str(self): + transport = self.socket_transport() + self.assertRaises(TypeError, transport.write, 'str') + + def test_write_closing(self): + transport = self.socket_transport() + transport.close() + self.assertEqual(transport._conn_lost, 1) + transport.write(b'data') + self.assertEqual(transport._conn_lost, 2) + + def test_write_ready(self): + data = b'data' + self.sock.send.return_value = len(data) + + transport = self.socket_transport() + transport._buffer.extend(data) + self.loop.add_writer(7, transport._write_ready) + transport._write_ready() + self.assertTrue(self.sock.send.called) + self.assertFalse(self.loop.writers) + + 
def test_write_ready_closing(self): + data = b'data' + self.sock.send.return_value = len(data) + + transport = self.socket_transport() + transport._closing = True + transport._buffer.extend(data) + self.loop.add_writer(7, transport._write_ready) + transport._write_ready() + self.assertTrue(self.sock.send.called) + self.assertFalse(self.loop.writers) + self.sock.close.assert_called_with() + self.protocol.connection_lost.assert_called_with(None) + + def test_write_ready_no_data(self): + transport = self.socket_transport() + # This is an internal error. + self.assertRaises(AssertionError, transport._write_ready) + + def test_write_ready_partial(self): + data = b'data' + self.sock.send.return_value = 2 + + transport = self.socket_transport() + transport._buffer.extend(data) + self.loop.add_writer(7, transport._write_ready) + transport._write_ready() + self.loop.assert_writer(7, transport._write_ready) + self.assertEqual(list_to_buffer([b'ta']), transport._buffer) + + def test_write_ready_partial_none(self): + data = b'data' + self.sock.send.return_value = 0 + + transport = self.socket_transport() + transport._buffer.extend(data) + self.loop.add_writer(7, transport._write_ready) + transport._write_ready() + self.loop.assert_writer(7, transport._write_ready) + self.assertEqual(list_to_buffer([b'data']), transport._buffer) + + def test_write_ready_tryagain(self): + self.sock.send.side_effect = BlockingIOError + + transport = self.socket_transport() + transport._buffer = list_to_buffer([b'data1', b'data2']) + self.loop.add_writer(7, transport._write_ready) + transport._write_ready() + + self.loop.assert_writer(7, transport._write_ready) + self.assertEqual(list_to_buffer([b'data1data2']), transport._buffer) + + def test_write_ready_exception(self): + err = self.sock.send.side_effect = OSError() + + transport = self.socket_transport() + transport._fatal_error = mock.Mock() + transport._buffer.extend(b'data') + transport._write_ready() + transport._fatal_error.assert_called_with( + err, + 'Fatal write error on socket transport') + + @mock.patch('trollius.base_events.logger') + def test_write_ready_exception_and_close(self, m_log): + self.sock.send.side_effect = OSError() + remove_writer = self.loop.remove_writer = mock.Mock() + + transport = self.socket_transport() + transport.close() + transport._buffer.extend(b'data') + transport._write_ready() + remove_writer.assert_called_with(self.sock_fd) + + def test_write_eof(self): + tr = self.socket_transport() + self.assertTrue(tr.can_write_eof()) + tr.write_eof() + self.sock.shutdown.assert_called_with(socket.SHUT_WR) + tr.write_eof() + self.assertEqual(self.sock.shutdown.call_count, 1) + tr.close() + + def test_write_eof_buffer(self): + tr = self.socket_transport() + self.sock.send.side_effect = BlockingIOError + tr.write(b'data') + tr.write_eof() + self.assertEqual(tr._buffer, list_to_buffer([b'data'])) + self.assertTrue(tr._eof) + self.assertFalse(self.sock.shutdown.called) + self.sock.send.side_effect = lambda _: 4 + tr._write_ready() + self.assertTrue(self.sock.send.called) + self.sock.shutdown.assert_called_with(socket.SHUT_WR) + tr.close() + + +@unittest.skipIf(ssl is None, 'No ssl module') +class SelectorSslTransportTests(test_utils.TestCase): + + def setUp(self): + self.loop = self.new_test_loop() + self.protocol = test_utils.make_test_protocol(asyncio.Protocol) + self.sock = mock.Mock(socket.socket) + self.sock.fileno.return_value = 7 + self.sslsock = mock.Mock() + self.sslsock.fileno.return_value = 1 + self.sslcontext = mock.Mock() + 
self.sslcontext.wrap_socket.return_value = self.sslsock
+
+    def ssl_transport(self, waiter=None, server_hostname=None):
+        transport = _SelectorSslTransport(self.loop, self.sock, self.protocol,
+                                          self.sslcontext, waiter=waiter,
+                                          server_hostname=server_hostname)
+        self.addCleanup(close_transport, transport)
+        return transport
+
+    def _make_one(self, create_waiter=None):
+        transport = self.ssl_transport()
+        self.sock.reset_mock()
+        self.sslsock.reset_mock()
+        self.sslcontext.reset_mock()
+        self.loop.reset_counters()
+        return transport
+
+    def test_on_handshake(self):
+        waiter = asyncio.Future(loop=self.loop)
+        tr = self.ssl_transport(waiter=waiter)
+        self.assertTrue(self.sslsock.do_handshake.called)
+        self.loop.assert_reader(1, tr._read_ready)
+        test_utils.run_briefly(self.loop)
+        self.assertIsNone(waiter.result())
+
+    def test_on_handshake_reader_retry(self):
+        self.loop.set_debug(False)
+        self.sslsock.do_handshake.side_effect = SSLWantReadError
+        transport = self.ssl_transport()
+        self.loop.assert_reader(1, transport._on_handshake, None)
+
+    def test_on_handshake_writer_retry(self):
+        self.loop.set_debug(False)
+        self.sslsock.do_handshake.side_effect = SSLWantWriteError
+        transport = self.ssl_transport()
+        self.loop.assert_writer(1, transport._on_handshake, None)
+
+    def test_on_handshake_exc(self):
+        exc = ValueError()
+        self.sslsock.do_handshake.side_effect = exc
+        with test_utils.disable_logger():
+            waiter = asyncio.Future(loop=self.loop)
+            transport = self.ssl_transport(waiter=waiter)
+        self.assertTrue(waiter.done())
+        self.assertIs(exc, waiter.exception())
+        self.assertTrue(self.sslsock.close.called)
+
+    def test_on_handshake_base_exc(self):
+        waiter = asyncio.Future(loop=self.loop)
+        transport = self.ssl_transport(waiter=waiter)
+        exc = BaseException()
+        self.sslsock.do_handshake.side_effect = exc
+        with test_utils.disable_logger():
+            self.assertRaises(BaseException, transport._on_handshake, 0)
+        self.assertTrue(self.sslsock.close.called)
+        self.assertTrue(waiter.done())
+        self.assertIs(exc, waiter.exception())
+
+    def test_cancel_handshake(self):
+        # Python issue #23197: cancelling a handshake must not raise an
+        # exception or log an error, even if the handshake failed
+        waiter = asyncio.Future(loop=self.loop)
+        transport = self.ssl_transport(waiter=waiter)
+        waiter.cancel()
+        exc = ValueError()
+        self.sslsock.do_handshake.side_effect = exc
+        with test_utils.disable_logger():
+            transport._on_handshake(0)
+        transport.close()
+        test_utils.run_briefly(self.loop)
+
+    def test_pause_resume_reading(self):
+        tr = self._make_one()
+        self.assertFalse(tr._paused)
+        self.loop.assert_reader(1, tr._read_ready)
+        tr.pause_reading()
+        self.assertTrue(tr._paused)
+        self.assertFalse(1 in self.loop.readers)
+        tr.resume_reading()
+        self.assertFalse(tr._paused)
+        self.loop.assert_reader(1, tr._read_ready)
+        with self.assertRaises(RuntimeError):
+            tr.resume_reading()
+
+    def test_write(self):
+        transport = self._make_one()
+        transport.write(b'data')
+        self.assertEqual(list_to_buffer([b'data']), transport._buffer)
+
+    def test_write_bytearray(self):
+        transport = self._make_one()
+        data = bytearray(b'data')
+        transport.write(data)
+        self.assertEqual(list_to_buffer([b'data']), transport._buffer)
+        self.assertEqual(data, bytearray(b'data'))  # Hasn't been mutated.
+        self.assertIsNot(data, transport._buffer)  # Hasn't been incorporated.
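+
+    # As on the socket transport, write() must leave the caller's bytearray
+    # untouched; the SSL transport buffers its own copy of the data instead
+    # of adopting the bytearray itself.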
+ + def test_write_memoryview(self): + transport = self._make_one() + data = memoryview(b'data') + transport.write(data) + self.assertEqual(list_to_buffer([b'data']), transport._buffer) + + def test_write_no_data(self): + transport = self._make_one() + transport._buffer.extend(b'data') + transport.write(b'') + self.assertEqual(list_to_buffer([b'data']), transport._buffer) + + def test_write_str(self): + transport = self._make_one() + self.assertRaises(TypeError, transport.write, UNICODE_STR) + + def test_write_closing(self): + transport = self._make_one() + transport.close() + self.assertEqual(transport._conn_lost, 1) + transport.write(b'data') + self.assertEqual(transport._conn_lost, 2) + + @mock.patch('trollius.selector_events.logger') + def test_write_exception(self, m_log): + transport = self._make_one() + transport._conn_lost = 1 + transport.write(b'data') + self.assertEqual(transport._buffer, list_to_buffer()) + transport.write(b'data') + transport.write(b'data') + transport.write(b'data') + transport.write(b'data') + m_log.warning.assert_called_with('socket.send() raised exception.') + + @unittest.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround') + def test_read_ready_recv(self): + self.sslsock.recv.return_value = b'data' + transport = self._make_one() + transport._read_ready() + self.assertTrue(self.sslsock.recv.called) + self.assertEqual((b'data',), self.protocol.data_received.call_args[0]) + + def test_read_ready_write_wants_read(self): + self.loop.add_writer = mock.Mock() + self.sslsock.recv.side_effect = BlockingIOError + transport = self._make_one() + transport._write_wants_read = True + transport._write_ready = mock.Mock() + transport._buffer.extend(b'data') + transport._read_ready() + + self.assertFalse(transport._write_wants_read) + transport._write_ready.assert_called_with() + self.loop.add_writer.assert_called_with( + transport._sock_fd, transport._write_ready) + + @unittest.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround') + def test_read_ready_recv_eof(self): + self.sslsock.recv.return_value = b'' + transport = self._make_one() + transport.close = mock.Mock() + transport._read_ready() + transport.close.assert_called_with() + self.protocol.eof_received.assert_called_with() + + @unittest.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround') + def test_read_ready_recv_conn_reset(self): + err = self.sslsock.recv.side_effect = ConnectionResetError() + transport = self._make_one() + transport._force_close = mock.Mock() + with test_utils.disable_logger(): + transport._read_ready() + transport._force_close.assert_called_with(err) + + @unittest.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround') + def test_read_ready_recv_retry(self): + self.sslsock.recv.side_effect = SSLWantReadError + transport = self._make_one() + transport._read_ready() + self.assertTrue(self.sslsock.recv.called) + self.assertFalse(self.protocol.data_received.called) + + self.sslsock.recv.side_effect = BlockingIOError + transport._read_ready() + self.assertFalse(self.protocol.data_received.called) + + self.sslsock.recv.side_effect = InterruptedError + transport._read_ready() + self.assertFalse(self.protocol.data_received.called) + + @unittest.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround') + def test_read_ready_recv_write(self): + self.loop.remove_reader = mock.Mock() + self.loop.add_writer = mock.Mock() + self.sslsock.recv.side_effect = SSLWantWriteError + transport = self._make_one() + transport._read_ready() + 
self.assertFalse(self.protocol.data_received.called) + self.assertTrue(transport._read_wants_write) + + self.loop.remove_reader.assert_called_with(transport._sock_fd) + self.loop.add_writer.assert_called_with( + transport._sock_fd, transport._write_ready) + + @unittest.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround') + def test_read_ready_recv_exc(self): + err = self.sslsock.recv.side_effect = OSError() + transport = self._make_one() + transport._fatal_error = mock.Mock() + transport._read_ready() + transport._fatal_error.assert_called_with( + err, + 'Fatal read error on SSL transport') + + def test_write_ready_send(self): + self.sslsock.send.return_value = 4 + transport = self._make_one() + transport._buffer = list_to_buffer([b'data']) + transport._write_ready() + self.assertEqual(list_to_buffer(), transport._buffer) + self.assertTrue(self.sslsock.send.called) + + def test_write_ready_send_none(self): + self.sslsock.send.return_value = 0 + transport = self._make_one() + transport._buffer = list_to_buffer([b'data1', b'data2']) + transport._write_ready() + self.assertTrue(self.sslsock.send.called) + self.assertEqual(list_to_buffer([b'data1data2']), transport._buffer) + + def test_write_ready_send_partial(self): + self.sslsock.send.return_value = 2 + transport = self._make_one() + transport._buffer = list_to_buffer([b'data1', b'data2']) + transport._write_ready() + self.assertTrue(self.sslsock.send.called) + self.assertEqual(list_to_buffer([b'ta1data2']), transport._buffer) + + def test_write_ready_send_closing_partial(self): + self.sslsock.send.return_value = 2 + transport = self._make_one() + transport._buffer = list_to_buffer([b'data1', b'data2']) + transport._write_ready() + self.assertTrue(self.sslsock.send.called) + self.assertFalse(self.sslsock.close.called) + + def test_write_ready_send_closing(self): + self.sslsock.send.return_value = 4 + transport = self._make_one() + transport.close() + transport._buffer = list_to_buffer([b'data']) + transport._write_ready() + self.assertFalse(self.loop.writers) + self.protocol.connection_lost.assert_called_with(None) + + def test_write_ready_send_closing_empty_buffer(self): + self.sslsock.send.return_value = 4 + transport = self._make_one() + transport.close() + transport._buffer = list_to_buffer() + transport._write_ready() + self.assertFalse(self.loop.writers) + self.protocol.connection_lost.assert_called_with(None) + + def test_write_ready_send_retry(self): + transport = self._make_one() + transport._buffer = list_to_buffer([b'data']) + + self.sslsock.send.side_effect = SSLWantWriteError + transport._write_ready() + self.assertEqual(list_to_buffer([b'data']), transport._buffer) + + self.sslsock.send.side_effect = BlockingIOError() + transport._write_ready() + self.assertEqual(list_to_buffer([b'data']), transport._buffer) + + def test_write_ready_send_read(self): + transport = self._make_one() + transport._buffer = list_to_buffer([b'data']) + + self.loop.remove_writer = mock.Mock() + self.sslsock.send.side_effect = SSLWantReadError + transport._write_ready() + self.assertFalse(self.protocol.data_received.called) + self.assertTrue(transport._write_wants_read) + self.loop.remove_writer.assert_called_with(transport._sock_fd) + + def test_write_ready_send_exc(self): + err = self.sslsock.send.side_effect = OSError() + + transport = self._make_one() + transport._buffer = list_to_buffer([b'data']) + transport._fatal_error = mock.Mock() + transport._write_ready() + transport._fatal_error.assert_called_with( + err, + 'Fatal write error on 
SSL transport') + self.assertEqual(list_to_buffer(), transport._buffer) + + def test_write_ready_read_wants_write(self): + self.loop.add_reader = mock.Mock() + self.sslsock.send.side_effect = BlockingIOError + transport = self._make_one() + transport._read_wants_write = True + transport._read_ready = mock.Mock() + transport._write_ready() + + self.assertFalse(transport._read_wants_write) + transport._read_ready.assert_called_with() + self.loop.add_reader.assert_called_with( + transport._sock_fd, transport._read_ready) + + def test_write_eof(self): + tr = self._make_one() + self.assertFalse(tr.can_write_eof()) + self.assertRaises(NotImplementedError, tr.write_eof) + + def check_close(self): + tr = self._make_one() + tr.close() + + self.assertTrue(tr._closing) + self.assertEqual(1, self.loop.remove_reader_count[1]) + self.assertEqual(tr._conn_lost, 1) + + tr.close() + self.assertEqual(tr._conn_lost, 1) + self.assertEqual(1, self.loop.remove_reader_count[1]) + + test_utils.run_briefly(self.loop) + + def test_close(self): + self.check_close() + self.assertTrue(self.protocol.connection_made.called) + self.assertTrue(self.protocol.connection_lost.called) + + def test_close_not_connected(self): + self.sslsock.do_handshake.side_effect = SSLWantReadError + self.check_close() + self.assertFalse(self.protocol.connection_made.called) + self.assertFalse(self.protocol.connection_lost.called) + + @unittest.skipIf(ssl is None, 'No SSL support') + def test_server_hostname(self): + self.ssl_transport(server_hostname='localhost') + self.sslcontext.wrap_socket.assert_called_with( + self.sock, do_handshake_on_connect=False, server_side=False, + server_hostname='localhost') + + +class SelectorSslWithoutSslTransportTests(test_utils.TestCase): + + @mock.patch('trollius.selector_events.ssl', None) + def test_ssl_transport_requires_ssl_module(self): + Mock = mock.Mock + with self.assertRaises(RuntimeError): + _SelectorSslTransport(Mock(), Mock(), Mock(), Mock()) + + +class SelectorDatagramTransportTests(test_utils.TestCase): + + def setUp(self): + self.loop = self.new_test_loop() + self.protocol = test_utils.make_test_protocol(asyncio.DatagramProtocol) + self.sock = mock.Mock(spec_set=socket.socket) + self.sock.fileno.return_value = 7 + + def datagram_transport(self, address=None): + transport = _SelectorDatagramTransport(self.loop, self.sock, + self.protocol, + address=address) + self.addCleanup(close_transport, transport) + return transport + + def test_read_ready(self): + transport = self.datagram_transport() + + self.sock.recvfrom.return_value = (b'data', ('0.0.0.0', 1234)) + transport._read_ready() + + self.protocol.datagram_received.assert_called_with( + b'data', ('0.0.0.0', 1234)) + + def test_read_ready_tryagain(self): + transport = self.datagram_transport() + + self.sock.recvfrom.side_effect = BlockingIOError + transport._fatal_error = mock.Mock() + transport._read_ready() + + self.assertFalse(transport._fatal_error.called) + + def test_read_ready_err(self): + transport = self.datagram_transport() + + err = self.sock.recvfrom.side_effect = RuntimeError() + transport._fatal_error = mock.Mock() + transport._read_ready() + + transport._fatal_error.assert_called_with( + err, + 'Fatal read error on datagram transport') + + def test_read_ready_oserr(self): + transport = self.datagram_transport() + + err = self.sock.recvfrom.side_effect = OSError() + transport._fatal_error = mock.Mock() + transport._read_ready() + + self.assertFalse(transport._fatal_error.called) + 
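# Editor's aside (illustrative only): on a datagram transport an OSError
# from recvfrom() is recoverable (think ICMP "port unreachable" surfacing
# on a UDP socket), so it is routed to protocol.error_received() rather
# than tearing the transport down. The read handler is roughly:
#
#     try:
#         data, addr = self._sock.recvfrom(self.max_size)
#     except (BlockingIOError, InterruptedError):
#         pass                      # spurious wakeup: wait for the next event
#     except OSError as exc:
#         self._protocol.error_received(exc)
#     except Exception as exc:
#         self._fatal_error(exc, 'Fatal read error on datagram transport')
#     else:
#         self._protocol.datagram_received(data, addr)
#
# matching the assertion that follows.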
self.protocol.error_received.assert_called_with(err) + + def test_sendto(self): + data = b'data' + transport = self.datagram_transport() + transport.sendto(data, ('0.0.0.0', 1234)) + self.assertTrue(self.sock.sendto.called) + self.assertEqual( + self.sock.sendto.call_args[0], (data, ('0.0.0.0', 1234))) + + def test_sendto_bytearray(self): + data = bytearray(b'data') + transport = self.datagram_transport() + transport.sendto(data, ('0.0.0.0', 1234)) + self.assertTrue(self.sock.sendto.called) + self.assertEqual( + self.sock.sendto.call_args[0], (data, ('0.0.0.0', 1234))) + + def test_sendto_memoryview(self): + data = memoryview(b'data') + transport = self.datagram_transport() + transport.sendto(data, ('0.0.0.0', 1234)) + self.assertTrue(self.sock.sendto.called) + self.assertEqual( + self.sock.sendto.call_args[0], (b'data', ('0.0.0.0', 1234))) + + def test_sendto_no_data(self): + transport = self.datagram_transport() + transport._buffer.append((b'data', ('0.0.0.0', 12345))) + transport.sendto(b'', ()) + self.assertFalse(self.sock.sendto.called) + self.assertEqual( + [(b'data', ('0.0.0.0', 12345))], list(transport._buffer)) + + def test_sendto_buffer(self): + transport = self.datagram_transport() + transport._buffer.append((b'data1', ('0.0.0.0', 12345))) + transport.sendto(b'data2', ('0.0.0.0', 12345)) + self.assertFalse(self.sock.sendto.called) + self.assertEqual( + [(b'data1', ('0.0.0.0', 12345)), + (b'data2', ('0.0.0.0', 12345))], + list(transport._buffer)) + + def test_sendto_buffer_bytearray(self): + data2 = bytearray(b'data2') + transport = self.datagram_transport() + transport._buffer.append((b'data1', ('0.0.0.0', 12345))) + transport.sendto(data2, ('0.0.0.0', 12345)) + self.assertFalse(self.sock.sendto.called) + self.assertEqual( + [(b'data1', ('0.0.0.0', 12345)), + (b'data2', ('0.0.0.0', 12345))], + list(transport._buffer)) + self.assertIsInstance(transport._buffer[1][0], bytes) + + def test_sendto_buffer_memoryview(self): + data2 = memoryview(b'data2') + transport = self.datagram_transport() + transport._buffer.append((b'data1', ('0.0.0.0', 12345))) + transport.sendto(data2, ('0.0.0.0', 12345)) + self.assertFalse(self.sock.sendto.called) + self.assertEqual( + [(b'data1', ('0.0.0.0', 12345)), + (b'data2', ('0.0.0.0', 12345))], + list(transport._buffer)) + self.assertIsInstance(transport._buffer[1][0], bytes) + + def test_sendto_tryagain(self): + data = b'data' + + self.sock.sendto.side_effect = BlockingIOError + + transport = self.datagram_transport() + transport.sendto(data, ('0.0.0.0', 12345)) + + self.loop.assert_writer(7, transport._sendto_ready) + self.assertEqual( + [(b'data', ('0.0.0.0', 12345))], list(transport._buffer)) + + @mock.patch('trollius.selector_events.logger') + def test_sendto_exception(self, m_log): + data = b'data' + err = self.sock.sendto.side_effect = RuntimeError() + + transport = self.datagram_transport() + transport._fatal_error = mock.Mock() + transport.sendto(data, ()) + + self.assertTrue(transport._fatal_error.called) + transport._fatal_error.assert_called_with( + err, + 'Fatal write error on datagram transport') + transport._conn_lost = 1 + + transport._address = ('123',) + transport.sendto(data) + transport.sendto(data) + transport.sendto(data) + transport.sendto(data) + transport.sendto(data) + m_log.warning.assert_called_with('socket.send() raised exception.') + + def test_sendto_error_received(self): + data = b'data' + + self.sock.sendto.side_effect = ConnectionRefusedError + + transport = self.datagram_transport() + transport._fatal_error = 
mock.Mock() + transport.sendto(data, ()) + + self.assertEqual(transport._conn_lost, 0) + self.assertFalse(transport._fatal_error.called) + + def test_sendto_error_received_connected(self): + data = b'data' + + self.sock.send.side_effect = ConnectionRefusedError + + transport = self.datagram_transport(address=('0.0.0.0', 1)) + transport._fatal_error = mock.Mock() + transport.sendto(data) + + self.assertFalse(transport._fatal_error.called) + self.assertTrue(self.protocol.error_received.called) + + def test_sendto_str(self): + transport = self.datagram_transport() + self.assertRaises(TypeError, transport.sendto, UNICODE_STR, ()) + + def test_sendto_connected_addr(self): + transport = self.datagram_transport(address=('0.0.0.0', 1)) + self.assertRaises( + ValueError, transport.sendto, b'str', ('0.0.0.0', 2)) + + def test_sendto_closing(self): + transport = self.datagram_transport(address=(1,)) + transport.close() + self.assertEqual(transport._conn_lost, 1) + transport.sendto(b'data', (1,)) + self.assertEqual(transport._conn_lost, 2) + + def test_sendto_ready(self): + data = b'data' + self.sock.sendto.return_value = len(data) + + transport = self.datagram_transport() + transport._buffer.append((data, ('0.0.0.0', 12345))) + self.loop.add_writer(7, transport._sendto_ready) + transport._sendto_ready() + self.assertTrue(self.sock.sendto.called) + self.assertEqual( + self.sock.sendto.call_args[0], (data, ('0.0.0.0', 12345))) + self.assertFalse(self.loop.writers) + + def test_sendto_ready_closing(self): + data = b'data' + self.sock.send.return_value = len(data) + + transport = self.datagram_transport() + transport._closing = True + transport._buffer.append((data, ())) + self.loop.add_writer(7, transport._sendto_ready) + transport._sendto_ready() + self.sock.sendto.assert_called_with(data, ()) + self.assertFalse(self.loop.writers) + self.sock.close.assert_called_with() + self.protocol.connection_lost.assert_called_with(None) + + def test_sendto_ready_no_data(self): + transport = self.datagram_transport() + self.loop.add_writer(7, transport._sendto_ready) + transport._sendto_ready() + self.assertFalse(self.sock.sendto.called) + self.assertFalse(self.loop.writers) + + def test_sendto_ready_tryagain(self): + self.sock.sendto.side_effect = BlockingIOError + + transport = self.datagram_transport() + transport._buffer.extend([(b'data1', ()), (b'data2', ())]) + self.loop.add_writer(7, transport._sendto_ready) + transport._sendto_ready() + + self.loop.assert_writer(7, transport._sendto_ready) + self.assertEqual( + [(b'data1', ()), (b'data2', ())], + list(transport._buffer)) + + def test_sendto_ready_exception(self): + err = self.sock.sendto.side_effect = RuntimeError() + + transport = self.datagram_transport() + transport._fatal_error = mock.Mock() + transport._buffer.append((b'data', ())) + transport._sendto_ready() + + transport._fatal_error.assert_called_with( + err, + 'Fatal write error on datagram transport') + + def test_sendto_ready_error_received(self): + self.sock.sendto.side_effect = ConnectionRefusedError + + transport = self.datagram_transport() + transport._fatal_error = mock.Mock() + transport._buffer.append((b'data', ())) + transport._sendto_ready() + + self.assertFalse(transport._fatal_error.called) + + def test_sendto_ready_error_received_connection(self): + self.sock.send.side_effect = ConnectionRefusedError + + transport = self.datagram_transport(address=('0.0.0.0', 1)) + transport._fatal_error = mock.Mock() + transport._buffer.append((b'data', ())) + transport._sendto_ready() + + 
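# Editor's aside (illustrative only): the buffering contract exercised
# above is that sendto() transmits eagerly and only queues when the
# socket would block, registering the FD for write events. A sketch:
#
#     def sendto(self, data, addr=None):
#         if self._buffer:
#             self._buffer.append((bytes(data), addr))  # preserve ordering
#             return
#         try:
#             self._sock.sendto(data, addr)
#         except (BlockingIOError, InterruptedError):
#             self._loop.add_writer(self._sock_fd, self._sendto_ready)
#             self._buffer.append((bytes(data), addr))
#
# For a connected socket, ConnectionRefusedError is reported through
# protocol.error_received() instead of being treated as fatal, which is
# what the assertions below check.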
self.assertFalse(transport._fatal_error.called) + self.assertTrue(self.protocol.error_received.called) + + @mock.patch('trollius.base_events.logger.error') + def test_fatal_error_connected(self, m_exc): + transport = self.datagram_transport(address=('0.0.0.0', 1)) + err = ConnectionRefusedError() + transport._fatal_error(err) + self.assertFalse(self.protocol.error_received.called) + m_exc.assert_called_with( + test_utils.MockPattern( + 'Fatal error on transport\nprotocol:.*\ntransport:.*'), + exc_info=(ConnectionRefusedError, MOCK_ANY, MOCK_ANY)) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_selectors.py b/tests/test_selectors.py new file mode 100644 index 00000000..a06596ea --- /dev/null +++ b/tests/test_selectors.py @@ -0,0 +1,446 @@ +import errno +import os +import random +import signal +import sys +from time import sleep +try: + import resource +except ImportError: + resource = None + +from trollius import selectors +from trollius import test_support as support +from trollius import test_utils +from trollius.test_utils import mock +from trollius.test_utils import socketpair +from trollius.test_utils import unittest +from trollius.time_monotonic import time_monotonic as time + + +def find_ready_matching(ready, flag): + match = [] + for key, events in ready: + if events & flag: + match.append(key.fileobj) + return match + + +class BaseSelectorTestCase(object): + + def make_socketpair(self): + rd, wr = socketpair() + self.addCleanup(rd.close) + self.addCleanup(wr.close) + return rd, wr + + def test_register(self): + s = self.SELECTOR() + self.addCleanup(s.close) + + rd, wr = self.make_socketpair() + + key = s.register(rd, selectors.EVENT_READ, "data") + self.assertIsInstance(key, selectors.SelectorKey) + self.assertEqual(key.fileobj, rd) + self.assertEqual(key.fd, rd.fileno()) + self.assertEqual(key.events, selectors.EVENT_READ) + self.assertEqual(key.data, "data") + + # register an unknown event + self.assertRaises(ValueError, s.register, 0, 999999) + + # register an invalid FD + self.assertRaises(ValueError, s.register, -10, selectors.EVENT_READ) + + # register twice + self.assertRaises(KeyError, s.register, rd, selectors.EVENT_READ) + + # register the same FD, but with a different object + self.assertRaises(KeyError, s.register, rd.fileno(), + selectors.EVENT_READ) + + def test_unregister(self): + s = self.SELECTOR() + self.addCleanup(s.close) + + rd, wr = self.make_socketpair() + + s.register(rd, selectors.EVENT_READ) + s.unregister(rd) + + # unregister an unknown file obj + self.assertRaises(KeyError, s.unregister, 999999) + + # unregister twice + self.assertRaises(KeyError, s.unregister, rd) + + def test_unregister_after_fd_close(self): + s = self.SELECTOR() + self.addCleanup(s.close) + rd, wr = self.make_socketpair() + r, w = rd.fileno(), wr.fileno() + s.register(r, selectors.EVENT_READ) + s.register(w, selectors.EVENT_WRITE) + rd.close() + wr.close() + s.unregister(r) + s.unregister(w) + + @unittest.skipUnless(os.name == 'posix', "requires posix") + def test_unregister_after_fd_close_and_reuse(self): + s = self.SELECTOR() + self.addCleanup(s.close) + rd, wr = self.make_socketpair() + r, w = rd.fileno(), wr.fileno() + s.register(r, selectors.EVENT_READ) + s.register(w, selectors.EVENT_WRITE) + rd2, wr2 = self.make_socketpair() + rd.close() + wr.close() + os.dup2(rd2.fileno(), r) + os.dup2(wr2.fileno(), w) + self.addCleanup(os.close, r) + self.addCleanup(os.close, w) + s.unregister(r) + s.unregister(w) + + def test_unregister_after_socket_close(self): + s 
= self.SELECTOR() + self.addCleanup(s.close) + rd, wr = self.make_socketpair() + s.register(rd, selectors.EVENT_READ) + s.register(wr, selectors.EVENT_WRITE) + rd.close() + wr.close() + s.unregister(rd) + s.unregister(wr) + + def test_modify(self): + s = self.SELECTOR() + self.addCleanup(s.close) + + rd, wr = self.make_socketpair() + + key = s.register(rd, selectors.EVENT_READ) + + # modify events + key2 = s.modify(rd, selectors.EVENT_WRITE) + self.assertNotEqual(key.events, key2.events) + self.assertEqual(key2, s.get_key(rd)) + + s.unregister(rd) + + # modify data + d1 = object() + d2 = object() + + key = s.register(rd, selectors.EVENT_READ, d1) + key2 = s.modify(rd, selectors.EVENT_READ, d2) + self.assertEqual(key.events, key2.events) + self.assertNotEqual(key.data, key2.data) + self.assertEqual(key2, s.get_key(rd)) + self.assertEqual(key2.data, d2) + + # modify unknown file obj + self.assertRaises(KeyError, s.modify, 999999, selectors.EVENT_READ) + + # modify use a shortcut + d3 = object() + s.register = mock.Mock() + s.unregister = mock.Mock() + + s.modify(rd, selectors.EVENT_READ, d3) + self.assertFalse(s.register.called) + self.assertFalse(s.unregister.called) + + def test_close(self): + s = self.SELECTOR() + self.addCleanup(s.close) + + mapping = s.get_map() + rd, wr = self.make_socketpair() + + s.register(rd, selectors.EVENT_READ) + s.register(wr, selectors.EVENT_WRITE) + + s.close() + self.assertRaises(RuntimeError, s.get_key, rd) + self.assertRaises(RuntimeError, s.get_key, wr) + self.assertRaises(KeyError, mapping.__getitem__, rd) + self.assertRaises(KeyError, mapping.__getitem__, wr) + + def test_get_key(self): + s = self.SELECTOR() + self.addCleanup(s.close) + + rd, wr = self.make_socketpair() + + key = s.register(rd, selectors.EVENT_READ, "data") + self.assertEqual(key, s.get_key(rd)) + + # unknown file obj + self.assertRaises(KeyError, s.get_key, 999999) + + def test_get_map(self): + s = self.SELECTOR() + self.addCleanup(s.close) + + rd, wr = self.make_socketpair() + + keys = s.get_map() + self.assertFalse(keys) + self.assertEqual(len(keys), 0) + self.assertEqual(list(keys), []) + key = s.register(rd, selectors.EVENT_READ, "data") + self.assertIn(rd, keys) + self.assertEqual(key, keys[rd]) + self.assertEqual(len(keys), 1) + self.assertEqual(list(keys), [rd.fileno()]) + self.assertEqual(list(keys.values()), [key]) + + # unknown file obj + with self.assertRaises(KeyError): + keys[999999] + + # Read-only mapping + with self.assertRaises(TypeError): + del keys[rd] + + def test_select(self): + s = self.SELECTOR() + self.addCleanup(s.close) + + rd, wr = self.make_socketpair() + + s.register(rd, selectors.EVENT_READ) + wr_key = s.register(wr, selectors.EVENT_WRITE) + + result = s.select() + for key, events in result: + self.assertTrue(isinstance(key, selectors.SelectorKey)) + self.assertTrue(events) + self.assertFalse(events & ~(selectors.EVENT_READ | + selectors.EVENT_WRITE)) + + self.assertEqual([(wr_key, selectors.EVENT_WRITE)], result) + + def test_context_manager(self): + s = self.SELECTOR() + self.addCleanup(s.close) + + rd, wr = self.make_socketpair() + + with s as sel: + sel.register(rd, selectors.EVENT_READ) + sel.register(wr, selectors.EVENT_WRITE) + + self.assertRaises(RuntimeError, s.get_key, rd) + self.assertRaises(RuntimeError, s.get_key, wr) + + def test_fileno(self): + s = self.SELECTOR() + self.addCleanup(s.close) + + if hasattr(s, 'fileno'): + fd = s.fileno() + self.assertTrue(isinstance(fd, int)) + self.assertGreaterEqual(fd, 0) + + def test_selector(self): + s = 
self.SELECTOR() + self.addCleanup(s.close) + + NUM_SOCKETS = 12 + MSG = b" This is a test." + MSG_LEN = len(MSG) + readers = [] + writers = [] + r2w = {} + w2r = {} + + for i in range(NUM_SOCKETS): + rd, wr = self.make_socketpair() + s.register(rd, selectors.EVENT_READ) + s.register(wr, selectors.EVENT_WRITE) + readers.append(rd) + writers.append(wr) + r2w[rd] = wr + w2r[wr] = rd + + bufs = [] + + while writers: + ready = s.select() + ready_writers = find_ready_matching(ready, selectors.EVENT_WRITE) + if not ready_writers: + self.fail("no sockets ready for writing") + wr = random.choice(ready_writers) + wr.send(MSG) + + for i in range(10): + ready = s.select() + ready_readers = find_ready_matching(ready, + selectors.EVENT_READ) + if ready_readers: + break + # there might be a delay between the write to the write end and + # the read end is reported ready + sleep(0.1) + else: + self.fail("no sockets ready for reading") + self.assertEqual([w2r[wr]], ready_readers) + rd = ready_readers[0] + buf = rd.recv(MSG_LEN) + self.assertEqual(len(buf), MSG_LEN) + bufs.append(buf) + s.unregister(r2w[rd]) + s.unregister(rd) + writers.remove(r2w[rd]) + + self.assertEqual(bufs, [MSG] * NUM_SOCKETS) + + @unittest.skipIf(sys.platform == 'win32', + 'select.select() cannot be used with empty fd sets') + def test_empty_select(self): + s = self.SELECTOR() + self.addCleanup(s.close) + self.assertEqual(s.select(timeout=0), []) + + def test_timeout(self): + s = self.SELECTOR() + self.addCleanup(s.close) + + rd, wr = self.make_socketpair() + + s.register(wr, selectors.EVENT_WRITE) + t = time() + self.assertEqual(1, len(s.select(0))) + self.assertEqual(1, len(s.select(-1))) + self.assertLess(time() - t, 0.5) + + s.unregister(wr) + s.register(rd, selectors.EVENT_READ) + t = time() + self.assertFalse(s.select(0)) + self.assertFalse(s.select(-1)) + self.assertLess(time() - t, 0.5) + + t0 = time() + self.assertFalse(s.select(1)) + t1 = time() + dt = t1 - t0 + # Tolerate 2.0 seconds for very slow buildbots + self.assertTrue(0.8 <= dt <= 2.0, dt) + + @unittest.skipUnless(hasattr(signal, "alarm"), + "signal.alarm() required for this test") + def test_select_interrupt(self): + s = self.SELECTOR() + self.addCleanup(s.close) + + rd, wr = self.make_socketpair() + + orig_alrm_handler = signal.signal(signal.SIGALRM, lambda *args: None) + self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler) + self.addCleanup(signal.alarm, 0) + + signal.alarm(1) + + s.register(rd, selectors.EVENT_READ) + t = time() + self.assertFalse(s.select(2)) + self.assertLess(time() - t, 2.5) + + +class ScalableSelectorMixIn(object): + + # see issue #18963 for why it's skipped on older OS X versions + @support.requires_mac_ver(10, 5) + @unittest.skipUnless(resource, "Test needs resource module") + def test_above_fd_setsize(self): + # A scalable implementation should have no problem with more than + # FD_SETSIZE file descriptors. Since we don't know the value, we just + # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling. + soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) + try: + resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard)) + self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE, + (soft, hard)) + NUM_FDS = min(hard, 2**16) + except (OSError, ValueError): + NUM_FDS = soft + + # guard for already allocated FDs (stdin, stdout...) 
+ NUM_FDS -= 32 + + s = self.SELECTOR() + self.addCleanup(s.close) + + for i in range(NUM_FDS // 2): + try: + rd, wr = self.make_socketpair() + except OSError: + # too many FDs, skip - note that we should only catch EMFILE + # here, but apparently *BSD and Solaris can fail upon connect() + # or bind() with EADDRNOTAVAIL, so let's be safe + self.skipTest("FD limit reached") + + try: + s.register(rd, selectors.EVENT_READ) + s.register(wr, selectors.EVENT_WRITE) + except OSError as e: + if e.errno == errno.ENOSPC: + # this can be raised by epoll if we go over + # fs.epoll.max_user_watches sysctl + self.skipTest("FD limit reached") + raise + + self.assertEqual(NUM_FDS // 2, len(s.select())) + + +class DefaultSelectorTestCase(BaseSelectorTestCase, test_utils.TestCase): + + SELECTOR = selectors.DefaultSelector + + +class SelectSelectorTestCase(BaseSelectorTestCase, test_utils.TestCase): + + SELECTOR = selectors.SelectSelector + + +@unittest.skipUnless(hasattr(selectors, 'PollSelector'), + "Test needs selectors.PollSelector") +class PollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn, + test_utils.TestCase): + + SELECTOR = getattr(selectors, 'PollSelector', None) + + +@unittest.skipUnless(hasattr(selectors, 'EpollSelector'), + "Test needs selectors.EpollSelector") +class EpollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn, + test_utils.TestCase): + + SELECTOR = getattr(selectors, 'EpollSelector', None) + + +@unittest.skipUnless(hasattr(selectors, 'KqueueSelector'), + "Test needs selectors.KqueueSelector") +class KqueueSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn, + test_utils.TestCase): + + SELECTOR = getattr(selectors, 'KqueueSelector', None) + + +@unittest.skipUnless(hasattr(selectors, 'DevpollSelector'), + "Test needs selectors.DevpollSelector") +class DevpollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn, + test_utils.TestCase): + + SELECTOR = getattr(selectors, 'DevpollSelector', None) + + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_sslproto.py b/tests/test_sslproto.py new file mode 100644 index 00000000..aa2af326 --- /dev/null +++ b/tests/test_sslproto.py @@ -0,0 +1,72 @@ +"""Tests for asyncio/sslproto.py.""" + +try: + import ssl +except ImportError: + ssl = None + +import trollius as asyncio +from trollius import ConnectionResetError +from trollius import sslproto +from trollius import test_utils +from trollius.test_utils import mock +from trollius.test_utils import unittest + + +@unittest.skipIf(ssl is None, 'No ssl module') +class SslProtoHandshakeTests(test_utils.TestCase): + + def setUp(self): + self.loop = asyncio.new_event_loop() + self.set_event_loop(self.loop) + + def ssl_protocol(self, waiter=None): + sslcontext = test_utils.dummy_ssl_context() + app_proto = asyncio.Protocol() + proto = sslproto.SSLProtocol(self.loop, app_proto, sslcontext, waiter) + self.addCleanup(proto._app_transport.close) + return proto + + def connection_made(self, ssl_proto, do_handshake=None): + transport = mock.Mock() + sslpipe = mock.Mock() + sslpipe.shutdown.return_value = b'' + if do_handshake: + sslpipe.do_handshake.side_effect = do_handshake + else: + def mock_handshake(callback): + return [] + sslpipe.do_handshake.side_effect = mock_handshake + with mock.patch('trollius.sslproto._SSLPipe', return_value=sslpipe): + ssl_proto.connection_made(transport) + + def test_cancel_handshake(self): + # Python issue #23197: cancelling a handshake must not raise an + # exception or log an error, even if the handshake failed +
waiter = asyncio.Future(loop=self.loop) + ssl_proto = self.ssl_protocol(waiter) + handshake_fut = asyncio.Future(loop=self.loop) + + def do_handshake(callback): + exc = Exception() + callback(exc) + handshake_fut.set_result(None) + return [] + + waiter.cancel() + self.connection_made(ssl_proto, do_handshake) + + with test_utils.disable_logger(): + self.loop.run_until_complete(handshake_fut) + + def test_eof_received_waiter(self): + waiter = asyncio.Future(loop=self.loop) + ssl_proto = self.ssl_protocol(waiter) + self.connection_made(ssl_proto) + ssl_proto.eof_received() + test_utils.run_briefly(self.loop) + self.assertIsInstance(waiter.exception(), ConnectionResetError) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_streams.py b/tests/test_streams.py new file mode 100644 index 00000000..390174ca --- /dev/null +++ b/tests/test_streams.py @@ -0,0 +1,642 @@ +"""Tests for streams.py.""" + +import gc +import io +import os +import socket +import six +import sys +try: + import ssl +except ImportError: + ssl = None + +import trollius as asyncio +from trollius import Return, From +from trollius import compat +from trollius import test_utils +from trollius.test_utils import mock +from trollius.test_utils import unittest + + +class StreamReaderTests(test_utils.TestCase): + + DATA = b'line1\nline2\nline3\n' + + def setUp(self): + self.loop = asyncio.new_event_loop() + self.set_event_loop(self.loop) + + def tearDown(self): + # just in case if we have transport close callbacks + test_utils.run_briefly(self.loop) + + self.loop.close() + gc.collect() + super(StreamReaderTests, self).tearDown() + + @mock.patch('trollius.streams.events') + def test_ctor_global_loop(self, m_events): + stream = asyncio.StreamReader() + self.assertIs(stream._loop, m_events.get_event_loop.return_value) + + def _basetest_open_connection(self, open_connection_fut): + reader, writer = self.loop.run_until_complete(open_connection_fut) + writer.write(b'GET / HTTP/1.0\r\n\r\n') + f = reader.readline() + data = self.loop.run_until_complete(f) + self.assertEqual(data, b'HTTP/1.0 200 OK\r\n') + f = reader.read() + data = self.loop.run_until_complete(f) + self.assertTrue(data.endswith(b'\r\n\r\nTest message')) + writer.close() + + def test_open_connection(self): + with test_utils.run_test_server() as httpd: + conn_fut = asyncio.open_connection(*httpd.address, + loop=self.loop) + self._basetest_open_connection(conn_fut) + + @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') + def test_open_unix_connection(self): + with test_utils.run_test_unix_server() as httpd: + conn_fut = asyncio.open_unix_connection(httpd.address, + loop=self.loop) + self._basetest_open_connection(conn_fut) + + def _basetest_open_connection_no_loop_ssl(self, open_connection_fut): + try: + reader, writer = self.loop.run_until_complete(open_connection_fut) + finally: + asyncio.set_event_loop(None) + writer.write(b'GET / HTTP/1.0\r\n\r\n') + f = reader.read() + data = self.loop.run_until_complete(f) + self.assertTrue(data.endswith(b'\r\n\r\nTest message')) + + writer.close() + + @unittest.skipIf(ssl is None, 'No ssl module') + def test_open_connection_no_loop_ssl(self): + with test_utils.run_test_server(use_ssl=True) as httpd: + conn_fut = asyncio.open_connection( + *httpd.address, + ssl=test_utils.dummy_ssl_context(), + loop=self.loop) + + self._basetest_open_connection_no_loop_ssl(conn_fut) + + @unittest.skipIf(ssl is None, 'No ssl module') + @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') + def 
test_open_unix_connection_no_loop_ssl(self): + with test_utils.run_test_unix_server(use_ssl=True) as httpd: + conn_fut = asyncio.open_unix_connection( + httpd.address, + ssl=test_utils.dummy_ssl_context(), + server_hostname='', + loop=self.loop) + + self._basetest_open_connection_no_loop_ssl(conn_fut) + + def _basetest_open_connection_error(self, open_connection_fut): + reader, writer = self.loop.run_until_complete(open_connection_fut) + writer._protocol.connection_lost(ZeroDivisionError()) + f = reader.read() + with self.assertRaises(ZeroDivisionError): + self.loop.run_until_complete(f) + writer.close() + test_utils.run_briefly(self.loop) + + def test_open_connection_error(self): + with test_utils.run_test_server() as httpd: + conn_fut = asyncio.open_connection(*httpd.address, + loop=self.loop) + self._basetest_open_connection_error(conn_fut) + + @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') + def test_open_unix_connection_error(self): + with test_utils.run_test_unix_server() as httpd: + conn_fut = asyncio.open_unix_connection(httpd.address, + loop=self.loop) + self._basetest_open_connection_error(conn_fut) + + def test_feed_empty_data(self): + stream = asyncio.StreamReader(loop=self.loop) + + stream.feed_data(b'') + self.assertEqual(b'', stream._buffer) + + def test_feed_nonempty_data(self): + stream = asyncio.StreamReader(loop=self.loop) + + stream.feed_data(self.DATA) + self.assertEqual(self.DATA, stream._buffer) + + def test_read_zero(self): + # Read zero bytes. + stream = asyncio.StreamReader(loop=self.loop) + stream.feed_data(self.DATA) + + data = self.loop.run_until_complete(stream.read(0)) + self.assertEqual(b'', data) + self.assertEqual(self.DATA, stream._buffer) + + def test_read(self): + # Read bytes. + stream = asyncio.StreamReader(loop=self.loop) + read_task = asyncio.Task(stream.read(30), loop=self.loop) + + def cb(): + stream.feed_data(self.DATA) + self.loop.call_soon(cb) + + data = self.loop.run_until_complete(read_task) + self.assertEqual(self.DATA, data) + self.assertEqual(b'', stream._buffer) + + def test_read_line_breaks(self): + # Read bytes without line breaks. + stream = asyncio.StreamReader(loop=self.loop) + stream.feed_data(b'line1') + stream.feed_data(b'line2') + + data = self.loop.run_until_complete(stream.read(5)) + + self.assertEqual(b'line1', data) + self.assertEqual(b'line2', stream._buffer) + + def test_read_eof(self): + # Read bytes, stop at eof. + stream = asyncio.StreamReader(loop=self.loop) + read_task = asyncio.Task(stream.read(1024), loop=self.loop) + + def cb(): + stream.feed_eof() + self.loop.call_soon(cb) + + data = self.loop.run_until_complete(read_task) + self.assertEqual(b'', data) + self.assertEqual(b'', stream._buffer) + + def test_read_until_eof(self): + # Read all bytes until eof. + stream = asyncio.StreamReader(loop=self.loop) + read_task = asyncio.Task(stream.read(-1), loop=self.loop) + + def cb(): + stream.feed_data(b'chunk1\n') + stream.feed_data(b'chunk2') + stream.feed_eof() + self.loop.call_soon(cb) + + data = self.loop.run_until_complete(read_task) + + self.assertEqual(b'chunk1\nchunk2', data) + self.assertEqual(b'', stream._buffer) + + def test_read_exception(self): + stream = asyncio.StreamReader(loop=self.loop) + stream.feed_data(b'line\n') + + data = self.loop.run_until_complete(stream.read(2)) + self.assertEqual(b'li', data) + + stream.set_exception(ValueError()) + self.assertRaises( + ValueError, self.loop.run_until_complete, stream.read(2)) + + def test_readline(self): + # Read one line. 
'readline' will need to wait for the data + # to come from 'cb' + stream = asyncio.StreamReader(loop=self.loop) + stream.feed_data(b'chunk1 ') + read_task = asyncio.Task(stream.readline(), loop=self.loop) + + def cb(): + stream.feed_data(b'chunk2 ') + stream.feed_data(b'chunk3 ') + stream.feed_data(b'\n chunk4') + self.loop.call_soon(cb) + + line = self.loop.run_until_complete(read_task) + self.assertEqual(b'chunk1 chunk2 chunk3 \n', line) + self.assertEqual(b' chunk4', stream._buffer) + + def test_readline_limit_with_existing_data(self): + # Read one line. The data is in StreamReader's buffer + # before the event loop is run. + + stream = asyncio.StreamReader(limit=3, loop=self.loop) + stream.feed_data(b'li') + stream.feed_data(b'ne1\nline2\n') + + self.assertRaises( + ValueError, self.loop.run_until_complete, stream.readline()) + # The buffer should contain the remaining data after exception + self.assertEqual(b'line2\n', stream._buffer) + + stream = asyncio.StreamReader(limit=3, loop=self.loop) + stream.feed_data(b'li') + stream.feed_data(b'ne1') + stream.feed_data(b'li') + + self.assertRaises( + ValueError, self.loop.run_until_complete, stream.readline()) + # No b'\n' at the end. The 'limit' is set to 3. So before + # waiting for the new data in buffer, 'readline' will consume + # the entire buffer, and since the length of the consumed data + # is more than 3, it will raise a ValueError. The buffer is + # expected to be empty now. + self.assertEqual(b'', stream._buffer) + + def test_at_eof(self): + stream = asyncio.StreamReader(loop=self.loop) + self.assertFalse(stream.at_eof()) + + stream.feed_data(b'some data\n') + self.assertFalse(stream.at_eof()) + + self.loop.run_until_complete(stream.readline()) + self.assertFalse(stream.at_eof()) + + stream.feed_data(b'some data\n') + stream.feed_eof() + self.loop.run_until_complete(stream.readline()) + self.assertTrue(stream.at_eof()) + + def test_readline_limit(self): + # Read one line. StreamReaders are fed with data after + # their 'readline' methods are called. + + stream = asyncio.StreamReader(limit=7, loop=self.loop) + def cb(): + stream.feed_data(b'chunk1') + stream.feed_data(b'chunk2') + stream.feed_data(b'chunk3\n') + stream.feed_eof() + self.loop.call_soon(cb) + + self.assertRaises( + ValueError, self.loop.run_until_complete, stream.readline()) + # The buffer had just one line of data, and after raising + # a ValueError it should be empty. + self.assertEqual(b'', stream._buffer) + + stream = asyncio.StreamReader(limit=7, loop=self.loop) + def cb(): + stream.feed_data(b'chunk1') + stream.feed_data(b'chunk2\n') + stream.feed_data(b'chunk3\n') + stream.feed_eof() + self.loop.call_soon(cb) + + self.assertRaises( + ValueError, self.loop.run_until_complete, stream.readline()) + self.assertEqual(b'chunk3\n', stream._buffer) + + def test_readline_nolimit_nowait(self): + # All needed data for the first 'readline' call will be + # in the buffer. 
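# Editor's aside (illustrative only): outside the test suite, the
# typical readline() pattern in trollius code looks like:
#
#     @asyncio.coroutine
#     def read_greeting(reader):
#         line = yield From(reader.readline())
#         raise Return(line.rstrip(b'\n'))
#
# The test below feeds the buffer up front, so readline() completes
# without waiting.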
+ stream = asyncio.StreamReader(loop=self.loop) + stream.feed_data(self.DATA[:6]) + stream.feed_data(self.DATA[6:]) + + line = self.loop.run_until_complete(stream.readline()) + + self.assertEqual(b'line1\n', line) + self.assertEqual(b'line2\nline3\n', stream._buffer) + + def test_readline_eof(self): + stream = asyncio.StreamReader(loop=self.loop) + stream.feed_data(b'some data') + stream.feed_eof() + + line = self.loop.run_until_complete(stream.readline()) + self.assertEqual(b'some data', line) + + def test_readline_empty_eof(self): + stream = asyncio.StreamReader(loop=self.loop) + stream.feed_eof() + + line = self.loop.run_until_complete(stream.readline()) + self.assertEqual(b'', line) + + def test_readline_read_byte_count(self): + stream = asyncio.StreamReader(loop=self.loop) + stream.feed_data(self.DATA) + + self.loop.run_until_complete(stream.readline()) + + data = self.loop.run_until_complete(stream.read(7)) + + self.assertEqual(b'line2\nl', data) + self.assertEqual(b'ine3\n', stream._buffer) + + def test_readline_exception(self): + stream = asyncio.StreamReader(loop=self.loop) + stream.feed_data(b'line\n') + + data = self.loop.run_until_complete(stream.readline()) + self.assertEqual(b'line\n', data) + + stream.set_exception(ValueError()) + self.assertRaises( + ValueError, self.loop.run_until_complete, stream.readline()) + self.assertEqual(b'', stream._buffer) + + def test_readexactly_zero_or_less(self): + # Read exact number of bytes (zero or less). + stream = asyncio.StreamReader(loop=self.loop) + stream.feed_data(self.DATA) + + data = self.loop.run_until_complete(stream.readexactly(0)) + self.assertEqual(b'', data) + self.assertEqual(self.DATA, stream._buffer) + + data = self.loop.run_until_complete(stream.readexactly(-1)) + self.assertEqual(b'', data) + self.assertEqual(self.DATA, stream._buffer) + + def test_readexactly(self): + # Read exact number of bytes. + stream = asyncio.StreamReader(loop=self.loop) + + n = 2 * len(self.DATA) + read_task = asyncio.Task(stream.readexactly(n), loop=self.loop) + + def cb(): + stream.feed_data(self.DATA) + stream.feed_data(self.DATA) + stream.feed_data(self.DATA) + self.loop.call_soon(cb) + + data = self.loop.run_until_complete(read_task) + self.assertEqual(self.DATA + self.DATA, data) + self.assertEqual(self.DATA, stream._buffer) + + def test_readexactly_eof(self): + # Read exact number of bytes (eof). 
+ stream = asyncio.StreamReader(loop=self.loop) + n = 2 * len(self.DATA) + read_task = asyncio.Task(stream.readexactly(n), loop=self.loop) + + def cb(): + stream.feed_data(self.DATA) + stream.feed_eof() + self.loop.call_soon(cb) + + with self.assertRaises(asyncio.IncompleteReadError) as cm: + self.loop.run_until_complete(read_task) + self.assertEqual(cm.exception.partial, self.DATA) + self.assertEqual(cm.exception.expected, n) + self.assertEqual(str(cm.exception), + '18 bytes read on a total of 36 expected bytes') + self.assertEqual(b'', stream._buffer) + + def test_readexactly_exception(self): + stream = asyncio.StreamReader(loop=self.loop) + stream.feed_data(b'line\n') + + data = self.loop.run_until_complete(stream.readexactly(2)) + self.assertEqual(b'li', data) + + stream.set_exception(ValueError()) + self.assertRaises( + ValueError, self.loop.run_until_complete, stream.readexactly(2)) + + def test_exception(self): + stream = asyncio.StreamReader(loop=self.loop) + self.assertIsNone(stream.exception()) + + exc = ValueError() + stream.set_exception(exc) + self.assertIs(stream.exception(), exc) + + def test_exception_waiter(self): + stream = asyncio.StreamReader(loop=self.loop) + + @asyncio.coroutine + def set_err(): + self.loop.call_soon(stream.set_exception, ValueError()) + + t1 = asyncio.Task(stream.readline(), loop=self.loop) + t2 = asyncio.Task(set_err(), loop=self.loop) + + self.loop.run_until_complete(asyncio.wait([t1, t2], loop=self.loop)) + + self.assertRaises(ValueError, t1.result) + + def test_exception_cancel(self): + stream = asyncio.StreamReader(loop=self.loop) + + t = asyncio.Task(stream.readline(), loop=self.loop) + test_utils.run_briefly(self.loop) + t.cancel() + test_utils.run_briefly(self.loop) + # The following line fails if set_exception() isn't careful. 
+ stream.set_exception(RuntimeError('message')) + test_utils.run_briefly(self.loop) + self.assertIs(stream._waiter, None) + + def test_start_server(self): + + class MyServer: + + def __init__(self, loop): + self.server = None + self.loop = loop + + @asyncio.coroutine + def handle_client(self, client_reader, client_writer): + data = yield From(client_reader.readline()) + client_writer.write(data) + yield From(client_writer.drain()) + client_writer.close() + + def start(self): + sock = socket.socket() + sock.bind(('127.0.0.1', 0)) + self.server = self.loop.run_until_complete( + asyncio.start_server(self.handle_client, + sock=sock, + loop=self.loop)) + return sock.getsockname() + + def handle_client_callback(self, client_reader, client_writer): + self.loop.create_task(self.handle_client(client_reader, + client_writer)) + + def start_callback(self): + sock = socket.socket() + sock.bind(('127.0.0.1', 0)) + addr = sock.getsockname() + sock.close() + self.server = self.loop.run_until_complete( + asyncio.start_server(self.handle_client_callback, + host=addr[0], port=addr[1], + loop=self.loop)) + return addr + + def stop(self): + if self.server is not None: + self.server.close() + self.loop.run_until_complete(self.server.wait_closed()) + self.server = None + + @asyncio.coroutine + def client(addr): + reader, writer = yield From(asyncio.open_connection( + *addr, loop=self.loop)) + # send a line + writer.write(b"hello world!\n") + # read it back + msgback = yield From(reader.readline()) + writer.close() + raise Return(msgback) + + # test the server variant with a coroutine as client handler + server = MyServer(self.loop) + addr = server.start() + msg = self.loop.run_until_complete(asyncio.Task(client(addr), + loop=self.loop)) + server.stop() + self.assertEqual(msg, b"hello world!\n") + + # test the server variant with a callback as client handler + server = MyServer(self.loop) + addr = server.start_callback() + msg = self.loop.run_until_complete(asyncio.Task(client(addr), + loop=self.loop)) + server.stop() + self.assertEqual(msg, b"hello world!\n") + + @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') + def test_start_unix_server(self): + + class MyServer: + + def __init__(self, loop, path): + self.server = None + self.loop = loop + self.path = path + + @asyncio.coroutine + def handle_client(self, client_reader, client_writer): + data = yield From(client_reader.readline()) + client_writer.write(data) + yield From(client_writer.drain()) + client_writer.close() + + def start(self): + self.server = self.loop.run_until_complete( + asyncio.start_unix_server(self.handle_client, + path=self.path, + loop=self.loop)) + + def handle_client_callback(self, client_reader, client_writer): + self.loop.create_task(self.handle_client(client_reader, + client_writer)) + + def start_callback(self): + start = asyncio.start_unix_server(self.handle_client_callback, + path=self.path, + loop=self.loop) + self.server = self.loop.run_until_complete(start) + + def stop(self): + if self.server is not None: + self.server.close() + self.loop.run_until_complete(self.server.wait_closed()) + self.server = None + + @asyncio.coroutine + def client(path): + reader, writer = yield From(asyncio.open_unix_connection( + path, loop=self.loop)) + # send a line + writer.write(b"hello world!\n") + # read it back + msgback = yield From(reader.readline()) + writer.close() + raise Return(msgback) + + # test the server variant with a coroutine as client handler + with test_utils.unix_socket_path() as path: + server = 
MyServer(self.loop, path) + server.start() + msg = self.loop.run_until_complete(asyncio.Task(client(path), + loop=self.loop)) + server.stop() + self.assertEqual(msg, b"hello world!\n") + + # test the server variant with a callback as client handler + with test_utils.unix_socket_path() as path: + server = MyServer(self.loop, path) + server.start_callback() + msg = self.loop.run_until_complete(asyncio.Task(client(path), + loop=self.loop)) + server.stop() + self.assertEqual(msg, b"hello world!\n") + + @unittest.skipIf(sys.platform == 'win32', "Don't have pipes") + def test_read_all_from_pipe_reader(self): + # See asyncio issue 168. This test is derived from the example + # subprocess_attach_read_pipe.py, but we configure the + # StreamReader's limit so that twice the limit is less than the size + # of the data written. Also we must explicitly attach a child + # watcher to the event loop. + + code = """\ +import os, sys +fd = int(sys.argv[1]) +os.write(fd, b'data') +os.close(fd) +""" + rfd, wfd = os.pipe() + args = [sys.executable, '-c', code, str(wfd)] + + pipe = io.open(rfd, 'rb', 0) + reader = asyncio.StreamReader(loop=self.loop, limit=1) + protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop) + transport, _ = self.loop.run_until_complete( + self.loop.connect_read_pipe(lambda: protocol, pipe)) + + watcher = asyncio.SafeChildWatcher() + watcher.attach_loop(self.loop) + try: + asyncio.set_child_watcher(watcher) + kw = {'loop': self.loop} + if six.PY3: + kw['pass_fds'] = set((wfd,)) + create = asyncio.create_subprocess_exec(*args, **kw) + proc = self.loop.run_until_complete(create) + self.loop.run_until_complete(proc.wait()) + finally: + asyncio.set_child_watcher(None) + + os.close(wfd) + data = self.loop.run_until_complete(reader.read(-1)) + self.assertEqual(data, b'data') + + def test_streamreader_constructor(self): + self.addCleanup(asyncio.set_event_loop, None) + asyncio.set_event_loop(self.loop) + + # asyncio issue #184: Ensure that StreamReaderProtocol constructor + # retrieves the current loop if the loop parameter is not set + reader = asyncio.StreamReader() + self.assertIs(reader._loop, self.loop) + + def test_streamreaderprotocol_constructor(self): + self.addCleanup(asyncio.set_event_loop, None) + asyncio.set_event_loop(self.loop) + + # asyncio issue #184: Ensure that StreamReaderProtocol constructor + # retrieves the current loop if the loop parameter is not set + reader = mock.Mock() + protocol = asyncio.StreamReaderProtocol(reader) + self.assertIs(protocol._loop, self.loop) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_subprocess.py b/tests/test_subprocess.py new file mode 100644 index 00000000..63288cef --- /dev/null +++ b/tests/test_subprocess.py @@ -0,0 +1,486 @@ +from trollius import subprocess +from trollius import test_utils +import trollius as asyncio +import os +import signal +import sys +import warnings +from trollius import BrokenPipeError, ConnectionResetError, ProcessLookupError +from trollius import From, Return +from trollius import base_subprocess +from trollius import test_support as support +from trollius.test_utils import mock +from trollius.test_utils import unittest + +if sys.platform != 'win32': + from trollius import unix_events + + +# Program blocking +PROGRAM_BLOCKED = [sys.executable, '-c', 'import time; time.sleep(3600)'] + +# Program copying input to output +if sys.version_info >= (3,): + PROGRAM_CAT = ';'.join(('import sys', + 'data = sys.stdin.buffer.read()', + 'sys.stdout.buffer.write(data)')) +else: + PROGRAM_CAT =
';'.join(('import sys', + 'data = sys.stdin.read()', + 'sys.stdout.write(data)')) +PROGRAM_CAT = [sys.executable, '-c', PROGRAM_CAT] + +class TestSubprocessTransport(base_subprocess.BaseSubprocessTransport): + def _start(self, *args, **kwargs): + self._proc = mock.Mock() + self._proc.stdin = None + self._proc.stdout = None + self._proc.stderr = None + + +class SubprocessTransportTests(test_utils.TestCase): + def setUp(self): + self.loop = self.new_test_loop() + self.set_event_loop(self.loop) + + + def create_transport(self, waiter=None): + protocol = mock.Mock() + protocol.connection_made._is_coroutine = False + protocol.process_exited._is_coroutine = False + transport = TestSubprocessTransport( + self.loop, protocol, ['test'], False, + None, None, None, 0, waiter=waiter) + return (transport, protocol) + + def test_proc_exited(self): + waiter = asyncio.Future(loop=self.loop) + transport, protocol = self.create_transport(waiter) + transport._process_exited(6) + self.loop.run_until_complete(waiter) + + self.assertEqual(transport.get_returncode(), 6) + + self.assertTrue(protocol.connection_made.called) + self.assertTrue(protocol.process_exited.called) + self.assertTrue(protocol.connection_lost.called) + self.assertEqual(protocol.connection_lost.call_args[0], (None,)) + + self.assertFalse(transport._closed) + self.assertIsNone(transport._loop) + self.assertIsNone(transport._proc) + self.assertIsNone(transport._protocol) + + # methods must raise ProcessLookupError if the process exited + self.assertRaises(ProcessLookupError, + transport.send_signal, signal.SIGTERM) + self.assertRaises(ProcessLookupError, transport.terminate) + self.assertRaises(ProcessLookupError, transport.kill) + + transport.close() + + +class SubprocessMixin: + + def test_stdin_stdout(self): + args = PROGRAM_CAT + + @asyncio.coroutine + def run(data): + proc = yield From(asyncio.create_subprocess_exec( + *args, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + loop=self.loop)) + + # feed data + proc.stdin.write(data) + yield From(proc.stdin.drain()) + proc.stdin.close() + + # get output and exitcode + data = yield From(proc.stdout.read()) + exitcode = yield From(proc.wait()) + raise Return(exitcode, data) + + task = run(b'some data') + task = asyncio.wait_for(task, 60.0, loop=self.loop) + exitcode, stdout = self.loop.run_until_complete(task) + self.assertEqual(exitcode, 0) + self.assertEqual(stdout, b'some data') + + def test_communicate(self): + args = PROGRAM_CAT + + @asyncio.coroutine + def run(data): + proc = yield From(asyncio.create_subprocess_exec( + *args, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + loop=self.loop)) + stdout, stderr = yield From(proc.communicate(data)) + raise Return(proc.returncode, stdout) + + task = run(b'some data') + task = asyncio.wait_for(task, 60.0, loop=self.loop) + exitcode, stdout = self.loop.run_until_complete(task) + self.assertEqual(exitcode, 0) + self.assertEqual(stdout, b'some data') + + def test_shell(self): + create = asyncio.create_subprocess_shell('exit 7', + loop=self.loop) + proc = self.loop.run_until_complete(create) + exitcode = self.loop.run_until_complete(proc.wait()) + self.assertEqual(exitcode, 7) + + @unittest.skipUnless(hasattr(os, 'setsid'), "need os.setsid()") + def test_start_new_session(self): + def start_new_session(): + os.setsid() + + # start the new process in a new session + create = asyncio.create_subprocess_shell('exit 8', + preexec_fn=start_new_session, + loop=self.loop) + proc = self.loop.run_until_complete(create) + exitcode = 
self.loop.run_until_complete(proc.wait()) + self.assertEqual(exitcode, 8) + + def test_kill(self): + args = PROGRAM_BLOCKED + create = asyncio.create_subprocess_exec(*args, loop=self.loop) + proc = self.loop.run_until_complete(create) + proc.kill() + returncode = self.loop.run_until_complete(proc.wait()) + if sys.platform == 'win32': + self.assertIsInstance(returncode, int) + # expect 1 but sometimes get 0 + else: + self.assertEqual(-signal.SIGKILL, returncode) + + def test_terminate(self): + args = PROGRAM_BLOCKED + create = asyncio.create_subprocess_exec(*args, loop=self.loop) + proc = self.loop.run_until_complete(create) + proc.terminate() + returncode = self.loop.run_until_complete(proc.wait()) + if sys.platform == 'win32': + self.assertIsInstance(returncode, int) + # expect 1 but sometimes get 0 + else: + self.assertEqual(-signal.SIGTERM, returncode) + + @unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP") + def test_send_signal(self): + code = '; '.join(( + 'import sys, time', + 'print("sleeping")', + 'sys.stdout.flush()', + 'time.sleep(3600)')) + args = [sys.executable, '-c', code] + create = asyncio.create_subprocess_exec(*args, + stdout=subprocess.PIPE, + loop=self.loop) + proc = self.loop.run_until_complete(create) + + @asyncio.coroutine + def send_signal(proc): + # basic synchronization to wait until the program is sleeping + line = yield From(proc.stdout.readline()) + self.assertEqual(line, b'sleeping\n') + + proc.send_signal(signal.SIGHUP) + returncode = yield From(proc.wait()) + raise Return(returncode) + + returncode = self.loop.run_until_complete(send_signal(proc)) + self.assertEqual(-signal.SIGHUP, returncode) + + def prepare_broken_pipe_test(self): + # buffer large enough to feed the whole pipe buffer + large_data = b'x' * support.PIPE_MAX_SIZE + + # the program ends before its stdin can be fed + create = asyncio.create_subprocess_exec( + sys.executable, '-c', 'pass', + stdin=subprocess.PIPE, + loop=self.loop) + proc = self.loop.run_until_complete(create) + return (proc, large_data) + + def test_stdin_broken_pipe(self): + proc, large_data = self.prepare_broken_pipe_test() + + @asyncio.coroutine + def write_stdin(proc, data): + proc.stdin.write(data) + yield From(proc.stdin.drain()) + + coro = write_stdin(proc, large_data) + # drain() must raise BrokenPipeError or ConnectionResetError + with test_utils.disable_logger(): + self.assertRaises((BrokenPipeError, ConnectionResetError), + self.loop.run_until_complete, coro) + self.loop.run_until_complete(proc.wait()) + + def test_communicate_ignore_broken_pipe(self): + proc, large_data = self.prepare_broken_pipe_test() + + # communicate() must ignore BrokenPipeError when feeding stdin + with test_utils.disable_logger(): + self.loop.run_until_complete(proc.communicate(large_data)) + self.loop.run_until_complete(proc.wait()) + + def test_pause_reading(self): + limit = 10 + size = (limit * 2 + 1) + + @asyncio.coroutine + def test_pause_reading(): + code = '\n'.join(( + 'import sys', + 'sys.stdout.write("x" * %s)' % size, + 'sys.stdout.flush()', + )) + + connect_read_pipe = self.loop.connect_read_pipe + + @asyncio.coroutine + def connect_read_pipe_mock(*args, **kw): + connect = connect_read_pipe(*args, **kw) + transport, protocol = yield From(connect) + transport.pause_reading = mock.Mock() + transport.resume_reading = mock.Mock() + raise Return(transport, protocol) + + self.loop.connect_read_pipe = connect_read_pipe_mock + + proc = yield From(asyncio.create_subprocess_exec( + sys.executable, '-c', code, +
stdin=asyncio.subprocess.PIPE, + stdout=asyncio.subprocess.PIPE, + limit=limit, + loop=self.loop)) + stdout_transport = proc._transport.get_pipe_transport(1) + + stdout, stderr = yield From(proc.communicate()) + + # The child process produced more than limit bytes of output, + # the stream reader transport should pause the protocol to not + # allocate too much memory. + raise Return(stdout, stdout_transport) + + # Issue #22685: Ensure that the stream reader pauses the protocol + # when the child process produces too much data + stdout, transport = self.loop.run_until_complete(test_pause_reading()) + + self.assertEqual(stdout, b'x' * size) + self.assertTrue(transport.pause_reading.called) + self.assertTrue(transport.resume_reading.called) + + def test_stdin_not_inheritable(self): + # asyncio issue #209: stdin must not be inheritable, otherwise + # the Process.communicate() hangs + @asyncio.coroutine + def len_message(message): + code = 'import sys; data = sys.stdin.read(); print(len(data))' + proc = yield From(asyncio.create_subprocess_exec( + sys.executable, '-c', code, + stdin=asyncio.subprocess.PIPE, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + close_fds=False, + loop=self.loop)) + stdout, stderr = yield From(proc.communicate(message)) + exitcode = yield From(proc.wait()) + raise Return(stdout, exitcode) + + output, exitcode = self.loop.run_until_complete(len_message(b'abc')) + self.assertEqual(output.rstrip(), b'3') + self.assertEqual(exitcode, 0) + + def test_cancel_process_wait(self): + # Issue #23140: cancel Process.wait() + + @asyncio.coroutine + def cancel_wait(): + proc = yield From(asyncio.create_subprocess_exec( + *PROGRAM_BLOCKED, + loop=self.loop)) + + # Create an internal future waiting on the process exit + task = self.loop.create_task(proc.wait()) + self.loop.call_soon(task.cancel) + try: + yield From(task) + except asyncio.CancelledError: + pass + + # Cancel the future + task.cancel() + + # Kill the process and wait until it is done + proc.kill() + yield From(proc.wait()) + + self.loop.run_until_complete(cancel_wait()) + + def test_cancel_make_subprocess_transport_exec(self): + @asyncio.coroutine + def cancel_make_transport(): + coro = asyncio.create_subprocess_exec(*PROGRAM_BLOCKED, + loop=self.loop) + task = self.loop.create_task(coro) + + self.loop.call_soon(task.cancel) + try: + yield From(task) + except asyncio.CancelledError: + pass + + # ignore the log: + # "Exception during subprocess creation, kill the subprocess" + with test_utils.disable_logger(): + self.loop.run_until_complete(cancel_make_transport()) + + def test_cancel_post_init(self): + @asyncio.coroutine + def cancel_make_transport(): + coro = self.loop.subprocess_exec(asyncio.SubprocessProtocol, + *PROGRAM_BLOCKED) + task = self.loop.create_task(coro) + + self.loop.call_soon(task.cancel) + try: + yield From(task) + except asyncio.CancelledError: + pass + + # ignore the log: + # "Exception during subprocess creation, kill the subprocess" + with test_utils.disable_logger(): + self.loop.run_until_complete(cancel_make_transport()) + test_utils.run_briefly(self.loop) + + def test_close_kill_running(self): + @asyncio.coroutine + def kill_running(): + create = self.loop.subprocess_exec(asyncio.SubprocessProtocol, + *PROGRAM_BLOCKED) + transport, protocol = yield From(create) + + non_local = {'kill_called': False} + def kill(): + non_local['kill_called'] = True + orig_kill() + + proc = transport.get_extra_info('subprocess') + orig_kill = proc.kill + proc.kill = kill + returncode = 
transport.get_returncode()
+            transport.close()
+            yield From(transport._wait())
+            raise Return(returncode, non_local['kill_called'])
+
+        # Ignore "Close running child process: kill ..." log
+        with test_utils.disable_logger():
+            returncode, killed = self.loop.run_until_complete(kill_running())
+        self.assertIsNone(returncode)
+
+        # transport.close() must kill the process if it is still running
+        self.assertTrue(killed)
+        test_utils.run_briefly(self.loop)
+
+    def test_close_dont_kill_finished(self):
+        @asyncio.coroutine
+        def kill_running():
+            create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
+                                               *PROGRAM_BLOCKED)
+            transport, protocol = yield From(create)
+            proc = transport.get_extra_info('subprocess')
+
+            # kill the process (but asyncio is not notified immediately)
+            proc.kill()
+            proc.wait()
+
+            proc.kill = mock.Mock()
+            proc_returncode = proc.poll()
+            transport_returncode = transport.get_returncode()
+            transport.close()
+            raise Return(proc_returncode, transport_returncode,
+                         proc.kill.called)
+
+        # Ignore "Unknown child process pid ..." log of SafeChildWatcher,
+        # emitted because the test already consumes the exit status:
+        # proc.wait()
+        with test_utils.disable_logger():
+            result = self.loop.run_until_complete(kill_running())
+        test_utils.run_briefly(self.loop)
+
+        proc_returncode, transport_return_code, killed = result
+
+        self.assertIsNotNone(proc_returncode)
+        self.assertIsNone(transport_return_code)
+
+        # transport.close() must not kill the process if it finished, even if
+        # the transport was not notified yet
+        self.assertFalse(killed)
+
+    def test_popen_error(self):
+        # Issue #24763: check that the subprocess transport is closed
+        # when BaseSubprocessTransport fails
+        if sys.platform == 'win32':
+            target = 'trollius.windows_utils.Popen'
+        else:
+            target = 'subprocess.Popen'
+        with mock.patch(target) as popen:
+            exc = ZeroDivisionError
+            popen.side_effect = exc
+
+            create = asyncio.create_subprocess_exec(sys.executable, '-c',
+                                                    'pass', loop=self.loop)
+            with warnings.catch_warnings(record=True) as warns:
+                with self.assertRaises(exc):
+                    self.loop.run_until_complete(create)
+                self.assertEqual(warns, [])
+
+
+if sys.platform != 'win32':
+    # Unix
+    class SubprocessWatcherMixin(SubprocessMixin):
+
+        Watcher = None
+
+        def setUp(self):
+            policy = asyncio.get_event_loop_policy()
+            self.loop = policy.new_event_loop()
+            self.set_event_loop(self.loop)
+
+            watcher = self.Watcher()
+            watcher.attach_loop(self.loop)
+            policy.set_child_watcher(watcher)
+            self.addCleanup(policy.set_child_watcher, None)
+
+    class SubprocessSafeWatcherTests(SubprocessWatcherMixin,
+                                     test_utils.TestCase):
+
+        Watcher = unix_events.SafeChildWatcher
+
+    class SubprocessFastWatcherTests(SubprocessWatcherMixin,
+                                     test_utils.TestCase):
+
+        Watcher = unix_events.FastChildWatcher
+
+else:
+    # Windows
+    class SubprocessProactorTests(SubprocessMixin, test_utils.TestCase):
+
+        def setUp(self):
+            self.loop = asyncio.ProactorEventLoop()
+            self.set_event_loop(self.loop)
+
+
+if __name__ == '__main__':
+    unittest.main()
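The kill/terminate tests above rely on the POSIX convention that a process killed by a signal reports a negative returncode (-SIGKILL, -SIGTERM, -SIGHUP). A minimal standalone sketch of the same contract, assuming only that trollius is installed; the run_and_terminate() helper below is illustrative and not part of this patch:

import signal
import sys

import trollius
from trollius import From, Return

@trollius.coroutine
def run_and_terminate(loop):
    # spawn a child that blocks, then terminate it and collect its status
    proc = yield From(trollius.create_subprocess_exec(
        sys.executable, '-c', 'import time; time.sleep(3600)', loop=loop))
    proc.terminate()
    returncode = yield From(proc.wait())
    raise Return(returncode)

loop = trollius.get_event_loop()
try:
    returncode = loop.run_until_complete(run_and_terminate(loop))
    # on POSIX: returncode == -signal.SIGTERM (-15); on Windows: a plain int
    assert sys.platform == 'win32' or returncode == -signal.SIGTERM
finally:
    loop.close()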
diff --git a/tests/test_tasks.py b/tests/test_tasks.py
new file mode 100644
index 00000000..9aad2596
--- /dev/null
+++ b/tests/test_tasks.py
@@ -0,0 +1,2066 @@
+"""Tests for tasks.py."""
+
+import contextlib
+import functools
+import io
+import os
+import re
+import six
+import sys
+import types
+import weakref
+
+import trollius as asyncio
+from trollius import From, Return
+from trollius import coroutines
+from trollius import test_support as support
+from trollius import test_utils
+from trollius.test_utils import mock
+from trollius.test_utils import unittest
+
+
+PY33 = (sys.version_info >= (3, 3))
+PY34 = (sys.version_info >= (3, 4))
+PY35 = (sys.version_info >= (3, 5))
+
+
+@asyncio.coroutine
+def coroutine_function():
+    pass
+
+
+@asyncio.coroutine
+def coroutine_function2(x, y):
+    yield From(asyncio.sleep(0))
+
+
+@contextlib.contextmanager
+def set_coroutine_debug(enabled):
+    coroutines = asyncio.coroutines
+
+    old_debug = coroutines._DEBUG
+    try:
+        coroutines._DEBUG = enabled
+        yield
+    finally:
+        coroutines._DEBUG = old_debug
+
+
+def format_coroutine(qualname, state, src, source_traceback, generator=False):
+    if generator:
+        state = '%s' % state
+    else:
+        state = '%s, defined' % state
+    if source_traceback is not None:
+        frame = source_traceback[-1]
+        return ('coro=<%s() %s at %s> created at %s:%s'
+                % (qualname, state, src, frame[0], frame[1]))
+    else:
+        return 'coro=<%s() %s at %s>' % (qualname, state, src)
+
+
+class Dummy:
+
+    def __repr__(self):
+        return '<Dummy>'
+
+    def __call__(self, *args):
+        pass
+
+
+class TaskTests(test_utils.TestCase):
+
+    def setUp(self):
+        self.loop = self.new_test_loop()
+
+    def test_task_class(self):
+        @asyncio.coroutine
+        def notmuch():
+            return 'ok'
+        t = asyncio.Task(notmuch(), loop=self.loop)
+        self.loop.run_until_complete(t)
+        self.assertTrue(t.done())
+        self.assertEqual(t.result(), 'ok')
+        self.assertIs(t._loop, self.loop)
+
+        loop = asyncio.new_event_loop()
+        self.set_event_loop(loop)
+        t = asyncio.Task(notmuch(), loop=loop)
+        self.assertIs(t._loop, loop)
+        loop.run_until_complete(t)
+        loop.close()
+
+    def test_ensure_future_coroutine(self):
+        @asyncio.coroutine
+        def notmuch():
+            return 'ok'
+        t = asyncio.ensure_future(notmuch(), loop=self.loop)
+        self.loop.run_until_complete(t)
+        self.assertTrue(t.done())
+        self.assertEqual(t.result(), 'ok')
+        self.assertIs(t._loop, self.loop)
+
+        loop = asyncio.new_event_loop()
+        self.set_event_loop(loop)
+        t = asyncio.ensure_future(notmuch(), loop=loop)
+        self.assertIs(t._loop, loop)
+        loop.run_until_complete(t)
+        loop.close()
+
+    def test_ensure_future_future(self):
+        f_orig = asyncio.Future(loop=self.loop)
+        f_orig.set_result('ko')
+
+        f = asyncio.ensure_future(f_orig)
+        self.loop.run_until_complete(f)
+        self.assertTrue(f.done())
+        self.assertEqual(f.result(), 'ko')
+        self.assertIs(f, f_orig)
+
+        loop = asyncio.new_event_loop()
+        self.set_event_loop(loop)
+
+        with self.assertRaises(ValueError):
+            f = asyncio.ensure_future(f_orig, loop=loop)
+
+        loop.close()
+
+        f = asyncio.ensure_future(f_orig, loop=self.loop)
+        self.assertIs(f, f_orig)
+
+    def test_ensure_future_task(self):
+        @asyncio.coroutine
+        def notmuch():
+            return 'ok'
+        t_orig = asyncio.Task(notmuch(), loop=self.loop)
+        t = asyncio.ensure_future(t_orig)
+        self.loop.run_until_complete(t)
+        self.assertTrue(t.done())
+        self.assertEqual(t.result(), 'ok')
+        self.assertIs(t, t_orig)
+
+        loop = asyncio.new_event_loop()
+        self.set_event_loop(loop)
+
+        with self.assertRaises(ValueError):
+            t = asyncio.ensure_future(t_orig, loop=loop)
+
+        loop.close()
+
+        t = asyncio.ensure_future(t_orig, loop=self.loop)
+        self.assertIs(t, t_orig)
+
+    def test_ensure_future_neither(self):
+        with self.assertRaises(TypeError):
+            asyncio.ensure_future('ok')
+
+    def test_async_warning(self):
+        f = asyncio.Future(loop=self.loop)
+        with self.assertWarnsRegex(DeprecationWarning,
+                                   'function is deprecated, use ensure_'):
+            self.assertIs(f, asyncio.async(f))
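+    # Illustrative sketch, not part of the original test file: the
+    # ensure_future() contract exercised above is that a coroutine object
+    # is wrapped into a new Task while an existing Future or Task is
+    # returned unchanged (and a mismatched loop raises ValueError).
+    def _demo_ensure_future_contract(self):
+        @asyncio.coroutine
+        def answer():
+            yield From(asyncio.sleep(0))
+            raise Return(42)
+
+        task = asyncio.ensure_future(answer(), loop=self.loop)
+        self.assertIs(asyncio.ensure_future(task), task)
+        self.assertEqual(self.loop.run_until_complete(task), 42)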
+    @unittest.skipIf(PY35, 'FIXME: test broken on Python 3.5')
+    def test_get_stack(self):
+        non_local = {'T': None}
+
+        @asyncio.coroutine
+        def foo():
+            yield From(bar())
+
+        @asyncio.coroutine
+        def bar():
+            T = non_local['T']
+            # test get_stack()
+            f = T.get_stack(limit=1)
+            try:
+                self.assertEqual(f[0].f_code.co_name, 'foo')
+            finally:
+                f = None
+
+            # test print_stack()
+            file = six.StringIO()
+            T.print_stack(limit=1, file=file)
+            file.seek(0)
+            tb = file.read()
+            self.assertRegex(tb, r'foo\(\) running')
+
+        @asyncio.coroutine
+        def runner():
+            non_local['T'] = asyncio.ensure_future(foo(), loop=self.loop)
+            yield From(non_local['T'])
+
+        self.loop.run_until_complete(runner())
+
+    def test_task_repr(self):
+        self.loop.set_debug(False)
+
+        @asyncio.coroutine
+        def notmuch():
+            yield From(None)
+            raise Return('abc')
+
+        # test coroutine function
+        self.assertEqual(notmuch.__name__, 'notmuch')
+        if PY35:
+            self.assertEqual(notmuch.__qualname__,
+                             'TaskTests.test_task_repr.<locals>.notmuch')
+        self.assertEqual(notmuch.__module__, __name__)
+
+        filename, lineno = test_utils.get_function_source(notmuch)
+        src = "%s:%s" % (filename, lineno)
+
+        # test coroutine object
+        gen = notmuch()
+        if PY35 or (coroutines._DEBUG and PY33):
+            coro_qualname = 'TaskTests.test_task_repr.<locals>.notmuch'
+        else:
+            coro_qualname = 'notmuch'
+        self.assertEqual(gen.__name__, 'notmuch')
+        if PY35:
+            self.assertEqual(gen.__qualname__,
+                             coro_qualname)
+
+        # test pending Task
+        t = asyncio.Task(gen, loop=self.loop)
+        t.add_done_callback(Dummy())
+
+        coro = format_coroutine(coro_qualname, 'running', src,
+                                t._source_traceback, generator=True)
+        # FIXME: it is broken on Python 3.5+
+        if not coroutines._PEP479:
+            self.assertEqual(repr(t),
+                             '<Task pending %s cb=[<Dummy>()]>' % coro)
+
+        # test cancelling Task
+        t.cancel()  # Does not take immediate effect!
+        # FIXME: it is broken on Python 3.5+
+        if not coroutines._PEP479:
+            self.assertEqual(repr(t),
+                             '<Task cancelling %s cb=[<Dummy>()]>' % coro)
+
+        # test cancelled Task
+        self.assertRaises(asyncio.CancelledError,
+                          self.loop.run_until_complete, t)
+        coro = format_coroutine(coro_qualname, 'done', src,
+                                t._source_traceback)
+        # FIXME: it is broken on Python 3.5+
+        if not coroutines._PEP479:
+            self.assertEqual(repr(t),
+                             '<Task cancelled %s>' % coro)
+
+        # test finished Task
+        t = asyncio.Task(notmuch(), loop=self.loop)
+        self.loop.run_until_complete(t)
+        coro = format_coroutine(coro_qualname, 'done', src,
+                                t._source_traceback)
+        # FIXME: it is broken on Python 3.5+
+        if not coroutines._PEP479:
+            self.assertEqual(repr(t),
+                             "<Task finished %s result='abc'>" % coro)
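+    # Illustrative sketch, not part of the original test file: away from
+    # the PEP 479 corner cases guarded above, repr(Task) simply reflects
+    # the pending -> finished life cycle.
+    def _demo_task_repr_states(self):
+        @asyncio.coroutine
+        def noop():
+            yield From(asyncio.sleep(0))
+
+        t = asyncio.Task(noop(), loop=self.loop)
+        self.assertIn('pending', repr(t))
+        self.loop.run_until_complete(t)
+        self.assertIn('finished', repr(t))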
+    def test_task_repr_coro_decorator(self):
+        self.loop.set_debug(False)
+
+        @asyncio.coroutine
+        def notmuch():
+            # notmuch() function doesn't use yield: it will be wrapped by
+            # @coroutine decorator
+            return 123
+
+        # test coroutine function
+        self.assertEqual(notmuch.__name__, 'notmuch')
+        if PY35:
+            self.assertEqual(notmuch.__qualname__,
+                             'TaskTests.test_task_repr_coro_decorator'
+                             '.<locals>.notmuch')
+        self.assertEqual(notmuch.__module__, __name__)
+
+        # test coroutine object
+        gen = notmuch()
+        if PY35 or coroutines._DEBUG:
+            # On Python >= 3.5, generators now inherit the name of the
+            # function, as expected, and have a qualified name (__qualname__
+            # attribute).
+            coro_name = 'notmuch'
+            if PY35 or (coroutines._DEBUG and PY33):
+                coro_qualname = ('TaskTests.test_task_repr_coro_decorator'
+                                 '.<locals>.notmuch')
+            else:
+                coro_qualname = 'notmuch'
+        else:
+            # On Python < 3.5, generators inherit the name of the code, not
+            # of the function. See: http://bugs.python.org/issue21205
+            coro_name = coro_qualname = 'coro'
+        self.assertEqual(gen.__name__, coro_name)
+        if PY35:
+            self.assertEqual(gen.__qualname__, coro_qualname)
+
+        # test repr(CoroWrapper)
+        if coroutines._DEBUG:
+            # format the coroutine object
+            if coroutines._DEBUG:
+                filename, lineno = test_utils.get_function_source(notmuch)
+                frame = gen._source_traceback[-1]
+                coro = ('%s() running, defined at %s:%s, created at %s:%s'
+                        % (coro_qualname, filename, lineno,
+                           frame[0], frame[1]))
+            else:
+                code = gen.gi_code
+                coro = ('%s() running at %s:%s'
+                        % (coro_qualname, code.co_filename,
+                           code.co_firstlineno))
+
+            self.assertEqual(repr(gen), '<CoroWrapper %s>' % coro)
+
+        # test pending Task
+        t = asyncio.Task(gen, loop=self.loop)
+        t.add_done_callback(Dummy())
+
+        # format the coroutine object
+        if coroutines._DEBUG:
+            src = '%s:%s' % test_utils.get_function_source(notmuch)
+        else:
+            code = gen.gi_code
+            src = '%s:%s' % (code.co_filename, code.co_firstlineno)
+        coro = format_coroutine(coro_qualname, 'running', src,
+                                t._source_traceback,
+                                generator=not coroutines._DEBUG)
+        self.assertEqual(repr(t),
+                         '<Task pending %s cb=[<Dummy>()]>' % coro)
+        self.loop.run_until_complete(t)
+
+    def test_task_repr_wait_for(self):
+        self.loop.set_debug(False)
+
+        @asyncio.coroutine
+        def wait_for(fut):
+            res = yield From(fut)
+            raise Return(res)
+
+        fut = asyncio.Future(loop=self.loop)
+        task = asyncio.Task(wait_for(fut), loop=self.loop)
+        test_utils.run_briefly(self.loop)
+        self.assertRegex(repr(task),
+                         '<Task .* wait_for=%s>' % re.escape(repr(fut)))
+
+        fut.set_result(None)
+        self.loop.run_until_complete(task)
+
+    def test_task_repr_partial_corowrapper(self):
+        # Issue #222: repr(CoroWrapper) must not fail in debug mode if the
+        # coroutine is a partial function
+        with set_coroutine_debug(True):
+            self.loop.set_debug(True)
+
+            cb = functools.partial(coroutine_function2, 1)
+            partial_func = asyncio.coroutine(cb)
+            task = self.loop.create_task(partial_func(2))
+
+            # make warnings quiet
+            task._log_destroy_pending = False
+            self.addCleanup(task._coro.close)
+
+        coro_repr = repr(task._coro)
+        expected = ('<CoroWrapper ')
+        self.assertTrue(coro_repr.startswith(expected), coro_repr)
+
+    @mock.patch('trollius.coroutines.logger')
+    def test_coroutine_never_yielded(self, m_log):
+        with set_coroutine_debug(True):
+            @asyncio.coroutine
+            def coro_noop():
+                pass
+
+        tb_filename = __file__
+        tb_lineno = sys._getframe().f_lineno + 2
+        # create a coroutine object but don't use it
+        coro_noop()
+        support.gc_collect()
+
+        self.assertTrue(m_log.error.called)
+        message = m_log.error.call_args[0][0]
+        func_filename, func_lineno = test_utils.get_function_source(coro_noop)
+        coro_name = coro_noop.__name__
+
+        regex = (r'^<CoroWrapper %s\(\) .* at %s:%s, .*> '
+                 r'was never yielded from\n'
+                 r'Coroutine object created at \(most recent call last\):\n'
+                 r'.*\n'
+                 r'  File "%s", line %s, in test_coroutine_never_yielded\n'
+                 r'    coro_noop\(\)$'
+                 % (re.escape(coro_name),
+                    re.escape(func_filename), func_lineno,
+                    re.escape(tb_filename), tb_lineno))
+
+        self.assertRegex(message, re.compile(regex, re.DOTALL))
+
+    def test_task_source_traceback(self):
+        self.loop.set_debug(True)
+
+        task = asyncio.Task(coroutine_function(), loop=self.loop)
+        lineno = sys._getframe().f_lineno - 1
+        self.assertIsInstance(task._source_traceback, list)
+        filename = sys._getframe().f_code.co_filename
+        self.assertEqual(task._source_traceback[-1][:3],
+                         (filename,
+                          lineno,
+                          'test_task_source_traceback'))
+        self.loop.run_until_complete(task)
+
+    def _test_cancel_wait_for(self, timeout):
+        loop = asyncio.new_event_loop()
+        self.addCleanup(loop.close)
+
+        @asyncio.coroutine
+        def blocking_coroutine():
+            fut = asyncio.Future(loop=loop)
+            # Block: fut result is never set
+            yield From(fut)
+
+        task = loop.create_task(blocking_coroutine())
+
+        wait = loop.create_task(asyncio.wait_for(task, timeout, loop=loop))
+        loop.call_soon(wait.cancel)
+
+        self.assertRaises(asyncio.CancelledError,
+                          loop.run_until_complete, wait)
+
+        # Python issue #23219: cancelling the wait must also cancel the task
+        self.assertTrue(task.cancelled())
+
+    def test_cancel_blocking_wait_for(self):
+        self._test_cancel_wait_for(None)
+
+    def test_cancel_wait_for(self):
+ self._test_cancel_wait_for(60.0) + + +class GatherTestsBase: + + def setUp(self): + self.one_loop = self.new_test_loop() + self.other_loop = self.new_test_loop() + self.set_event_loop(self.one_loop, cleanup=False) + + def _run_loop(self, loop): + while loop._ready: + test_utils.run_briefly(loop) + + def _check_success(self, **kwargs): + a, b, c = [asyncio.Future(loop=self.one_loop) for i in range(3)] + fut = asyncio.gather(*self.wrap_futures(a, b, c), **kwargs) + cb = test_utils.MockCallback() + fut.add_done_callback(cb) + b.set_result(1) + a.set_result(2) + self._run_loop(self.one_loop) + self.assertEqual(cb.called, False) + self.assertFalse(fut.done()) + c.set_result(3) + self._run_loop(self.one_loop) + cb.assert_called_once_with(fut) + self.assertEqual(fut.result(), [2, 1, 3]) + + def test_success(self): + self._check_success() + self._check_success(return_exceptions=False) + + def test_result_exception_success(self): + self._check_success(return_exceptions=True) + + def test_one_exception(self): + a, b, c, d, e = [asyncio.Future(loop=self.one_loop) for i in range(5)] + fut = asyncio.gather(*self.wrap_futures(a, b, c, d, e)) + cb = test_utils.MockCallback() + fut.add_done_callback(cb) + exc = ZeroDivisionError() + a.set_result(1) + b.set_exception(exc) + self._run_loop(self.one_loop) + self.assertTrue(fut.done()) + cb.assert_called_once_with(fut) + self.assertIs(fut.exception(), exc) + # Does nothing + c.set_result(3) + d.cancel() + e.set_exception(RuntimeError()) + e.exception() + + def test_return_exceptions(self): + a, b, c, d = [asyncio.Future(loop=self.one_loop) for i in range(4)] + fut = asyncio.gather(*self.wrap_futures(a, b, c, d), + return_exceptions=True) + cb = test_utils.MockCallback() + fut.add_done_callback(cb) + exc = ZeroDivisionError() + exc2 = RuntimeError() + b.set_result(1) + c.set_exception(exc) + a.set_result(3) + self._run_loop(self.one_loop) + self.assertFalse(fut.done()) + d.set_exception(exc2) + self._run_loop(self.one_loop) + self.assertTrue(fut.done()) + cb.assert_called_once_with(fut) + self.assertEqual(fut.result(), [3, 1, exc, exc2]) + + def test_env_var_debug(self): + aio_path = os.path.dirname(os.path.dirname(asyncio.__file__)) + + code = '\n'.join(( + 'import trollius.coroutines', + 'print(trollius.coroutines._DEBUG)')) + + sts, stdout, stderr = support.assert_python_ok('-c', code, + TROLLIUSDEBUG='', + PYTHONPATH=aio_path) + self.assertEqual(stdout.rstrip(), b'False') + + sts, stdout, stderr = support.assert_python_ok('-c', code, + TROLLIUSDEBUG='1', + PYTHONPATH=aio_path) + self.assertEqual(stdout.rstrip(), b'True') + + +class FutureGatherTests(GatherTestsBase, test_utils.TestCase): + + def wrap_futures(self, *futures): + return futures + + def _check_empty_sequence(self, seq_or_iter): + asyncio.set_event_loop(self.one_loop) + self.addCleanup(asyncio.set_event_loop, None) + fut = asyncio.gather(*seq_or_iter) + self.assertIsInstance(fut, asyncio.Future) + self.assertIs(fut._loop, self.one_loop) + self._run_loop(self.one_loop) + self.assertTrue(fut.done()) + self.assertEqual(fut.result(), []) + fut = asyncio.gather(*seq_or_iter, loop=self.other_loop) + self.assertIs(fut._loop, self.other_loop) + + def test_constructor_empty_sequence(self): + self._check_empty_sequence([]) + self._check_empty_sequence(()) + self._check_empty_sequence(set()) + self._check_empty_sequence(iter("")) + + def test_constructor_heterogenous_futures(self): + fut1 = asyncio.Future(loop=self.one_loop) + fut2 = asyncio.Future(loop=self.other_loop) + with 
self.assertRaises(ValueError): + asyncio.gather(fut1, fut2) + with self.assertRaises(ValueError): + asyncio.gather(fut1, loop=self.other_loop) + + def test_constructor_homogenous_futures(self): + children = [asyncio.Future(loop=self.other_loop) for i in range(3)] + fut = asyncio.gather(*children) + self.assertIs(fut._loop, self.other_loop) + self._run_loop(self.other_loop) + self.assertFalse(fut.done()) + fut = asyncio.gather(*children, loop=self.other_loop) + self.assertIs(fut._loop, self.other_loop) + self._run_loop(self.other_loop) + self.assertFalse(fut.done()) + + def test_one_cancellation(self): + a, b, c, d, e = [asyncio.Future(loop=self.one_loop) for i in range(5)] + fut = asyncio.gather(a, b, c, d, e) + cb = test_utils.MockCallback() + fut.add_done_callback(cb) + a.set_result(1) + b.cancel() + self._run_loop(self.one_loop) + self.assertTrue(fut.done()) + cb.assert_called_once_with(fut) + self.assertFalse(fut.cancelled()) + self.assertIsInstance(fut.exception(), asyncio.CancelledError) + # Does nothing + c.set_result(3) + d.cancel() + e.set_exception(RuntimeError()) + e.exception() + + def test_result_exception_one_cancellation(self): + a, b, c, d, e, f = [asyncio.Future(loop=self.one_loop) + for i in range(6)] + fut = asyncio.gather(a, b, c, d, e, f, return_exceptions=True) + cb = test_utils.MockCallback() + fut.add_done_callback(cb) + a.set_result(1) + zde = ZeroDivisionError() + b.set_exception(zde) + c.cancel() + self._run_loop(self.one_loop) + self.assertFalse(fut.done()) + d.set_result(3) + e.cancel() + rte = RuntimeError() + f.set_exception(rte) + res = self.one_loop.run_until_complete(fut) + self.assertIsInstance(res[2], asyncio.CancelledError) + self.assertIsInstance(res[4], asyncio.CancelledError) + res[2] = res[4] = None + self.assertEqual(res, [1, zde, None, 3, None, rte]) + cb.assert_called_once_with(fut) + + +class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase): + + def setUp(self): + super(CoroutineGatherTests, self).setUp() + asyncio.set_event_loop(self.one_loop) + + def wrap_futures(self, *futures): + coros = [] + for fut in futures: + @asyncio.coroutine + def coro(fut=fut): + result = (yield From(fut)) + raise Return(result) + coros.append(coro()) + return coros + + def test_constructor_loop_selection(self): + @asyncio.coroutine + def coro(): + return 'abc' + gen1 = coro() + gen2 = coro() + fut = asyncio.gather(gen1, gen2) + self.assertIs(fut._loop, self.one_loop) + self.one_loop.run_until_complete(fut) + + self.set_event_loop(self.other_loop, cleanup=False) + gen3 = coro() + gen4 = coro() + fut2 = asyncio.gather(gen3, gen4, loop=self.other_loop) + self.assertIs(fut2._loop, self.other_loop) + self.other_loop.run_until_complete(fut2) + + def test_duplicate_coroutines(self): + @asyncio.coroutine + def coro(s): + return s + c = coro('abc') + fut = asyncio.gather(c, c, coro('def'), c, loop=self.one_loop) + self._run_loop(self.one_loop) + self.assertEqual(fut.result(), ['abc', 'abc', 'def', 'abc']) + + def test_cancellation_broadcast(self): + # Cancelling outer() cancels all children. 
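+        # The cancellation is delivered to the inner gather future, which
+        # in turn cancels child1 and child2; a later gatherer.cancel() must
+        # then return False because the future is already cancelled.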
+ non_local = {'proof': 0} + waiter = asyncio.Future(loop=self.one_loop) + + @asyncio.coroutine + def inner(): + yield From(waiter) + non_local['proof'] += 1 + + child1 = asyncio.ensure_future(inner(), loop=self.one_loop) + child2 = asyncio.ensure_future(inner(), loop=self.one_loop) + non_local['gatherer'] = None + + @asyncio.coroutine + def outer(): + non_local['gatherer'] = asyncio.gather(child1, child2, loop=self.one_loop) + yield From(non_local['gatherer']) + non_local['proof'] += 100 + + f = asyncio.ensure_future(outer(), loop=self.one_loop) + test_utils.run_briefly(self.one_loop) + self.assertTrue(f.cancel()) + with self.assertRaises(asyncio.CancelledError): + self.one_loop.run_until_complete(f) + self.assertFalse(non_local['gatherer'].cancel()) + self.assertTrue(waiter.cancelled()) + self.assertTrue(child1.cancelled()) + self.assertTrue(child2.cancelled()) + test_utils.run_briefly(self.one_loop) + self.assertEqual(non_local['proof'], 0) + + def test_exception_marking(self): + # Test for the first line marked "Mark exception retrieved." + + @asyncio.coroutine + def inner(f): + yield From(f) + raise RuntimeError('should not be ignored') + + a = asyncio.Future(loop=self.one_loop) + b = asyncio.Future(loop=self.one_loop) + + @asyncio.coroutine + def outer(): + yield From(asyncio.gather(inner(a), inner(b), loop=self.one_loop)) + + f = asyncio.ensure_future(outer(), loop=self.one_loop) + test_utils.run_briefly(self.one_loop) + a.set_result(None) + test_utils.run_briefly(self.one_loop) + b.set_result(None) + test_utils.run_briefly(self.one_loop) + self.assertIsInstance(f.exception(), RuntimeError) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_transports.py b/tests/test_transports.py new file mode 100644 index 00000000..d4c57805 --- /dev/null +++ b/tests/test_transports.py @@ -0,0 +1,97 @@ +"""Tests for transports.py.""" + +import trollius as asyncio +from trollius import test_utils +from trollius import transports +from trollius.test_utils import mock +from trollius.test_utils import unittest + +try: + memoryview +except NameError: + # Python 2.6 + memoryview = buffer + + +class TransportTests(test_utils.TestCase): + + def test_ctor_extra_is_none(self): + transport = asyncio.Transport() + self.assertEqual(transport._extra, {}) + + def test_get_extra_info(self): + transport = asyncio.Transport({'extra': 'info'}) + self.assertEqual('info', transport.get_extra_info('extra')) + self.assertIsNone(transport.get_extra_info('unknown')) + + default = object() + self.assertIs(default, transport.get_extra_info('unknown', default)) + + def test_writelines(self): + transport = asyncio.Transport() + transport.write = mock.Mock() + + transport.writelines([b'line1', + bytearray(b'line2'), + memoryview(b'line3')]) + self.assertEqual(1, transport.write.call_count) + transport.write.assert_called_with(b'line1line2line3') + + def test_not_implemented(self): + transport = asyncio.Transport() + + self.assertRaises(NotImplementedError, + transport.set_write_buffer_limits) + self.assertRaises(NotImplementedError, transport.get_write_buffer_size) + self.assertRaises(NotImplementedError, transport.write, 'data') + self.assertRaises(NotImplementedError, transport.write_eof) + self.assertRaises(NotImplementedError, transport.can_write_eof) + self.assertRaises(NotImplementedError, transport.pause_reading) + self.assertRaises(NotImplementedError, transport.resume_reading) + self.assertRaises(NotImplementedError, transport.close) + self.assertRaises(NotImplementedError, transport.abort) + + 
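+    # Illustrative sketch, not part of the original test file: the base
+    # Transport joins the chunks given to writelines() and forwards one
+    # bytes object to write(), as test_writelines() above asserts.
+    def _demo_writelines_coalesces(self):
+        transport = asyncio.Transport({'peer': 'demo'})
+        transport.write = mock.Mock()
+        transport.writelines([b'spam', bytearray(b'ham')])
+        transport.write.assert_called_once_with(b'spamham')
+        self.assertEqual(transport.get_extra_info('peer'), 'demo')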
def test_dgram_not_implemented(self): + transport = asyncio.DatagramTransport() + + self.assertRaises(NotImplementedError, transport.sendto, 'data') + self.assertRaises(NotImplementedError, transport.abort) + + def test_subprocess_transport_not_implemented(self): + transport = asyncio.SubprocessTransport() + + self.assertRaises(NotImplementedError, transport.get_pid) + self.assertRaises(NotImplementedError, transport.get_returncode) + self.assertRaises(NotImplementedError, transport.get_pipe_transport, 1) + self.assertRaises(NotImplementedError, transport.send_signal, 1) + self.assertRaises(NotImplementedError, transport.terminate) + self.assertRaises(NotImplementedError, transport.kill) + + def test_flowcontrol_mixin_set_write_limits(self): + + class MyTransport(transports._FlowControlMixin, + transports.Transport): + + def get_write_buffer_size(self): + return 512 + + loop = mock.Mock() + transport = MyTransport(loop=loop) + transport._protocol = mock.Mock() + + self.assertFalse(transport._protocol_paused) + + with self.assertRaisesRegex(ValueError, 'high.*must be >= low'): + transport.set_write_buffer_limits(high=0, low=1) + + transport.set_write_buffer_limits(high=1024, low=128) + self.assertFalse(transport._protocol_paused) + self.assertEqual(transport.get_write_buffer_limits(), (128, 1024)) + + transport.set_write_buffer_limits(high=256, low=128) + self.assertTrue(transport._protocol_paused) + self.assertEqual(transport.get_write_buffer_limits(), (128, 256)) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_unix_events.py b/tests/test_unix_events.py new file mode 100644 index 00000000..b6b5bdcc --- /dev/null +++ b/tests/test_unix_events.py @@ -0,0 +1,1572 @@ +"""Tests for unix_events.py.""" + +import collections +import contextlib +import errno +import io +import os +import signal +import socket +import stat +import sys +import tempfile +import threading +from trollius.test_utils import unittest + +if sys.platform == 'win32': + raise unittest.SkipTest('UNIX only') + +import trollius as asyncio +from trollius import log +from trollius import test_utils +from trollius import unix_events +from trollius.py33_exceptions import BlockingIOError, ChildProcessError +from trollius.test_utils import mock + + +MOCK_ANY = mock.ANY + + +def close_pipe_transport(transport): + # Don't call transport.close() because the event loop and the selector + # are mocked + if transport._pipe is None: + return + transport._pipe.close() + transport._pipe = None + + +@unittest.skipUnless(signal, 'Signals are not supported') +class SelectorEventLoopSignalTests(test_utils.TestCase): + + def setUp(self): + self.loop = asyncio.SelectorEventLoop() + self.set_event_loop(self.loop) + + def test_check_signal(self): + self.assertRaises( + TypeError, self.loop._check_signal, '1') + self.assertRaises( + ValueError, self.loop._check_signal, signal.NSIG + 1) + + def test_handle_signal_no_handler(self): + self.loop._handle_signal(signal.NSIG + 1) + + def test_handle_signal_cancelled_handler(self): + h = asyncio.Handle(mock.Mock(), (), + loop=mock.Mock()) + h.cancel() + self.loop._signal_handlers[signal.NSIG + 1] = h + self.loop.remove_signal_handler = mock.Mock() + self.loop._handle_signal(signal.NSIG + 1) + self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1) + + @mock.patch('trollius.unix_events.signal') + def test_add_signal_handler_setup_error(self, m_signal): + m_signal.NSIG = signal.NSIG + m_signal.set_wakeup_fd.side_effect = ValueError + + self.assertRaises( + RuntimeError, + 
self.loop.add_signal_handler, + signal.SIGINT, lambda: True) + + @mock.patch('trollius.unix_events.signal') + def test_add_signal_handler_coroutine_error(self, m_signal): + m_signal.NSIG = signal.NSIG + + @asyncio.coroutine + def simple_coroutine(): + yield None + + # callback must not be a coroutine function + coro_func = simple_coroutine + coro_obj = coro_func() + self.addCleanup(coro_obj.close) + for func in (coro_func, coro_obj): + self.assertRaisesRegex( + TypeError, 'coroutines cannot be used with add_signal_handler', + self.loop.add_signal_handler, + signal.SIGINT, func) + + @mock.patch('trollius.unix_events.signal') + def test_add_signal_handler(self, m_signal): + m_signal.NSIG = signal.NSIG + + cb = lambda: True + self.loop.add_signal_handler(signal.SIGHUP, cb) + h = self.loop._signal_handlers.get(signal.SIGHUP) + self.assertIsInstance(h, asyncio.Handle) + self.assertEqual(h._callback, cb) + + @mock.patch('trollius.unix_events.signal') + def test_add_signal_handler_install_error(self, m_signal): + m_signal.NSIG = signal.NSIG + + def set_wakeup_fd(fd): + if fd == -1: + raise ValueError() + m_signal.set_wakeup_fd = set_wakeup_fd + + class Err(OSError): + errno = errno.EFAULT + m_signal.signal.side_effect = Err + + self.assertRaises( + Err, + self.loop.add_signal_handler, + signal.SIGINT, lambda: True) + + @mock.patch('trollius.unix_events.signal') + @mock.patch('trollius.base_events.logger') + def test_add_signal_handler_install_error2(self, m_logging, m_signal): + m_signal.NSIG = signal.NSIG + + class Err(OSError): + errno = errno.EINVAL + m_signal.signal.side_effect = Err + + self.loop._signal_handlers[signal.SIGHUP] = lambda: True + self.assertRaises( + RuntimeError, + self.loop.add_signal_handler, + signal.SIGINT, lambda: True) + self.assertFalse(m_logging.info.called) + self.assertEqual(1, m_signal.set_wakeup_fd.call_count) + + @mock.patch('trollius.unix_events.signal') + @mock.patch('trollius.base_events.logger') + def test_add_signal_handler_install_error3(self, m_logging, m_signal): + class Err(OSError): + errno = errno.EINVAL + m_signal.signal.side_effect = Err + m_signal.NSIG = signal.NSIG + + self.assertRaises( + RuntimeError, + self.loop.add_signal_handler, + signal.SIGINT, lambda: True) + self.assertFalse(m_logging.info.called) + self.assertEqual(2, m_signal.set_wakeup_fd.call_count) + + @mock.patch('trollius.unix_events.signal') + def test_remove_signal_handler(self, m_signal): + m_signal.NSIG = signal.NSIG + + self.loop.add_signal_handler(signal.SIGHUP, lambda: True) + + self.assertTrue( + self.loop.remove_signal_handler(signal.SIGHUP)) + self.assertTrue(m_signal.set_wakeup_fd.called) + self.assertTrue(m_signal.signal.called) + self.assertEqual( + (signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0]) + + @mock.patch('trollius.unix_events.signal') + def test_remove_signal_handler_2(self, m_signal): + m_signal.NSIG = signal.NSIG + m_signal.SIGINT = signal.SIGINT + + self.loop.add_signal_handler(signal.SIGINT, lambda: True) + self.loop._signal_handlers[signal.SIGHUP] = object() + m_signal.set_wakeup_fd.reset_mock() + + self.assertTrue( + self.loop.remove_signal_handler(signal.SIGINT)) + self.assertFalse(m_signal.set_wakeup_fd.called) + self.assertTrue(m_signal.signal.called) + self.assertEqual( + (signal.SIGINT, m_signal.default_int_handler), + m_signal.signal.call_args[0]) + + @mock.patch('trollius.unix_events.signal') + @mock.patch('trollius.base_events.logger') + def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal): + m_signal.NSIG = 
signal.NSIG + self.loop.add_signal_handler(signal.SIGHUP, lambda: True) + + m_signal.set_wakeup_fd.side_effect = ValueError + + self.loop.remove_signal_handler(signal.SIGHUP) + self.assertTrue(m_logging.info) + + @mock.patch('trollius.unix_events.signal') + def test_remove_signal_handler_error(self, m_signal): + m_signal.NSIG = signal.NSIG + self.loop.add_signal_handler(signal.SIGHUP, lambda: True) + + m_signal.signal.side_effect = OSError + + self.assertRaises( + OSError, self.loop.remove_signal_handler, signal.SIGHUP) + + @mock.patch('trollius.unix_events.signal') + def test_remove_signal_handler_error2(self, m_signal): + m_signal.NSIG = signal.NSIG + self.loop.add_signal_handler(signal.SIGHUP, lambda: True) + + class Err(OSError): + errno = errno.EINVAL + m_signal.signal.side_effect = Err + + self.assertRaises( + RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP) + + @mock.patch('trollius.unix_events.signal') + def test_close(self, m_signal): + m_signal.NSIG = signal.NSIG + + self.loop.add_signal_handler(signal.SIGHUP, lambda: True) + self.loop.add_signal_handler(signal.SIGCHLD, lambda: True) + + self.assertEqual(len(self.loop._signal_handlers), 2) + + m_signal.set_wakeup_fd.reset_mock() + + self.loop.close() + + self.assertEqual(len(self.loop._signal_handlers), 0) + m_signal.set_wakeup_fd.assert_called_once_with(-1) + + +@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), + 'UNIX Sockets are not supported') +class SelectorEventLoopUnixSocketTests(test_utils.TestCase): + + def setUp(self): + self.loop = asyncio.SelectorEventLoop() + self.set_event_loop(self.loop) + + def test_create_unix_server_existing_path_sock(self): + with test_utils.unix_socket_path() as path: + sock = socket.socket(socket.AF_UNIX) + sock.bind(path) + with contextlib.closing(sock): + coro = self.loop.create_unix_server(lambda: None, path) + with self.assertRaisesRegex(OSError, + 'Address.*is already in use'): + self.loop.run_until_complete(coro) + + def test_create_unix_server_existing_path_nonsock(self): + with tempfile.NamedTemporaryFile() as file: + coro = self.loop.create_unix_server(lambda: None, file.name) + with self.assertRaisesRegex(OSError, + 'Address.*is already in use'): + self.loop.run_until_complete(coro) + + def test_create_unix_server_ssl_bool(self): + coro = self.loop.create_unix_server(lambda: None, path='spam', + ssl=True) + with self.assertRaisesRegex(TypeError, + 'ssl argument must be an SSLContext'): + self.loop.run_until_complete(coro) + + def test_create_unix_server_nopath_nosock(self): + coro = self.loop.create_unix_server(lambda: None, path=None) + with self.assertRaisesRegex(ValueError, + 'path was not specified, and no sock'): + self.loop.run_until_complete(coro) + + def test_create_unix_server_path_inetsock(self): + sock = socket.socket() + with contextlib.closing(sock): + coro = self.loop.create_unix_server(lambda: None, path=None, + sock=sock) + with self.assertRaisesRegex(ValueError, + 'A UNIX Domain Socket was expected'): + self.loop.run_until_complete(coro) + + @mock.patch('trollius.unix_events.socket') + def test_create_unix_server_bind_error(self, m_socket): + # Ensure that the socket is closed on any bind error + sock = mock.Mock() + m_socket.socket.return_value = sock + m_socket.error = socket.error + + sock.bind.side_effect = OSError + coro = self.loop.create_unix_server(lambda: None, path="/test") + with self.assertRaises(OSError): + self.loop.run_until_complete(coro) + self.assertTrue(sock.close.called) + + sock.bind.side_effect = MemoryError + coro = 
self.loop.create_unix_server(lambda: None, path="/test") + with self.assertRaises(MemoryError): + self.loop.run_until_complete(coro) + self.assertTrue(sock.close.called) + + def test_create_unix_connection_path_sock(self): + coro = self.loop.create_unix_connection( + lambda: None, os.devnull, sock=object()) + with self.assertRaisesRegex(ValueError, 'path and sock can not be'): + self.loop.run_until_complete(coro) + + def test_create_unix_connection_nopath_nosock(self): + coro = self.loop.create_unix_connection( + lambda: None, None) + with self.assertRaisesRegex(ValueError, + 'no path and sock were specified'): + self.loop.run_until_complete(coro) + + def test_create_unix_connection_nossl_serverhost(self): + coro = self.loop.create_unix_connection( + lambda: None, os.devnull, server_hostname='spam') + with self.assertRaisesRegex(ValueError, + 'server_hostname is only meaningful'): + self.loop.run_until_complete(coro) + + def test_create_unix_connection_ssl_noserverhost(self): + coro = self.loop.create_unix_connection( + lambda: None, os.devnull, ssl=True) + + with self.assertRaisesRegex( + ValueError, 'you have to pass server_hostname when using ssl'): + + self.loop.run_until_complete(coro) + + +class UnixReadPipeTransportTests(test_utils.TestCase): + + def setUp(self): + self.loop = self.new_test_loop() + self.protocol = test_utils.make_test_protocol(asyncio.Protocol) + self.pipe = mock.Mock(spec_set=io.RawIOBase) + self.pipe.fileno.return_value = 5 + + blocking_patcher = mock.patch('trollius.unix_events._set_nonblocking') + blocking_patcher.start() + self.addCleanup(blocking_patcher.stop) + + fstat_patcher = mock.patch('os.fstat') + m_fstat = fstat_patcher.start() + st = mock.Mock() + st.st_mode = stat.S_IFIFO + m_fstat.return_value = st + self.addCleanup(fstat_patcher.stop) + + def read_pipe_transport(self, waiter=None): + transport = unix_events._UnixReadPipeTransport(self.loop, self.pipe, + self.protocol, + waiter=waiter) + self.addCleanup(close_pipe_transport, transport) + return transport + + def test_ctor(self): + waiter = asyncio.Future(loop=self.loop) + tr = self.read_pipe_transport(waiter=waiter) + self.loop.run_until_complete(waiter) + + self.protocol.connection_made.assert_called_with(tr) + self.loop.assert_reader(5, tr._read_ready) + self.assertIsNone(waiter.result()) + + @mock.patch('os.read') + def test__read_ready(self, m_read): + tr = self.read_pipe_transport() + m_read.return_value = b'data' + tr._read_ready() + + m_read.assert_called_with(5, tr.max_size) + self.protocol.data_received.assert_called_with(b'data') + + @mock.patch('os.read') + def test__read_ready_eof(self, m_read): + tr = self.read_pipe_transport() + m_read.return_value = b'' + tr._read_ready() + + m_read.assert_called_with(5, tr.max_size) + self.assertFalse(self.loop.readers) + test_utils.run_briefly(self.loop) + self.protocol.eof_received.assert_called_with() + self.protocol.connection_lost.assert_called_with(None) + + @mock.patch('os.read') + def test__read_ready_blocked(self, m_read): + tr = self.read_pipe_transport() + m_read.side_effect = BlockingIOError + tr._read_ready() + + m_read.assert_called_with(5, tr.max_size) + test_utils.run_briefly(self.loop) + self.assertFalse(self.protocol.data_received.called) + + @mock.patch('trollius.log.logger.error') + @mock.patch('os.read') + def test__read_ready_error(self, m_read, m_logexc): + tr = self.read_pipe_transport() + err = OSError() + m_read.side_effect = err + tr._close = mock.Mock() + tr._read_ready() + + m_read.assert_called_with(5, tr.max_size) + 
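+        # the OSError from os.read() must be handed to _close() unchanged
+        # and logged as a fatal read error with protocol/transport context: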
tr._close.assert_called_with(err) + m_logexc.assert_called_with( + test_utils.MockPattern( + 'Fatal read error on pipe transport' + '\nprotocol:.*\ntransport:.*'), + exc_info=(OSError, MOCK_ANY, MOCK_ANY)) + + @mock.patch('os.read') + def test_pause_reading(self, m_read): + tr = self.read_pipe_transport() + m = mock.Mock() + self.loop.add_reader(5, m) + tr.pause_reading() + self.assertFalse(self.loop.readers) + + @mock.patch('os.read') + def test_resume_reading(self, m_read): + tr = self.read_pipe_transport() + tr.resume_reading() + self.loop.assert_reader(5, tr._read_ready) + + @mock.patch('os.read') + def test_close(self, m_read): + tr = self.read_pipe_transport() + tr._close = mock.Mock() + tr.close() + tr._close.assert_called_with(None) + + @mock.patch('os.read') + def test_close_already_closing(self, m_read): + tr = self.read_pipe_transport() + tr._closing = True + tr._close = mock.Mock() + tr.close() + self.assertFalse(tr._close.called) + + @mock.patch('os.read') + def test__close(self, m_read): + tr = self.read_pipe_transport() + err = object() + tr._close(err) + self.assertTrue(tr._closing) + self.assertFalse(self.loop.readers) + test_utils.run_briefly(self.loop) + self.protocol.connection_lost.assert_called_with(err) + + def test__call_connection_lost(self): + tr = self.read_pipe_transport() + self.assertIsNotNone(tr._protocol) + self.assertIsNotNone(tr._loop) + + err = None + tr._call_connection_lost(err) + self.protocol.connection_lost.assert_called_with(err) + self.pipe.close.assert_called_with() + + self.assertIsNone(tr._protocol) + self.assertIsNone(tr._loop) + + def test__call_connection_lost_with_err(self): + tr = self.read_pipe_transport() + self.assertIsNotNone(tr._protocol) + self.assertIsNotNone(tr._loop) + + err = OSError() + tr._call_connection_lost(err) + self.protocol.connection_lost.assert_called_with(err) + self.pipe.close.assert_called_with() + + self.assertIsNone(tr._protocol) + self.assertIsNone(tr._loop) + + +class UnixWritePipeTransportTests(test_utils.TestCase): + + def setUp(self): + self.loop = self.new_test_loop() + self.protocol = test_utils.make_test_protocol(asyncio.BaseProtocol) + self.pipe = mock.Mock(spec_set=io.RawIOBase) + self.pipe.fileno.return_value = 5 + + blocking_patcher = mock.patch('trollius.unix_events._set_nonblocking') + blocking_patcher.start() + self.addCleanup(blocking_patcher.stop) + + fstat_patcher = mock.patch('os.fstat') + m_fstat = fstat_patcher.start() + st = mock.Mock() + st.st_mode = stat.S_IFSOCK + m_fstat.return_value = st + self.addCleanup(fstat_patcher.stop) + + def write_pipe_transport(self, waiter=None): + transport = unix_events._UnixWritePipeTransport(self.loop, self.pipe, + self.protocol, + waiter=waiter) + self.addCleanup(close_pipe_transport, transport) + return transport + + def test_ctor(self): + waiter = asyncio.Future(loop=self.loop) + tr = self.write_pipe_transport(waiter=waiter) + self.loop.run_until_complete(waiter) + + self.protocol.connection_made.assert_called_with(tr) + self.loop.assert_reader(5, tr._read_ready) + self.assertEqual(None, waiter.result()) + + def test_can_write_eof(self): + tr = self.write_pipe_transport() + self.assertTrue(tr.can_write_eof()) + + @mock.patch('os.write') + def test_write(self, m_write): + tr = self.write_pipe_transport() + m_write.return_value = 4 + tr.write(b'data') + m_write.assert_called_with(5, b'data') + self.assertFalse(self.loop.writers) + self.assertEqual([], tr._buffer) + + @mock.patch('os.write') + def test_write_no_data(self, m_write): + tr = 
self.write_pipe_transport() + tr.write(b'') + self.assertFalse(m_write.called) + self.assertFalse(self.loop.writers) + self.assertEqual([], tr._buffer) + + @mock.patch('os.write') + def test_write_partial(self, m_write): + tr = self.write_pipe_transport() + m_write.return_value = 2 + tr.write(b'data') + m_write.assert_called_with(5, b'data') + self.loop.assert_writer(5, tr._write_ready) + self.assertEqual([b'ta'], tr._buffer) + + @mock.patch('os.write') + def test_write_buffer(self, m_write): + tr = self.write_pipe_transport() + self.loop.add_writer(5, tr._write_ready) + tr._buffer = [b'previous'] + tr.write(b'data') + self.assertFalse(m_write.called) + self.loop.assert_writer(5, tr._write_ready) + self.assertEqual([b'previous', b'data'], tr._buffer) + + @mock.patch('os.write') + def test_write_again(self, m_write): + tr = self.write_pipe_transport() + m_write.side_effect = BlockingIOError() + tr.write(b'data') + m_write.assert_called_with(5, b'data') + self.loop.assert_writer(5, tr._write_ready) + self.assertEqual([b'data'], tr._buffer) + + @mock.patch('trollius.unix_events.logger') + @mock.patch('os.write') + def test_write_err(self, m_write, m_log): + tr = self.write_pipe_transport() + err = OSError() + m_write.side_effect = err + tr._fatal_error = mock.Mock() + tr.write(b'data') + m_write.assert_called_with(5, b'data') + self.assertFalse(self.loop.writers) + self.assertEqual([], tr._buffer) + tr._fatal_error.assert_called_with( + err, + 'Fatal write error on pipe transport') + self.assertEqual(1, tr._conn_lost) + + tr.write(b'data') + self.assertEqual(2, tr._conn_lost) + tr.write(b'data') + tr.write(b'data') + tr.write(b'data') + tr.write(b'data') + # This is a bit overspecified. :-( + m_log.warning.assert_called_with( + 'pipe closed by peer or os.write(pipe, data) raised exception.') + tr.close() + + @mock.patch('os.write') + def test_write_close(self, m_write): + tr = self.write_pipe_transport() + tr._read_ready() # pipe was closed by peer + + tr.write(b'data') + self.assertEqual(tr._conn_lost, 1) + tr.write(b'data') + self.assertEqual(tr._conn_lost, 2) + + def test__read_ready(self): + tr = self.write_pipe_transport() + tr._read_ready() + self.assertFalse(self.loop.readers) + self.assertFalse(self.loop.writers) + self.assertTrue(tr._closing) + test_utils.run_briefly(self.loop) + self.protocol.connection_lost.assert_called_with(None) + + @mock.patch('os.write') + def test__write_ready(self, m_write): + tr = self.write_pipe_transport() + self.loop.add_writer(5, tr._write_ready) + tr._buffer = [b'da', b'ta'] + m_write.return_value = 4 + tr._write_ready() + m_write.assert_called_with(5, b'data') + self.assertFalse(self.loop.writers) + self.assertEqual([], tr._buffer) + + @mock.patch('os.write') + def test__write_ready_partial(self, m_write): + tr = self.write_pipe_transport() + self.loop.add_writer(5, tr._write_ready) + tr._buffer = [b'da', b'ta'] + m_write.return_value = 3 + tr._write_ready() + m_write.assert_called_with(5, b'data') + self.loop.assert_writer(5, tr._write_ready) + self.assertEqual([b'a'], tr._buffer) + + @mock.patch('os.write') + def test__write_ready_again(self, m_write): + tr = self.write_pipe_transport() + self.loop.add_writer(5, tr._write_ready) + tr._buffer = [b'da', b'ta'] + m_write.side_effect = BlockingIOError() + tr._write_ready() + m_write.assert_called_with(5, b'data') + self.loop.assert_writer(5, tr._write_ready) + self.assertEqual([b'data'], tr._buffer) + + @mock.patch('os.write') + def test__write_ready_empty(self, m_write): + tr = 
self.write_pipe_transport() + self.loop.add_writer(5, tr._write_ready) + tr._buffer = [b'da', b'ta'] + m_write.return_value = 0 + tr._write_ready() + m_write.assert_called_with(5, b'data') + self.loop.assert_writer(5, tr._write_ready) + self.assertEqual([b'data'], tr._buffer) + + @mock.patch('trollius.log.logger.error') + @mock.patch('os.write') + def test__write_ready_err(self, m_write, m_logexc): + tr = self.write_pipe_transport() + self.loop.add_writer(5, tr._write_ready) + tr._buffer = [b'da', b'ta'] + m_write.side_effect = err = OSError() + tr._write_ready() + m_write.assert_called_with(5, b'data') + self.assertFalse(self.loop.writers) + self.assertFalse(self.loop.readers) + self.assertEqual([], tr._buffer) + self.assertTrue(tr._closing) + m_logexc.assert_called_with( + test_utils.MockPattern( + 'Fatal write error on pipe transport' + '\nprotocol:.*\ntransport:.*'), + exc_info=(OSError, MOCK_ANY, MOCK_ANY)) + self.assertEqual(1, tr._conn_lost) + test_utils.run_briefly(self.loop) + self.protocol.connection_lost.assert_called_with(err) + + @mock.patch('os.write') + def test__write_ready_closing(self, m_write): + tr = self.write_pipe_transport() + self.loop.add_writer(5, tr._write_ready) + tr._closing = True + tr._buffer = [b'da', b'ta'] + m_write.return_value = 4 + tr._write_ready() + m_write.assert_called_with(5, b'data') + self.assertFalse(self.loop.writers) + self.assertFalse(self.loop.readers) + self.assertEqual([], tr._buffer) + self.protocol.connection_lost.assert_called_with(None) + self.pipe.close.assert_called_with() + + @mock.patch('os.write') + def test_abort(self, m_write): + tr = self.write_pipe_transport() + self.loop.add_writer(5, tr._write_ready) + self.loop.add_reader(5, tr._read_ready) + tr._buffer = [b'da', b'ta'] + tr.abort() + self.assertFalse(m_write.called) + self.assertFalse(self.loop.readers) + self.assertFalse(self.loop.writers) + self.assertEqual([], tr._buffer) + self.assertTrue(tr._closing) + test_utils.run_briefly(self.loop) + self.protocol.connection_lost.assert_called_with(None) + + def test__call_connection_lost(self): + tr = self.write_pipe_transport() + self.assertIsNotNone(tr._protocol) + self.assertIsNotNone(tr._loop) + + err = None + tr._call_connection_lost(err) + self.protocol.connection_lost.assert_called_with(err) + self.pipe.close.assert_called_with() + + self.assertIsNone(tr._protocol) + self.assertIsNone(tr._loop) + + def test__call_connection_lost_with_err(self): + tr = self.write_pipe_transport() + self.assertIsNotNone(tr._protocol) + self.assertIsNotNone(tr._loop) + + err = OSError() + tr._call_connection_lost(err) + self.protocol.connection_lost.assert_called_with(err) + self.pipe.close.assert_called_with() + + self.assertIsNone(tr._protocol) + self.assertIsNone(tr._loop) + + def test_close(self): + tr = self.write_pipe_transport() + tr.write_eof = mock.Mock() + tr.close() + tr.write_eof.assert_called_with() + + # closing the transport twice must not fail + tr.close() + + def test_close_closing(self): + tr = self.write_pipe_transport() + tr.write_eof = mock.Mock() + tr._closing = True + tr.close() + self.assertFalse(tr.write_eof.called) + + def test_write_eof(self): + tr = self.write_pipe_transport() + tr.write_eof() + self.assertTrue(tr._closing) + self.assertFalse(self.loop.readers) + test_utils.run_briefly(self.loop) + self.protocol.connection_lost.assert_called_with(None) + + def test_write_eof_pending(self): + tr = self.write_pipe_transport() + tr._buffer = [b'data'] + tr.write_eof() + self.assertTrue(tr._closing) + 
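+        # data is still buffered: connection_lost() must not be called
+        # until the remaining bytes have been flushed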
self.assertFalse(self.protocol.connection_lost.called) + + +class AbstractChildWatcherTests(test_utils.TestCase): + + def test_not_implemented(self): + f = mock.Mock() + watcher = asyncio.AbstractChildWatcher() + self.assertRaises( + NotImplementedError, watcher.add_child_handler, f, f) + self.assertRaises( + NotImplementedError, watcher.remove_child_handler, f) + self.assertRaises( + NotImplementedError, watcher.attach_loop, f) + self.assertRaises( + NotImplementedError, watcher.close) + self.assertRaises( + NotImplementedError, watcher.__enter__) + self.assertRaises( + NotImplementedError, watcher.__exit__, f, f, f) + + +class BaseChildWatcherTests(test_utils.TestCase): + + def test_not_implemented(self): + f = mock.Mock() + watcher = unix_events.BaseChildWatcher() + self.assertRaises( + NotImplementedError, watcher._do_waitpid, f) + + +WaitPidMocks = collections.namedtuple("WaitPidMocks", + ("waitpid", + "WIFEXITED", + "WIFSIGNALED", + "WEXITSTATUS", + "WTERMSIG", + )) + + +class ChildWatcherTestsMixin: + + ignore_warnings = mock.patch.object(log.logger, "warning") + + def setUp(self): + self.loop = self.new_test_loop() + self.running = False + self.zombies = {} + + with mock.patch.object( + self.loop, "add_signal_handler") as self.m_add_signal_handler: + self.watcher = self.create_watcher() + self.watcher.attach_loop(self.loop) + + def waitpid(self, pid, flags): + if isinstance(self.watcher, asyncio.SafeChildWatcher) or pid != -1: + self.assertGreater(pid, 0) + try: + if pid < 0: + return self.zombies.popitem() + else: + return pid, self.zombies.pop(pid) + except KeyError: + pass + if self.running: + return 0, 0 + else: + raise ChildProcessError() + + def add_zombie(self, pid, returncode): + self.zombies[pid] = returncode + 32768 + + def WIFEXITED(self, status): + return status >= 32768 + + def WIFSIGNALED(self, status): + return 32700 < status < 32768 + + def WEXITSTATUS(self, status): + self.assertTrue(self.WIFEXITED(status)) + return status - 32768 + + def WTERMSIG(self, status): + self.assertTrue(self.WIFSIGNALED(status)) + return 32768 - status + + def test_create_watcher(self): + self.m_add_signal_handler.assert_called_once_with( + signal.SIGCHLD, self.watcher._sig_chld) + + def waitpid_mocks(func): + def wrapped_func(self): + exit_stack = [] + + def patch(target, wrapper): + m = mock.patch(target, wraps=wrapper) + exit_stack.append(m) + return m.__enter__() + + m_waitpid = patch('os.waitpid', self.waitpid) + m_WIFEXITED = patch('os.WIFEXITED', self.WIFEXITED) + m_WIFSIGNALED = patch('os.WIFSIGNALED', self.WIFSIGNALED) + m_WEXITSTATUS = patch('os.WEXITSTATUS', self.WEXITSTATUS) + m_WTERMSIG = patch('os.WTERMSIG', self.WTERMSIG) + try: + func(self, WaitPidMocks(m_waitpid, + m_WIFEXITED, m_WIFSIGNALED, + m_WEXITSTATUS, m_WTERMSIG, + )) + finally: + for obj in reversed(exit_stack): + obj.__exit__(None, None, None) + + return wrapped_func + + @waitpid_mocks + def test_sigchld(self, m): + # register a child + callback = mock.Mock() + + with self.watcher: + self.running = True + self.watcher.add_child_handler(42, callback, 9, 10, 14) + + self.assertFalse(callback.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # child is running + self.watcher._sig_chld() + + self.assertFalse(callback.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # child 
terminates (returncode 12) + self.running = False + self.add_zombie(42, 12) + self.watcher._sig_chld() + + self.assertTrue(m.WIFEXITED.called) + self.assertTrue(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + callback.assert_called_once_with(42, 12, 9, 10, 14) + + m.WIFSIGNALED.reset_mock() + m.WIFEXITED.reset_mock() + m.WEXITSTATUS.reset_mock() + callback.reset_mock() + + # ensure that the child is effectively reaped + self.add_zombie(42, 13) + with self.ignore_warnings: + self.watcher._sig_chld() + + self.assertFalse(callback.called) + self.assertFalse(m.WTERMSIG.called) + + m.WIFSIGNALED.reset_mock() + m.WIFEXITED.reset_mock() + m.WEXITSTATUS.reset_mock() + + # sigchld called again + self.zombies.clear() + self.watcher._sig_chld() + + self.assertFalse(callback.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + @waitpid_mocks + def test_sigchld_two_children(self, m): + callback1 = mock.Mock() + callback2 = mock.Mock() + + # register child 1 + with self.watcher: + self.running = True + self.watcher.add_child_handler(43, callback1, 7, 8) + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # register child 2 + with self.watcher: + self.watcher.add_child_handler(44, callback2, 147, 18) + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # children are running + self.watcher._sig_chld() + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # child 1 terminates (signal 3) + self.add_zombie(43, -3) + self.watcher._sig_chld() + + callback1.assert_called_once_with(43, -3, 7, 8) + self.assertFalse(callback2.called) + self.assertTrue(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertTrue(m.WTERMSIG.called) + + m.WIFSIGNALED.reset_mock() + m.WIFEXITED.reset_mock() + m.WTERMSIG.reset_mock() + callback1.reset_mock() + + # child 2 still running + self.watcher._sig_chld() + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # child 2 terminates (code 108) + self.add_zombie(44, 108) + self.running = False + self.watcher._sig_chld() + + callback2.assert_called_once_with(44, 108, 147, 18) + self.assertFalse(callback1.called) + self.assertTrue(m.WIFEXITED.called) + self.assertTrue(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + m.WIFSIGNALED.reset_mock() + m.WIFEXITED.reset_mock() + m.WEXITSTATUS.reset_mock() + callback2.reset_mock() + + # ensure that the children are effectively reaped + self.add_zombie(43, 14) + self.add_zombie(44, 15) + with self.ignore_warnings: + self.watcher._sig_chld() + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + self.assertFalse(m.WTERMSIG.called) + + m.WIFSIGNALED.reset_mock() + m.WIFEXITED.reset_mock() + m.WEXITSTATUS.reset_mock() + + # sigchld 
called again + self.zombies.clear() + self.watcher._sig_chld() + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + @waitpid_mocks + def test_sigchld_two_children_terminating_together(self, m): + callback1 = mock.Mock() + callback2 = mock.Mock() + + # register child 1 + with self.watcher: + self.running = True + self.watcher.add_child_handler(45, callback1, 17, 8) + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # register child 2 + with self.watcher: + self.watcher.add_child_handler(46, callback2, 1147, 18) + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # children are running + self.watcher._sig_chld() + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # child 1 terminates (code 78) + # child 2 terminates (signal 5) + self.add_zombie(45, 78) + self.add_zombie(46, -5) + self.running = False + self.watcher._sig_chld() + + callback1.assert_called_once_with(45, 78, 17, 8) + callback2.assert_called_once_with(46, -5, 1147, 18) + self.assertTrue(m.WIFSIGNALED.called) + self.assertTrue(m.WIFEXITED.called) + self.assertTrue(m.WEXITSTATUS.called) + self.assertTrue(m.WTERMSIG.called) + + m.WIFSIGNALED.reset_mock() + m.WIFEXITED.reset_mock() + m.WTERMSIG.reset_mock() + m.WEXITSTATUS.reset_mock() + callback1.reset_mock() + callback2.reset_mock() + + # ensure that the children are effectively reaped + self.add_zombie(45, 14) + self.add_zombie(46, 15) + with self.ignore_warnings: + self.watcher._sig_chld() + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + self.assertFalse(m.WTERMSIG.called) + + @waitpid_mocks + def test_sigchld_race_condition(self, m): + # register a child + callback = mock.Mock() + + with self.watcher: + # child terminates before being registered + self.add_zombie(50, 4) + self.watcher._sig_chld() + + self.watcher.add_child_handler(50, callback, 1, 12) + + callback.assert_called_once_with(50, 4, 1, 12) + callback.reset_mock() + + # ensure that the child is effectively reaped + self.add_zombie(50, -1) + with self.ignore_warnings: + self.watcher._sig_chld() + + self.assertFalse(callback.called) + + @waitpid_mocks + def test_sigchld_replace_handler(self, m): + callback1 = mock.Mock() + callback2 = mock.Mock() + + # register a child + with self.watcher: + self.running = True + self.watcher.add_child_handler(51, callback1, 19) + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # register the same child again + with self.watcher: + self.watcher.add_child_handler(51, callback2, 21) + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + 
self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # child terminates (signal 8) + self.running = False + self.add_zombie(51, -8) + self.watcher._sig_chld() + + callback2.assert_called_once_with(51, -8, 21) + self.assertFalse(callback1.called) + self.assertTrue(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertTrue(m.WTERMSIG.called) + + m.WIFSIGNALED.reset_mock() + m.WIFEXITED.reset_mock() + m.WTERMSIG.reset_mock() + callback2.reset_mock() + + # ensure that the child is effectively reaped + self.add_zombie(51, 13) + with self.ignore_warnings: + self.watcher._sig_chld() + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + self.assertFalse(m.WTERMSIG.called) + + @waitpid_mocks + def test_sigchld_remove_handler(self, m): + callback = mock.Mock() + + # register a child + with self.watcher: + self.running = True + self.watcher.add_child_handler(52, callback, 1984) + + self.assertFalse(callback.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # unregister the child + self.watcher.remove_child_handler(52) + + self.assertFalse(callback.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # child terminates (code 99) + self.running = False + self.add_zombie(52, 99) + with self.ignore_warnings: + self.watcher._sig_chld() + + self.assertFalse(callback.called) + + @waitpid_mocks + def test_sigchld_unknown_status(self, m): + callback = mock.Mock() + + # register a child + with self.watcher: + self.running = True + self.watcher.add_child_handler(53, callback, -19) + + self.assertFalse(callback.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # terminate with unknown status + self.zombies[53] = 1178 + self.running = False + self.watcher._sig_chld() + + callback.assert_called_once_with(53, 1178, -19) + self.assertTrue(m.WIFEXITED.called) + self.assertTrue(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + callback.reset_mock() + m.WIFEXITED.reset_mock() + m.WIFSIGNALED.reset_mock() + + # ensure that the child is effectively reaped + self.add_zombie(53, 101) + with self.ignore_warnings: + self.watcher._sig_chld() + + self.assertFalse(callback.called) + + @waitpid_mocks + def test_remove_child_handler(self, m): + callback1 = mock.Mock() + callback2 = mock.Mock() + callback3 = mock.Mock() + + # register children + with self.watcher: + self.running = True + self.watcher.add_child_handler(54, callback1, 1) + self.watcher.add_child_handler(55, callback2, 2) + self.watcher.add_child_handler(56, callback3, 3) + + # remove child handler 1 + self.assertTrue(self.watcher.remove_child_handler(54)) + + # remove child handler 2 multiple times + self.assertTrue(self.watcher.remove_child_handler(55)) + self.assertFalse(self.watcher.remove_child_handler(55)) + self.assertFalse(self.watcher.remove_child_handler(55)) + + # all children terminate + self.add_zombie(54, 0) + self.add_zombie(55, 1) + self.add_zombie(56, 2) + self.running = False + with self.ignore_warnings: + self.watcher._sig_chld() + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + callback3.assert_called_once_with(56, 2, 3) + + @waitpid_mocks 
+ def test_sigchld_unhandled_exception(self, m): + callback = mock.Mock() + + # register a child + with self.watcher: + self.running = True + self.watcher.add_child_handler(57, callback) + + # raise an exception + m.waitpid.side_effect = ValueError + + with mock.patch.object(log.logger, + 'error') as m_error: + + self.assertEqual(self.watcher._sig_chld(), None) + self.assertTrue(m_error.called) + + @waitpid_mocks + def test_sigchld_child_reaped_elsewhere(self, m): + # register a child + callback = mock.Mock() + + with self.watcher: + self.running = True + self.watcher.add_child_handler(58, callback) + + self.assertFalse(callback.called) + self.assertFalse(m.WIFEXITED.called) + self.assertFalse(m.WIFSIGNALED.called) + self.assertFalse(m.WEXITSTATUS.called) + self.assertFalse(m.WTERMSIG.called) + + # child terminates + self.running = False + self.add_zombie(58, 4) + + # waitpid is called elsewhere + os.waitpid(58, os.WNOHANG) + + m.waitpid.reset_mock() + + # sigchld + with self.ignore_warnings: + self.watcher._sig_chld() + + if isinstance(self.watcher, asyncio.FastChildWatcher): + # here the FastChildWatcher enters a deadlock + # (there is no way to prevent it) + self.assertFalse(callback.called) + else: + callback.assert_called_once_with(58, 255) + + @waitpid_mocks + def test_sigchld_unknown_pid_during_registration(self, m): + # register two children + callback1 = mock.Mock() + callback2 = mock.Mock() + + with self.ignore_warnings: + with self.watcher: + self.running = True + # child 1 terminates + self.add_zombie(591, 7) + # an unknown child terminates + self.add_zombie(593, 17) + + self.watcher._sig_chld() + + self.watcher.add_child_handler(591, callback1) + self.watcher.add_child_handler(592, callback2) + + callback1.assert_called_once_with(591, 7) + self.assertFalse(callback2.called) + + @waitpid_mocks + def test_set_loop(self, m): + # register a child + callback = mock.Mock() + + with self.watcher: + self.running = True + self.watcher.add_child_handler(60, callback) + + # attach a new loop + old_loop = self.loop + self.loop = self.new_test_loop() + patch = mock.patch.object + + with patch(old_loop, "remove_signal_handler") as m_old_remove: + with patch(self.loop, "add_signal_handler") as m_new_add: + + self.watcher.attach_loop(self.loop) + + m_old_remove.assert_called_once_with( + signal.SIGCHLD) + m_new_add.assert_called_once_with( + signal.SIGCHLD, self.watcher._sig_chld) + + # child terminates + self.running = False + self.add_zombie(60, 9) + self.watcher._sig_chld() + + callback.assert_called_once_with(60, 9) + + @waitpid_mocks + def test_set_loop_race_condition(self, m): + # register 3 children + callback1 = mock.Mock() + callback2 = mock.Mock() + callback3 = mock.Mock() + + with self.watcher: + self.running = True + self.watcher.add_child_handler(61, callback1) + self.watcher.add_child_handler(62, callback2) + self.watcher.add_child_handler(622, callback3) + + # detach the loop + old_loop = self.loop + self.loop = None + + with mock.patch.object( + old_loop, "remove_signal_handler") as m_remove_signal_handler: + + self.watcher.attach_loop(None) + + m_remove_signal_handler.assert_called_once_with( + signal.SIGCHLD) + + # child 1 & 2 terminate + self.add_zombie(61, 11) + self.add_zombie(62, -5) + + # SIGCHLD was not caught + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + self.assertFalse(callback3.called) + + # attach a new loop + self.loop = self.new_test_loop() + + with mock.patch.object( + self.loop, "add_signal_handler") as m_add_signal_handler: + +
self.watcher.attach_loop(self.loop) + + m_add_signal_handler.assert_called_once_with( + signal.SIGCHLD, self.watcher._sig_chld) + callback1.assert_called_once_with(61, 11) # race condition! + callback2.assert_called_once_with(62, -5) # race condition! + self.assertFalse(callback3.called) + + callback1.reset_mock() + callback2.reset_mock() + + # child 3 terminates + self.running = False + self.add_zombie(622, 19) + self.watcher._sig_chld() + + self.assertFalse(callback1.called) + self.assertFalse(callback2.called) + callback3.assert_called_once_with(622, 19) + + @waitpid_mocks + def test_close(self, m): + # register two children + callback1 = mock.Mock() + + with self.watcher: + self.running = True + # child 1 terminates + self.add_zombie(63, 9) + # other child terminates + self.add_zombie(65, 18) + self.watcher._sig_chld() + + self.watcher.add_child_handler(63, callback1) + self.watcher.add_child_handler(64, callback1) + + self.assertEqual(len(self.watcher._callbacks), 1) + if isinstance(self.watcher, asyncio.FastChildWatcher): + self.assertEqual(len(self.watcher._zombies), 1) + + with mock.patch.object( + self.loop, + "remove_signal_handler") as m_remove_signal_handler: + + self.watcher.close() + + m_remove_signal_handler.assert_called_once_with( + signal.SIGCHLD) + self.assertFalse(self.watcher._callbacks) + if isinstance(self.watcher, asyncio.FastChildWatcher): + self.assertFalse(self.watcher._zombies) + + +class SafeChildWatcherTests (ChildWatcherTestsMixin, test_utils.TestCase): + def create_watcher(self): + return asyncio.SafeChildWatcher() + + +class FastChildWatcherTests (ChildWatcherTestsMixin, test_utils.TestCase): + def create_watcher(self): + return asyncio.FastChildWatcher() + + +class PolicyTests(test_utils.TestCase): + + def create_policy(self): + return asyncio.DefaultEventLoopPolicy() + + def test_get_child_watcher(self): + policy = self.create_policy() + self.assertIsNone(policy._watcher) + + watcher = policy.get_child_watcher() + self.assertIsInstance(watcher, asyncio.SafeChildWatcher) + + self.assertIs(policy._watcher, watcher) + + self.assertIs(watcher, policy.get_child_watcher()) + self.assertIsNone(watcher._loop) + + def test_get_child_watcher_after_set(self): + policy = self.create_policy() + watcher = asyncio.FastChildWatcher() + + policy.set_child_watcher(watcher) + self.assertIs(policy._watcher, watcher) + self.assertIs(watcher, policy.get_child_watcher()) + + def test_get_child_watcher_with_mainloop_existing(self): + policy = self.create_policy() + loop = policy.get_event_loop() + + self.assertIsNone(policy._watcher) + watcher = policy.get_child_watcher() + + self.assertIsInstance(watcher, asyncio.SafeChildWatcher) + self.assertIs(watcher._loop, loop) + + loop.close() + + def test_get_child_watcher_thread(self): + + def f(): + policy.set_event_loop(policy.new_event_loop()) + + self.assertIsInstance(policy.get_event_loop(), + asyncio.AbstractEventLoop) + watcher = policy.get_child_watcher() + + self.assertIsInstance(watcher, asyncio.SafeChildWatcher) + self.assertIsNone(watcher._loop) + + policy.get_event_loop().close() + + policy = self.create_policy() + + th = threading.Thread(target=f) + th.start() + th.join() + + def test_child_watcher_replace_mainloop_existing(self): + policy = self.create_policy() + loop = policy.get_event_loop() + + watcher = policy.get_child_watcher() + + self.assertIs(watcher._loop, loop) + + new_loop = policy.new_event_loop() + policy.set_event_loop(new_loop) + + self.assertIs(watcher._loop, new_loop) + + policy.set_event_loop(None) + + 
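# The watcher is expected to follow the policy's event loop: + # set_event_loop(None) must detach the watcher as well, as asserted below. +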
self.assertIs(watcher._loop, None) + + loop.close() + new_loop.close() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_windows_events.py b/tests/test_windows_events.py new file mode 100644 index 00000000..ef0ab92c --- /dev/null +++ b/tests/test_windows_events.py @@ -0,0 +1,162 @@ +import os +import sys +from trollius.test_utils import unittest + +if sys.platform != 'win32': + raise unittest.SkipTest('Windows only') + +import trollius as asyncio +from trollius import Return, From +from trollius import _overlapped +from trollius import py33_winapi as _winapi +from trollius import windows_events +from trollius.py33_exceptions import PermissionError, FileNotFoundError +from trollius import test_utils +from trollius.test_utils import mock + + +class UpperProto(asyncio.Protocol): + def __init__(self): + self.buf = [] + + def connection_made(self, trans): + self.trans = trans + + def data_received(self, data): + self.buf.append(data) + if b'\n' in data: + self.trans.write(b''.join(self.buf).upper()) + self.trans.close() + + +class ProactorTests(test_utils.TestCase): + + def setUp(self): + self.loop = asyncio.ProactorEventLoop() + self.set_event_loop(self.loop) + + def test_close(self): + a, b = self.loop._socketpair() + trans = self.loop._make_socket_transport(a, asyncio.Protocol()) + f = asyncio.ensure_future(self.loop.sock_recv(b, 100)) + trans.close() + self.loop.run_until_complete(f) + self.assertEqual(f.result(), b'') + b.close() + + def test_double_bind(self): + ADDRESS = r'\\.\pipe\test_double_bind-%s' % os.getpid() + server1 = windows_events.PipeServer(ADDRESS) + with self.assertRaises(PermissionError): + windows_events.PipeServer(ADDRESS) + server1.close() + + def test_pipe(self): + res = self.loop.run_until_complete(self._test_pipe()) + self.assertEqual(res, 'done') + + def _test_pipe(self): + ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid() + + with self.assertRaises(FileNotFoundError): + yield From(self.loop.create_pipe_connection( + asyncio.Protocol, ADDRESS)) + + [server] = yield From(self.loop.start_serving_pipe( + UpperProto, ADDRESS)) + self.assertIsInstance(server, windows_events.PipeServer) + + clients = [] + for i in range(5): + stream_reader = asyncio.StreamReader(loop=self.loop) + protocol = asyncio.StreamReaderProtocol(stream_reader, + loop=self.loop) + trans, proto = yield From(self.loop.create_pipe_connection( + lambda: protocol, ADDRESS)) + self.assertIsInstance(trans, asyncio.Transport) + self.assertEqual(protocol, proto) + clients.append((stream_reader, trans)) + + for i, (r, w) in enumerate(clients): + w.write('lower-{0}\n'.format(i).encode()) + + for i, (r, w) in enumerate(clients): + response = yield From(r.readline()) + self.assertEqual(response, 'LOWER-{0}\n'.format(i).encode()) + w.close() + + server.close() + + with self.assertRaises(FileNotFoundError): + yield From(self.loop.create_pipe_connection( + asyncio.Protocol, ADDRESS)) + + raise Return('done') + + def test_connect_pipe_cancel(self): + exc = OSError() + exc.winerror = _overlapped.ERROR_PIPE_BUSY + with mock.patch.object(_overlapped, 'ConnectPipe', side_effect=exc) as connect: + coro = self.loop._proactor.connect_pipe('pipe_address') + task = self.loop.create_task(coro) + + # check that it's possible to cancel connect_pipe() + task.cancel() + with self.assertRaises(asyncio.CancelledError): + self.loop.run_until_complete(task) + + def test_wait_for_handle(self): + event = _overlapped.CreateEvent(None, True, False, None) + self.addCleanup(_winapi.CloseHandle, event) + + # Wait 
for unset event with 0.5s timeout; + # result should be False at timeout + fut = self.loop._proactor.wait_for_handle(event, 0.5) + start = self.loop.time() + done = self.loop.run_until_complete(fut) + elapsed = self.loop.time() - start + + self.assertEqual(done, False) + self.assertFalse(fut.result()) + self.assertTrue(0.48 < elapsed < 0.9, elapsed) + + _overlapped.SetEvent(event) + + # Wait for set event; + # result should be True immediately + fut = self.loop._proactor.wait_for_handle(event, 10) + start = self.loop.time() + done = self.loop.run_until_complete(fut) + elapsed = self.loop.time() - start + + self.assertEqual(done, True) + self.assertTrue(fut.result()) + self.assertTrue(0 <= elapsed < 0.3, elapsed) + + # asyncio issue #195: cancelling a done _WaitHandleFuture + # must not crash + fut.cancel() + + def test_wait_for_handle_cancel(self): + event = _overlapped.CreateEvent(None, True, False, None) + self.addCleanup(_winapi.CloseHandle, event) + + # Wait for unset event with a cancelled future; + # CancelledError should be raised immediately + fut = self.loop._proactor.wait_for_handle(event, 10) + fut.cancel() + start = self.loop.time() + with self.assertRaises(asyncio.CancelledError): + self.loop.run_until_complete(fut) + elapsed = self.loop.time() - start + self.assertTrue(0 <= elapsed < 0.1, elapsed) + + # asyncio issue #195: cancelling a _WaitHandleFuture twice + # must not crash + fut = self.loop._proactor.wait_for_handle(event) + fut.cancel() + fut.cancel() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_windows_utils.py b/tests/test_windows_utils.py new file mode 100644 index 00000000..f73e2631 --- /dev/null +++ b/tests/test_windows_utils.py @@ -0,0 +1,182 @@ +"""Tests for windows_utils""" + +import socket +import sys +import warnings +from trollius.test_utils import unittest + +if sys.platform != 'win32': + raise unittest.SkipTest('Windows only') + +from trollius import _overlapped +from trollius import py33_winapi as _winapi +from trollius import test_support as support +from trollius import test_utils +from trollius import windows_utils +from trollius.test_utils import mock + + +class WinsocketpairTests(unittest.TestCase): + + def check_winsocketpair(self, ssock, csock): + csock.send(b'xxx') + self.assertEqual(b'xxx', ssock.recv(1024)) + csock.close() + ssock.close() + + def test_winsocketpair(self): + ssock, csock = windows_utils.socketpair() + self.check_winsocketpair(ssock, csock) + + @unittest.skipUnless(support.IPV6_ENABLED, + 'IPv6 not supported or enabled') + def test_winsocketpair_ipv6(self): + ssock, csock = windows_utils.socketpair(family=socket.AF_INET6) + self.check_winsocketpair(ssock, csock) + + @unittest.skipIf(hasattr(socket, 'socketpair'), + 'socket.socketpair is available') + @mock.patch('trollius.windows_utils.socket') + def test_winsocketpair_exc(self, m_socket): + m_socket.AF_INET = socket.AF_INET + m_socket.SOCK_STREAM = socket.SOCK_STREAM + m_socket.socket.return_value.getsockname.return_value = ('', 12345) + m_socket.socket.return_value.accept.return_value = object(), object() + m_socket.socket.return_value.connect.side_effect = OSError() + + self.assertRaises(OSError, windows_utils.socketpair) + + def test_winsocketpair_invalid_args(self): + self.assertRaises(ValueError, + windows_utils.socketpair, family=socket.AF_UNSPEC) + self.assertRaises(ValueError, + windows_utils.socketpair, type=socket.SOCK_DGRAM) + self.assertRaises(ValueError, + windows_utils.socketpair, proto=1) + + @unittest.skipIf(hasattr(socket,
'socketpair'), + 'socket.socketpair is available') + @mock.patch('trollius.windows_utils.socket') + def test_winsocketpair_close(self, m_socket): + m_socket.AF_INET = socket.AF_INET + m_socket.SOCK_STREAM = socket.SOCK_STREAM + sock = mock.Mock() + m_socket.socket.return_value = sock + sock.bind.side_effect = OSError + self.assertRaises(OSError, windows_utils.socketpair) + self.assertTrue(sock.close.called) + + +class PipeTests(unittest.TestCase): + + def test_pipe_overlapped(self): + h1, h2 = windows_utils.pipe(overlapped=(True, True)) + try: + ov1 = _overlapped.Overlapped() + self.assertFalse(ov1.pending) + self.assertEqual(ov1.error, 0) + + ov1.ReadFile(h1, 100) + self.assertTrue(ov1.pending) + self.assertEqual(ov1.error, _winapi.ERROR_IO_PENDING) + ERROR_IO_INCOMPLETE = 996 + try: + ov1.getresult() + except WindowsError as e: + self.assertEqual(e.winerror, ERROR_IO_INCOMPLETE) + else: + raise RuntimeError('expected ERROR_IO_INCOMPLETE') + + ov2 = _overlapped.Overlapped() + self.assertFalse(ov2.pending) + self.assertEqual(ov2.error, 0) + + ov2.WriteFile(h2, b"hello") + self.assertIn(ov2.error, set((0, _winapi.ERROR_IO_PENDING))) + + res = _winapi.WaitForSingleObject(ov2.event, 100) + self.assertEqual(res, _winapi.WAIT_OBJECT_0) + + self.assertFalse(ov1.pending) + self.assertEqual(ov1.error, ERROR_IO_INCOMPLETE) + self.assertFalse(ov2.pending) + self.assertIn(ov2.error, set((0, _winapi.ERROR_IO_PENDING))) + self.assertEqual(ov1.getresult(), b"hello") + finally: + _winapi.CloseHandle(h1) + _winapi.CloseHandle(h2) + + def test_pipe_handle(self): + h, _ = windows_utils.pipe(overlapped=(True, True)) + _winapi.CloseHandle(_) + p = windows_utils.PipeHandle(h) + self.assertEqual(p.fileno(), h) + self.assertEqual(p.handle, h) + + # check garbage collection of p closes handle + with warnings.catch_warnings(): + if sys.version_info >= (3, 4): + warnings.filterwarnings("ignore", "", ResourceWarning) + del p + support.gc_collect() + try: + _winapi.CloseHandle(h) + except OSError as e: + self.assertEqual(e.winerror, 6) # ERROR_INVALID_HANDLE + else: + raise RuntimeError('expected ERROR_INVALID_HANDLE') + + +class PopenTests(unittest.TestCase): + + def test_popen(self): + command = r"""if 1: + import sys + s = sys.stdin.readline() + sys.stdout.write(s.upper()) + sys.stderr.write('stderr') + """ + msg = b"blah\n" + + p = windows_utils.Popen([sys.executable, '-c', command], + stdin=windows_utils.PIPE, + stdout=windows_utils.PIPE, + stderr=windows_utils.PIPE) + + for f in [p.stdin, p.stdout, p.stderr]: + self.assertIsInstance(f, windows_utils.PipeHandle) + + ovin = _overlapped.Overlapped() + ovout = _overlapped.Overlapped() + overr = _overlapped.Overlapped() + + ovin.WriteFile(p.stdin.handle, msg) + ovout.ReadFile(p.stdout.handle, 100) + overr.ReadFile(p.stderr.handle, 100) + + events = [ovin.event, ovout.event, overr.event] + # Super-long timeout for slow buildbots. + res = _winapi.WaitForMultipleObjects(events, True, 10000) + self.assertEqual(res, _winapi.WAIT_OBJECT_0) + self.assertFalse(ovout.pending) + self.assertFalse(overr.pending) + self.assertFalse(ovin.pending) + + self.assertEqual(ovin.getresult(), len(msg)) + out = ovout.getresult().rstrip() + err = overr.getresult().rstrip() + + self.assertGreater(len(out), 0) + self.assertGreater(len(err), 0) + # allow for partial reads... 
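+ # (ReadFile() on a pipe may return fewer bytes than were written, so + # the assertions below only require prefix matches)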
+ self.assertTrue(msg.upper().rstrip().startswith(out)) + self.assertTrue(b"stderr".startswith(err)) + + p.stdin.close() + p.stdout.close() + p.stderr.close() + p.wait() + + +if __name__ == '__main__': + unittest.main() diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..f51177da --- /dev/null +++ b/tox.ini @@ -0,0 +1,90 @@ +[tox] +envlist = py27,py2_release,py2_no_ssl,py2_no_concurrent,py33,py34,py3_release,py3_no_ssl +# and: pyflakes2,pyflakes3 + +[testenv] +deps= + six +setenv = + TROLLIUSDEBUG = 1 +commands= + python -Wd runtests.py -r {posargs} + +[testenv:pyflakes2] +basepython = python2 +deps= + pyflakes +commands= + pyflakes trollius tests runtests.py check.py setup.py + +[testenv:pyflakes3] +basepython = python3 +deps= + pyflakes +commands= + pyflakes trollius tests runtests.py check.py setup.py + +[testenv:py26] +deps= + futures + mock==1.0.1 + ordereddict + six + unittest2 + +[testenv:py27] +deps= + futures + mock + six + unittest2 + +[testenv:py2_release] +# Run tests in release mode +basepython = python2 +deps= + futures + mock + six + unittest2 +setenv = + TROLLIUSDEBUG = + +[testenv:py2_no_ssl] +basepython = python2 +deps= + futures + mock + six + unittest2 +commands= + python -Wd runtests.py --no-ssl -r {posargs} + +[testenv:py2_no_concurrent] +basepython = python2 +deps= + futures + mock + six + unittest2 +commands= + python -Wd runtests.py --no-concurrent -r {posargs} + +[testenv:py32] +deps= + mock + six + +[testenv:py35] +basepython = python3.5 + +[testenv:py3_release] +# Run tests in release mode +basepython = python3 +setenv = + TROLLIUSDEBUG = + +[testenv:py3_no_ssl] +basepython = python3 +commands= + python -Wd runtests.py --no-ssl -r {posargs} diff --git a/trollius/__init__.py b/trollius/__init__.py new file mode 100644 index 00000000..a1379fbc --- /dev/null +++ b/trollius/__init__.py @@ -0,0 +1,59 @@ +"""The trollius package, tracking PEP 3156.""" + +import sys + +# The selectors module is in the stdlib in Python 3.4 but not in 3.3. +# Do this first, so the other submodules can use "from . import selectors". +# Prefer asyncio/selectors.py over the stdlib one, as ours may be newer. +try: + from . import selectors +except ImportError: + import selectors # Will also be exported. + +if sys.platform == 'win32': + # Similar thing for _overlapped. + try: + from . import _overlapped + except ImportError: + import _overlapped # Will also be exported. + +# This relies on each of the submodules having an __all__ variable. 
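+# For example, base_events defines __all__ = ['BaseEventLoop'], so the +# star-imports below re-export BaseEventLoop as trollius.BaseEventLoop.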
+from .base_events import * +from .coroutines import * +from .events import * +from .futures import * +from .locks import * +from .protocols import * +from .py33_exceptions import * +from .queues import * +from .streams import * +from .subprocess import * +from .tasks import * +from .transports import * + +__all__ = (base_events.__all__ + + coroutines.__all__ + + events.__all__ + + py33_exceptions.__all__ + + futures.__all__ + + locks.__all__ + + protocols.__all__ + + queues.__all__ + + streams.__all__ + + subprocess.__all__ + + tasks.__all__ + + transports.__all__) + +if sys.platform == 'win32': # pragma: no cover + from .windows_events import * + __all__ += windows_events.__all__ +else: + from .unix_events import * # pragma: no cover + __all__ += unix_events.__all__ + +try: + from .py3_ssl import * + __all__ += py3_ssl.__all__ +except ImportError: + # SSL support is optional + pass diff --git a/trollius/base_events.py b/trollius/base_events.py new file mode 100644 index 00000000..c5e6effd --- /dev/null +++ b/trollius/base_events.py @@ -0,0 +1,1260 @@ +"""Base implementation of event loop. + +The event loop can be broken up into a multiplexer (the part +responsible for notifying us of I/O events) and the event loop proper, +which wraps a multiplexer with functionality for scheduling callbacks, +immediately or at a given time in the future. + +Whenever a public API takes a callback, subsequent positional +arguments will be passed to the callback if/when it is called. This +avoids the proliferation of trivial lambdas implementing closures. +Keyword arguments for the callback are not supported; this is a +conscious design decision, leaving the door open for keyword arguments +to modify the meaning of the API call itself. +""" + + +import collections +import heapq +import inspect +import logging +import os +import socket +import subprocess +import sys +import traceback +import warnings +try: + from collections import OrderedDict +except ImportError: + # Python 2.6: use ordereddict backport + from ordereddict import OrderedDict +try: + from threading import get_ident as _get_thread_ident +except ImportError: + # Python 2 + from threading import _get_ident as _get_thread_ident + +from . import compat +from . import coroutines +from . import events +from . import futures +from . import tasks +from .coroutines import coroutine, From, Return +from .executor import get_default_executor +from .log import logger +from .time_monotonic import time_monotonic, time_monotonic_resolution + + +__all__ = ['BaseEventLoop'] + + +# Argument for default thread pool executor creation. +_MAX_WORKERS = 5 + +# Minimum number of _scheduled timer handles before cleanup of +# cancelled handles is performed. +_MIN_SCHEDULED_TIMER_HANDLES = 100 + +# Minimum fraction of _scheduled timer handles that are cancelled +# before cleanup of cancelled handles is performed. +_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5 + +def _format_handle(handle): + cb = handle._callback + if inspect.ismethod(cb) and isinstance(cb.__self__, tasks.Task): + # format the task + return repr(cb.__self__) + else: + return str(handle) + + +def _format_pipe(fd): + if fd == subprocess.PIPE: + return '<pipe>' + elif fd == subprocess.STDOUT: + return '<stdout>' + else: + return repr(fd) + + +class _StopError(BaseException): + """Raised to stop the event loop.""" + + +def _check_resolved_address(sock, address): + # Ensure that the address is already resolved to avoid the trap of hanging + # the entire event loop when the address requires doing a DNS lookup.
+ # + # getaddrinfo() is slow (around 10 us per call): this function should only + # be called in debug mode + family = sock.family + + if family == socket.AF_INET: + host, port = address + elif family == socket.AF_INET6: + host, port = address[:2] + else: + return + + # On Windows, socket.inet_pton() is only available since Python 3.4 + if hasattr(socket, 'inet_pton'): + # getaddrinfo() is slow and has known issues: prefer inet_pton() + # if available + try: + socket.inet_pton(family, host) + except socket.error as exc: + raise ValueError("address must be resolved (IP address), " + "got host %r: %s" + % (host, exc)) + else: + # Use getaddrinfo(flags=AI_NUMERICHOST) to ensure that the address is + # already resolved. + type_mask = 0 + if hasattr(socket, 'SOCK_NONBLOCK'): + type_mask |= socket.SOCK_NONBLOCK + if hasattr(socket, 'SOCK_CLOEXEC'): + type_mask |= socket.SOCK_CLOEXEC + try: + socket.getaddrinfo(host, port, + family, + (sock.type & ~type_mask), + sock.proto, + socket.AI_NUMERICHOST) + except socket.gaierror as err: + raise ValueError("address must be resolved (IP address), " + "got host %r: %s" + % (host, err)) + +def _raise_stop_error(*args): + raise _StopError + + +def _run_until_complete_cb(fut): + exc = fut._exception + if (isinstance(exc, BaseException) + and not isinstance(exc, Exception)): + # Issue #22429: run_forever() already finished, no need to + # stop it. + return + _raise_stop_error() + + +class Server(events.AbstractServer): + + def __init__(self, loop, sockets): + self._loop = loop + self.sockets = sockets + self._active_count = 0 + self._waiters = [] + + def __repr__(self): + return '<%s sockets=%r>' % (self.__class__.__name__, self.sockets) + + def _attach(self): + assert self.sockets is not None + self._active_count += 1 + + def _detach(self): + assert self._active_count > 0 + self._active_count -= 1 + if self._active_count == 0 and self.sockets is None: + self._wakeup() + + def close(self): + sockets = self.sockets + if sockets is None: + return + self.sockets = None + for sock in sockets: + self._loop._stop_serving(sock) + if self._active_count == 0: + self._wakeup() + + def _wakeup(self): + waiters = self._waiters + self._waiters = None + for waiter in waiters: + if not waiter.done(): + waiter.set_result(waiter) + + @coroutine + def wait_closed(self): + if self.sockets is None or self._waiters is None: + raise Return() + waiter = futures.Future(loop=self._loop) + self._waiters.append(waiter) + yield From(waiter) + + +class BaseEventLoop(events.AbstractEventLoop): + + def __init__(self): + self._timer_cancelled_count = 0 + self._closed = False + self._ready = collections.deque() + self._scheduled = [] + self._default_executor = None + self._internal_fds = 0 + # Identifier of the thread running the event loop, or None if the + # event loop is not running + self._thread_id = None + self._clock_resolution = time_monotonic_resolution + self._exception_handler = None + self.set_debug(bool(os.environ.get('TROLLIUSDEBUG'))) + # In debug mode, if the execution of a callback or a step of a task + # exceeds this duration in seconds, the slow callback/task is logged. + self.slow_callback_duration = 0.1 + self._current_handle = None + self._task_factory = None + self._coroutine_wrapper_set = False + + def __repr__(self): + return ('<%s running=%s closed=%s debug=%s>' + % (self.__class__.__name__, self.is_running(), + self.is_closed(), self.get_debug())) + + def create_task(self, coro): + """Schedule a coroutine object. + + Return a task object.
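+ + Usage sketch (illustrative; 'coro' is any coroutine object): + + task = loop.create_task(coro)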
+ """ + self._check_closed() + if self._task_factory is None: + task = tasks.Task(coro, loop=self) + if task._source_traceback: + del task._source_traceback[-1] + else: + task = self._task_factory(self, coro) + return task + + def set_task_factory(self, factory): + """Set a task factory that will be used by loop.create_task(). + + If factory is None the default task factory will be set. + + If factory is a callable, it should have a signature matching + '(loop, coro)', where 'loop' will be a reference to the active + event loop, 'coro' will be a coroutine object. The callable + must return a Future. + """ + if factory is not None and not callable(factory): + raise TypeError('task factory must be a callable or None') + self._task_factory = factory + + def get_task_factory(self): + """Return a task factory, or None if the default one is in use.""" + return self._task_factory + + def _make_socket_transport(self, sock, protocol, waiter=None, + extra=None, server=None): + """Create socket transport.""" + raise NotImplementedError + + def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None, + server_side=False, server_hostname=None, + extra=None, server=None): + """Create SSL transport.""" + raise NotImplementedError + + def _make_datagram_transport(self, sock, protocol, + address=None, waiter=None, extra=None): + """Create datagram transport.""" + raise NotImplementedError + + def _make_read_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + """Create read pipe transport.""" + raise NotImplementedError + + def _make_write_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + """Create write pipe transport.""" + raise NotImplementedError + + @coroutine + def _make_subprocess_transport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + extra=None, **kwargs): + """Create subprocess transport.""" + raise NotImplementedError + + def _write_to_self(self): + """Write a byte to self-pipe, to wake up the event loop. + + This may be called from a different thread. + + The subclass is responsible for implementing the self-pipe. + """ + raise NotImplementedError + + def _process_events(self, event_list): + """Process selector events.""" + raise NotImplementedError + + def _check_closed(self): + if self._closed: + raise RuntimeError('Event loop is closed') + + def run_forever(self): + """Run until stop() is called.""" + self._check_closed() + if self.is_running(): + raise RuntimeError('Event loop is running.') + self._set_coroutine_wrapper(self._debug) + self._thread_id = _get_thread_ident() + try: + while True: + try: + self._run_once() + except _StopError: + break + finally: + self._thread_id = None + self._set_coroutine_wrapper(False) + + def run_until_complete(self, future): + """Run until the Future is done. + + If the argument is a coroutine, it is wrapped in a Task. + + WARNING: It would be disastrous to call run_until_complete() + with the same coroutine twice -- it would wrap it in two + different Tasks and that can't be good. + + Return the Future's result, or raise its exception. 
+ """ + self._check_closed() + + new_task = not isinstance(future, futures._FUTURE_CLASSES) + future = tasks.ensure_future(future, loop=self) + if new_task: + # An exception is raised if the future didn't complete, so there + # is no need to log the "destroy pending task" message + future._log_destroy_pending = False + + future.add_done_callback(_run_until_complete_cb) + try: + self.run_forever() + except: + if new_task and future.done() and not future.cancelled(): + # The coroutine raised a BaseException. Consume the exception + # to not log a warning, the caller doesn't have access to the + # local task. + future.exception() + raise + future.remove_done_callback(_run_until_complete_cb) + if not future.done(): + raise RuntimeError('Event loop stopped before Future completed.') + + return future.result() + + def stop(self): + """Stop running the event loop. + + Every callback scheduled before stop() is called will run. Callbacks + scheduled after stop() is called will not run. However, those callbacks + will run if run_forever is called again later. + """ + self.call_soon(_raise_stop_error) + + def close(self): + """Close the event loop. + + This clears the queues and shuts down the executor, + but does not wait for the executor to finish. + + The event loop must not be running. + """ + if self.is_running(): + raise RuntimeError("Cannot close a running event loop") + if self._closed: + return + if self._debug: + logger.debug("Close %r", self) + self._closed = True + self._ready.clear() + del self._scheduled[:] + executor = self._default_executor + if executor is not None: + self._default_executor = None + executor.shutdown(wait=False) + + def is_closed(self): + """Returns True if the event loop was closed.""" + return self._closed + + # On Python 3.3 and older, objects with a destructor part of a reference + # cycle are never destroyed. It's not more the case on Python 3.4 thanks + # to the PEP 442. + if compat.PY34: + def __del__(self): + if not self.is_closed(): + warnings.warn("unclosed event loop %r" % self, ResourceWarning) + if not self.is_running(): + self.close() + + def is_running(self): + """Returns True if the event loop is running.""" + return (self._thread_id is not None) + + def time(self): + """Return the time according to the event loop's clock. + + This is a float expressed in seconds since an epoch, but the + epoch, precision, accuracy and drift are unspecified and may + differ per event loop. + """ + return time_monotonic() + + def call_later(self, delay, callback, *args): + """Arrange for a callback to be called at a given time. + + Return a Handle: an opaque object with a cancel() method that + can be used to cancel the call. + + The delay can be an int or float, expressed in seconds. It is + always relative to the current time. + + Each callback will be called exactly once. If two callbacks + are scheduled for exactly the same time, it undefined which + will be called first. + + Any positional arguments after the callback will be passed to + the callback when it is called. + """ + timer = self.call_at(self.time() + delay, callback, *args) + if timer._source_traceback: + del timer._source_traceback[-1] + return timer + + def call_at(self, when, callback, *args): + """Like call_later(), but uses an absolute time. + + Absolute time corresponds to the event loop's time() method. 
+ """ + if (coroutines.iscoroutine(callback) + or coroutines.iscoroutinefunction(callback)): + raise TypeError("coroutines cannot be used with call_at()") + self._check_closed() + if self._debug: + self._check_thread() + timer = events.TimerHandle(when, callback, args, self) + if timer._source_traceback: + del timer._source_traceback[-1] + heapq.heappush(self._scheduled, timer) + timer._scheduled = True + return timer + + def call_soon(self, callback, *args): + """Arrange for a callback to be called as soon as possible. + + This operates as a FIFO queue: callbacks are called in the + order in which they are registered. Each callback will be + called exactly once. + + Any positional arguments after the callback will be passed to + the callback when it is called. + """ + if self._debug: + self._check_thread() + handle = self._call_soon(callback, args) + if handle._source_traceback: + del handle._source_traceback[-1] + return handle + + def _call_soon(self, callback, args): + if (coroutines.iscoroutine(callback) + or coroutines.iscoroutinefunction(callback)): + raise TypeError("coroutines cannot be used with call_soon()") + self._check_closed() + handle = events.Handle(callback, args, self) + if handle._source_traceback: + del handle._source_traceback[-1] + self._ready.append(handle) + return handle + + def _check_thread(self): + """Check that the current thread is the thread running the event loop. + + Non-thread-safe methods of this class make this assumption and will + likely behave incorrectly when the assumption is violated. + + Should only be called when (self._debug == True). The caller is + responsible for checking this condition for performance reasons. + """ + if self._thread_id is None: + return + thread_id = _get_thread_ident() + if thread_id != self._thread_id: + raise RuntimeError( + "Non-thread-safe operation invoked on an event loop other " + "than the current one") + + def call_soon_threadsafe(self, callback, *args): + """Like call_soon(), but thread-safe.""" + handle = self._call_soon(callback, args) + if handle._source_traceback: + del handle._source_traceback[-1] + self._write_to_self() + return handle + + def run_in_executor(self, executor, func, *args): + if (coroutines.iscoroutine(func) + or coroutines.iscoroutinefunction(func)): + raise TypeError("coroutines cannot be used with run_in_executor()") + self._check_closed() + if isinstance(func, events.Handle): + assert not args + assert not isinstance(func, events.TimerHandle) + if func._cancelled: + f = futures.Future(loop=self) + f.set_result(None) + return f + func, args = func._callback, func._args + if executor is None: + executor = self._default_executor + if executor is None: + executor = get_default_executor() + self._default_executor = executor + return futures.wrap_future(executor.submit(func, *args), loop=self) + + def set_default_executor(self, executor): + self._default_executor = executor + + def _getaddrinfo_debug(self, host, port, family, type, proto, flags): + msg = ["%s:%r" % (host, port)] + if family: + msg.append('family=%r' % family) + if type: + msg.append('type=%r' % type) + if proto: + msg.append('proto=%r' % proto) + if flags: + msg.append('flags=%r' % flags) + msg = ', '.join(msg) + logger.debug('Get address info %s', msg) + + t0 = self.time() + addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags) + dt = self.time() - t0 + + msg = ('Getting address info %s took %.3f ms: %r' + % (msg, dt * 1e3, addrinfo)) + if dt >= self.slow_callback_duration: + logger.info(msg) + else: + 
logger.debug(msg) + return addrinfo + + def getaddrinfo(self, host, port, + family=0, type=0, proto=0, flags=0): + if self._debug: + return self.run_in_executor(None, self._getaddrinfo_debug, + host, port, family, type, proto, flags) + else: + return self.run_in_executor(None, socket.getaddrinfo, + host, port, family, type, proto, flags) + + def getnameinfo(self, sockaddr, flags=0): + return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags) + + @coroutine + def create_connection(self, protocol_factory, host=None, port=None, + ssl=None, family=0, proto=0, flags=0, sock=None, + local_addr=None, server_hostname=None): + """Connect to a TCP server. + + Create a streaming transport connection to a given Internet host and + port: socket family AF_INET or socket.AF_INET6 depending on host (or + family if specified), socket type SOCK_STREAM. protocol_factory must be + a callable returning a protocol instance. + + This method is a coroutine which will try to establish the connection + in the background. When successful, the coroutine returns a + (transport, protocol) pair. + """ + if server_hostname is not None and not ssl: + raise ValueError('server_hostname is only meaningful with ssl') + + if server_hostname is None and ssl: + # Use host as default for server_hostname. It is an error + # if host is empty or not set, e.g. when an + # already-connected socket was passed or when only a port + # is given. To avoid this error, you can pass + # server_hostname='' -- this will bypass the hostname + # check. (This also means that if host is a numeric + # IP/IPv6 address, we will attempt to verify that exact + # address; this will probably fail, but it is possible to + # create a certificate for a specific IP address, so we + # don't judge it here.) + if not host: + raise ValueError('You must set server_hostname ' + 'when using ssl without a host') + server_hostname = host + + if host is not None or port is not None: + if sock is not None: + raise ValueError( + 'host/port and sock can not be specified at the same time') + + f1 = self.getaddrinfo( + host, port, family=family, + type=socket.SOCK_STREAM, proto=proto, flags=flags) + fs = [f1] + if local_addr is not None: + f2 = self.getaddrinfo( + *local_addr, family=family, + type=socket.SOCK_STREAM, proto=proto, flags=flags) + fs.append(f2) + else: + f2 = None + + yield From(tasks.wait(fs, loop=self)) + + infos = f1.result() + if not infos: + raise socket.error('getaddrinfo() returned empty list') + if f2 is not None: + laddr_infos = f2.result() + if not laddr_infos: + raise socket.error('getaddrinfo() returned empty list') + + exceptions = [] + for family, type, proto, cname, address in infos: + try: + sock = socket.socket(family=family, type=type, proto=proto) + sock.setblocking(False) + if f2 is not None: + for _, _, _, _, laddr in laddr_infos: + try: + sock.bind(laddr) + break + except socket.error as exc: + exc = socket.error( + exc.errno, 'error while ' + 'attempting to bind on address ' + '{0!r}: {1}'.format( + laddr, exc.strerror.lower())) + exceptions.append(exc) + else: + sock.close() + sock = None + continue + if self._debug: + logger.debug("connect %r to %r", sock, address) + yield From(self.sock_connect(sock, address)) + except socket.error as exc: + if sock is not None: + sock.close() + exceptions.append(exc) + except: + if sock is not None: + sock.close() + raise + else: + break + else: + if len(exceptions) == 1: + raise exceptions[0] + else: + # If they all have the same str(), raise one. 
+ model = str(exceptions[0]) + if all(str(exc) == model for exc in exceptions): + raise exceptions[0] + # Raise a combined exception so the user can see all + # the various error messages. + raise socket.error('Multiple exceptions: {0}'.format( + ', '.join(str(exc) for exc in exceptions))) + + elif sock is None: + raise ValueError( + 'host and port was not specified and no sock specified') + + sock.setblocking(False) + + transport, protocol = yield From(self._create_connection_transport( + sock, protocol_factory, ssl, server_hostname)) + if self._debug: + # Get the socket from the transport because SSL transport closes + # the old socket and creates a new SSL socket + sock = transport.get_extra_info('socket') + logger.debug("%r connected to %s:%r: (%r, %r)", + sock, host, port, transport, protocol) + raise Return(transport, protocol) + + @coroutine + def _create_connection_transport(self, sock, protocol_factory, ssl, + server_hostname): + protocol = protocol_factory() + waiter = futures.Future(loop=self) + if ssl: + sslcontext = None if isinstance(ssl, bool) else ssl + transport = self._make_ssl_transport( + sock, protocol, sslcontext, waiter, + server_side=False, server_hostname=server_hostname) + else: + transport = self._make_socket_transport(sock, protocol, waiter) + + try: + yield From(waiter) + except: + transport.close() + raise + + raise Return(transport, protocol) + + @coroutine + def create_datagram_endpoint(self, protocol_factory, + local_addr=None, remote_addr=None, + family=0, proto=0, flags=0): + """Create datagram connection.""" + if not (local_addr or remote_addr): + if family == 0: + raise ValueError('unexpected address family') + addr_pairs_info = (((family, proto), (None, None)),) + else: + # join address by (family, protocol) + addr_infos = OrderedDict() + for idx, addr in ((0, local_addr), (1, remote_addr)): + if addr is not None: + assert isinstance(addr, tuple) and len(addr) == 2, ( + '2-tuple is expected') + + infos = yield From(self.getaddrinfo( + *addr, family=family, type=socket.SOCK_DGRAM, + proto=proto, flags=flags)) + if not infos: + raise socket.error('getaddrinfo() returned empty list') + + for fam, _, pro, _, address in infos: + key = (fam, pro) + if key not in addr_infos: + addr_infos[key] = [None, None] + addr_infos[key][idx] = address + + # each addr has to have info for each (family, proto) pair + addr_pairs_info = [ + (key, addr_pair) for key, addr_pair in addr_infos.items() + if not ((local_addr and addr_pair[0] is None) or + (remote_addr and addr_pair[1] is None))] + + if not addr_pairs_info: + raise ValueError('can not get address information') + + exceptions = [] + + for ((family, proto), + (local_address, remote_address)) in addr_pairs_info: + sock = None + r_addr = None + try: + sock = socket.socket( + family=family, type=socket.SOCK_DGRAM, proto=proto) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.setblocking(False) + + if local_addr: + sock.bind(local_address) + if remote_addr: + yield From(self.sock_connect(sock, remote_address)) + r_addr = remote_address + except socket.error as exc: + if sock is not None: + sock.close() + exceptions.append(exc) + except: + if sock is not None: + sock.close() + raise + else: + break + else: + raise exceptions[0] + + protocol = protocol_factory() + waiter = futures.Future(loop=self) + transport = self._make_datagram_transport(sock, protocol, r_addr, + waiter) + if self._debug: + if local_addr: + logger.info("Datagram endpoint local_addr=%r remote_addr=%r " + "created: (%r, %r)", + 
local_addr, remote_addr, transport, protocol) + else: + logger.debug("Datagram endpoint remote_addr=%r created: " + "(%r, %r)", + remote_addr, transport, protocol) + + try: + yield From(waiter) + except: + transport.close() + raise + + raise Return(transport, protocol) + + @coroutine + def create_server(self, protocol_factory, host=None, port=None, + family=socket.AF_UNSPEC, + flags=socket.AI_PASSIVE, + sock=None, + backlog=100, + ssl=None, + reuse_address=None): + """Create a TCP server bound to host and port. + + Return a Server object which can be used to stop the service. + + This method is a coroutine. + """ + if isinstance(ssl, bool): + raise TypeError('ssl argument must be an SSLContext or None') + if host is not None or port is not None: + if sock is not None: + raise ValueError( + 'host/port and sock can not be specified at the same time') + + AF_INET6 = getattr(socket, 'AF_INET6', 0) + if reuse_address is None: + reuse_address = os.name == 'posix' and sys.platform != 'cygwin' + sockets = [] + if host == '': + host = None + + infos = yield From(self.getaddrinfo( + host, port, family=family, + type=socket.SOCK_STREAM, proto=0, flags=flags)) + if not infos: + raise socket.error('getaddrinfo() returned empty list') + + completed = False + try: + for res in infos: + af, socktype, proto, canonname, sa = res + try: + sock = socket.socket(af, socktype, proto) + except socket.error: + # Assume it's a bad family/type/protocol combination. + if self._debug: + logger.warning('create_server() failed to create ' + 'socket.socket(%r, %r, %r)', + af, socktype, proto, exc_info=True) + continue + sockets.append(sock) + if reuse_address: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, + True) + # Disable IPv4/IPv6 dual stack support (enabled by + # default on Linux) which makes a single socket + # listen on both address families. 
+ if af == AF_INET6 and hasattr(socket, 'IPPROTO_IPV6'): + sock.setsockopt(socket.IPPROTO_IPV6, + socket.IPV6_V6ONLY, + True) + try: + sock.bind(sa) + except socket.error as err: + raise socket.error(err.errno, + 'error while attempting ' + 'to bind on address %r: %s' + % (sa, err.strerror.lower())) + completed = True + finally: + if not completed: + for sock in sockets: + sock.close() + else: + if sock is None: + raise ValueError('Neither host/port nor sock were specified') + sockets = [sock] + + server = Server(self, sockets) + for sock in sockets: + sock.listen(backlog) + sock.setblocking(False) + self._start_serving(protocol_factory, sock, ssl, server) + if self._debug: + logger.info("%r is serving", server) + raise Return(server) + + @coroutine + def connect_read_pipe(self, protocol_factory, pipe): + protocol = protocol_factory() + waiter = futures.Future(loop=self) + transport = self._make_read_pipe_transport(pipe, protocol, waiter) + + try: + yield From(waiter) + except: + transport.close() + raise + + if self._debug: + logger.debug('Read pipe %r connected: (%r, %r)', + pipe.fileno(), transport, protocol) + raise Return(transport, protocol) + + @coroutine + def connect_write_pipe(self, protocol_factory, pipe): + protocol = protocol_factory() + waiter = futures.Future(loop=self) + transport = self._make_write_pipe_transport(pipe, protocol, waiter) + + try: + yield From(waiter) + except: + transport.close() + raise + + if self._debug: + logger.debug('Write pipe %r connected: (%r, %r)', + pipe.fileno(), transport, protocol) + raise Return(transport, protocol) + + def _log_subprocess(self, msg, stdin, stdout, stderr): + info = [msg] + if stdin is not None: + info.append('stdin=%s' % _format_pipe(stdin)) + if stdout is not None and stderr == subprocess.STDOUT: + info.append('stdout=stderr=%s' % _format_pipe(stdout)) + else: + if stdout is not None: + info.append('stdout=%s' % _format_pipe(stdout)) + if stderr is not None: + info.append('stderr=%s' % _format_pipe(stderr)) + logger.debug(' '.join(info)) + + @coroutine + def subprocess_shell(self, protocol_factory, cmd, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + universal_newlines=False, shell=True, bufsize=0, + **kwargs): + if not isinstance(cmd, compat.string_types): + raise ValueError("cmd must be a string") + if universal_newlines: + raise ValueError("universal_newlines must be False") + if not shell: + raise ValueError("shell must be True") + if bufsize != 0: + raise ValueError("bufsize must be 0") + protocol = protocol_factory() + if self._debug: + # don't log parameters: they may contain sensitive information + # (password) and may be too long + debug_log = 'run shell command %r' % cmd + self._log_subprocess(debug_log, stdin, stdout, stderr) + transport = yield From(self._make_subprocess_transport( + protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)) + if self._debug: + logger.info('%s: %r' % (debug_log, transport)) + raise Return(transport, protocol) + + @coroutine + def subprocess_exec(self, protocol_factory, program, *args, **kwargs): + stdin = kwargs.pop('stdin', subprocess.PIPE) + stdout = kwargs.pop('stdout', subprocess.PIPE) + stderr = kwargs.pop('stderr', subprocess.PIPE) + universal_newlines = kwargs.pop('universal_newlines', False) + shell = kwargs.pop('shell', False) + bufsize = kwargs.pop('bufsize', 0) + if universal_newlines: + raise ValueError("universal_newlines must be False") + if shell: + raise ValueError("shell must be False") + if bufsize != 0: + raise 
ValueError("bufsize must be 0") + popen_args = (program,) + args + for arg in popen_args: + if not isinstance(arg, compat.string_types ): + raise TypeError("program arguments must be " + "a bytes or text string, not %s" + % type(arg).__name__) + protocol = protocol_factory() + if self._debug: + # don't log parameters: they may contain sensitive information + # (password) and may be too long + debug_log = 'execute program %r' % program + self._log_subprocess(debug_log, stdin, stdout, stderr) + transport = yield From(self._make_subprocess_transport( + protocol, popen_args, False, stdin, stdout, stderr, + bufsize, **kwargs)) + if self._debug: + logger.info('%s: %r' % (debug_log, transport)) + raise Return(transport, protocol) + + def set_exception_handler(self, handler): + """Set handler as the new event loop exception handler. + + If handler is None, the default exception handler will + be set. + + If handler is a callable object, it should have a + signature matching '(loop, context)', where 'loop' + will be a reference to the active event loop, 'context' + will be a dict object (see `call_exception_handler()` + documentation for details about context). + """ + if handler is not None and not callable(handler): + raise TypeError('A callable object or None is expected, ' + 'got {0!r}'.format(handler)) + self._exception_handler = handler + + def default_exception_handler(self, context): + """Default exception handler. + + This is called when an exception occurs and no exception + handler is set, and can be called by a custom exception + handler that wants to defer to the default behavior. + + The context parameter has the same meaning as in + `call_exception_handler()`. + """ + message = context.get('message') + if not message: + message = 'Unhandled exception in event loop' + + exception = context.get('exception') + if exception is not None: + if hasattr(exception, '__traceback__'): + # Python 3 + tb = exception.__traceback__ + else: + # call_exception_handler() is usually called indirectly + # from an except block. If it's not the case, the traceback + # is undefined... + tb = sys.exc_info()[2] + exc_info = (type(exception), exception, tb) + else: + exc_info = False + + if ('source_traceback' not in context + and self._current_handle is not None + and self._current_handle._source_traceback): + context['handle_traceback'] = self._current_handle._source_traceback + + log_lines = [message] + for key in sorted(context): + if key in ('message', 'exception'): + continue + value = context[key] + if key == 'source_traceback': + tb = ''.join(traceback.format_list(value)) + value = 'Object created at (most recent call last):\n' + value += tb.rstrip() + elif key == 'handle_traceback': + tb = ''.join(traceback.format_list(value)) + value = 'Handle created at (most recent call last):\n' + value += tb.rstrip() + else: + value = repr(value) + log_lines.append('{0}: {1}'.format(key, value)) + + logger.error('\n'.join(log_lines), exc_info=exc_info) + + def call_exception_handler(self, context): + """Call the current event loop's exception handler. + + The context argument is a dict containing the following keys: + + - 'message': Error message; + - 'exception' (optional): Exception object; + - 'future' (optional): Future instance; + - 'handle' (optional): Handle instance; + - 'protocol' (optional): Protocol instance; + - 'transport' (optional): Transport instance; + - 'socket' (optional): Socket instance. + + New keys maybe introduced in the future. 
+ + Note: do not overload this method in an event loop subclass. + For custom exception handling, use the + `set_exception_handler()` method. + """ + if self._exception_handler is None: + try: + self.default_exception_handler(context) + except Exception: + # Second protection layer for unexpected errors + # in the default implementation, as well as for subclassed + # event loops with overloaded "default_exception_handler". + logger.error('Exception in default exception handler', + exc_info=True) + else: + try: + self._exception_handler(self, context) + except Exception as exc: + # Exception in the user set custom exception handler. + try: + # Let's try default handler. + self.default_exception_handler({ + 'message': 'Unhandled error in exception handler', + 'exception': exc, + 'context': context, + }) + except Exception: + # Guard 'default_exception_handler' in case it is + # overloaded. + logger.error('Exception in default exception handler ' + 'while handling an unexpected error ' + 'in custom exception handler', + exc_info=True) + + def _add_callback(self, handle): + """Add a Handle to _scheduled (TimerHandle) or _ready.""" + assert isinstance(handle, events.Handle), 'A Handle is required here' + if handle._cancelled: + return + assert not isinstance(handle, events.TimerHandle) + self._ready.append(handle) + + def _add_callback_signalsafe(self, handle): + """Like _add_callback() but called from a signal handler.""" + self._add_callback(handle) + self._write_to_self() + + def _timer_handle_cancelled(self, handle): + """Notification that a TimerHandle has been cancelled.""" + if handle._scheduled: + self._timer_cancelled_count += 1 + + def _run_once(self): + """Run one full iteration of the event loop. + + This calls all currently ready callbacks, polls for I/O, + schedules the resulting callbacks, and finally schedules + 'call_later' callbacks. + """ + + sched_count = len(self._scheduled) + if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and + float(self._timer_cancelled_count) / sched_count > + _MIN_CANCELLED_TIMER_HANDLES_FRACTION): + # Remove delayed calls that were cancelled if their number + # is too high + new_scheduled = [] + for handle in self._scheduled: + if handle._cancelled: + handle._scheduled = False + else: + new_scheduled.append(handle) + + heapq.heapify(new_scheduled) + self._scheduled = new_scheduled + self._timer_cancelled_count = 0 + else: + # Remove delayed calls that were cancelled from head of queue. + while self._scheduled and self._scheduled[0]._cancelled: + self._timer_cancelled_count -= 1 + handle = heapq.heappop(self._scheduled) + handle._scheduled = False + + timeout = None + if self._ready: + timeout = 0 + elif self._scheduled: + # Compute the desired timeout. + when = self._scheduled[0]._when + timeout = max(0, when - self.time()) + + if self._debug and timeout != 0: + t0 = self.time() + event_list = self._selector.select(timeout) + dt = self.time() - t0 + if dt >= 1.0: + level = logging.INFO + else: + level = logging.DEBUG + nevent = len(event_list) + if timeout is None: + logger.log(level, 'poll took %.3f ms: %s events', + dt * 1e3, nevent) + elif nevent: + logger.log(level, + 'poll %.3f ms took %.3f ms: %s events', + timeout * 1e3, dt * 1e3, nevent) + elif dt >= 1.0: + logger.log(level, + 'poll %.3f ms took %.3f ms: timeout', + timeout * 1e3, dt * 1e3) + else: + event_list = self._selector.select(timeout) + self._process_events(event_list) + + # Handle 'later' callbacks that are ready. 
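+ # (a timer is ready when its deadline falls within one clock + # resolution of the current time; ready timers are moved to the + # _ready queue and run at the end of this iteration)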
+ end_time = self.time() + self._clock_resolution + while self._scheduled: + handle = self._scheduled[0] + if handle._when >= end_time: + break + handle = heapq.heappop(self._scheduled) + handle._scheduled = False + self._ready.append(handle) + + # This is the only place where callbacks are actually *called*. + # All other places just add them to ready. + # Note: We run all currently scheduled callbacks, but not any + # callbacks scheduled by callbacks run this time around -- + # they will be run the next time (after another I/O poll). + # Use an idiom that is thread-safe without using locks. + ntodo = len(self._ready) + for i in range(ntodo): + handle = self._ready.popleft() + if handle._cancelled: + continue + if self._debug: + try: + self._current_handle = handle + t0 = self.time() + handle._run() + dt = self.time() - t0 + if dt >= self.slow_callback_duration: + logger.warning('Executing %s took %.3f seconds', + _format_handle(handle), dt) + finally: + self._current_handle = None + else: + handle._run() + handle = None # Needed to break cycles when an exception occurs. + + def _set_coroutine_wrapper(self, enabled): + try: + set_wrapper = sys.set_coroutine_wrapper + get_wrapper = sys.get_coroutine_wrapper + except AttributeError: + return + + enabled = bool(enabled) + if self._coroutine_wrapper_set == enabled: + return + + wrapper = coroutines.debug_wrapper + current_wrapper = get_wrapper() + + if enabled: + if current_wrapper not in (None, wrapper): + warnings.warn( + "loop.set_debug(True): cannot set debug coroutine " + "wrapper; another wrapper is already set %r" % + current_wrapper, RuntimeWarning) + else: + set_wrapper(wrapper) + self._coroutine_wrapper_set = True + else: + if current_wrapper not in (None, wrapper): + warnings.warn( + "loop.set_debug(False): cannot unset debug coroutine " + "wrapper; another wrapper was set %r" % + current_wrapper, RuntimeWarning) + else: + set_wrapper(None) + self._coroutine_wrapper_set = False + + def get_debug(self): + return self._debug + + def set_debug(self, enabled): + self._debug = enabled + + if self.is_running(): + self._set_coroutine_wrapper(enabled) diff --git a/trollius/base_subprocess.py b/trollius/base_subprocess.py new file mode 100644 index 00000000..ffd6e762 --- /dev/null +++ b/trollius/base_subprocess.py @@ -0,0 +1,282 @@ +import collections +import subprocess +import warnings + +from . import compat +from . import futures +from . import protocols +from . 
import transports +from .coroutines import coroutine, From, Return +from .log import logger +from .py33_exceptions import ProcessLookupError + + +class BaseSubprocessTransport(transports.SubprocessTransport): + + def __init__(self, loop, protocol, args, shell, + stdin, stdout, stderr, bufsize, + waiter=None, extra=None, **kwargs): + super(BaseSubprocessTransport, self).__init__(extra) + self._closed = False + self._protocol = protocol + self._loop = loop + self._proc = None + self._pid = None + self._returncode = None + self._exit_waiters = [] + self._pending_calls = collections.deque() + self._pipes = {} + self._finished = False + + if stdin == subprocess.PIPE: + self._pipes[0] = None + if stdout == subprocess.PIPE: + self._pipes[1] = None + if stderr == subprocess.PIPE: + self._pipes[2] = None + + # Create the child process: set the _proc attribute + try: + self._start(args=args, shell=shell, stdin=stdin, stdout=stdout, + stderr=stderr, bufsize=bufsize, **kwargs) + except: + self.close() + raise + + self._pid = self._proc.pid + self._extra['subprocess'] = self._proc + + if self._loop.get_debug(): + if isinstance(args, (bytes, str)): + program = args + else: + program = args[0] + logger.debug('process %r created: pid %s', + program, self._pid) + + self._loop.create_task(self._connect_pipes(waiter)) + + def __repr__(self): + info = [self.__class__.__name__] + if self._closed: + info.append('closed') + if self._pid is not None: + info.append('pid=%s' % self._pid) + if self._returncode is not None: + info.append('returncode=%s' % self._returncode) + elif self._pid is not None: + info.append('running') + else: + info.append('not started') + + stdin = self._pipes.get(0) + if stdin is not None: + info.append('stdin=%s' % stdin.pipe) + + stdout = self._pipes.get(1) + stderr = self._pipes.get(2) + if stdout is not None and stderr is stdout: + info.append('stdout=stderr=%s' % stdout.pipe) + else: + if stdout is not None: + info.append('stdout=%s' % stdout.pipe) + if stderr is not None: + info.append('stderr=%s' % stderr.pipe) + + return '<%s>' % ' '.join(info) + + def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): + raise NotImplementedError + + def close(self): + if self._closed: + return + self._closed = True + + for proto in self._pipes.values(): + if proto is None: + continue + proto.pipe.close() + + if (self._proc is not None + # the child process finished? + and self._returncode is None + # the child process finished but the transport was not notified yet? + and self._proc.poll() is None + ): + if self._loop.get_debug(): + logger.warning('Close running child process: kill %r', self) + + try: + self._proc.kill() + except ProcessLookupError: + pass + + # Don't clear the _proc reference yet: _post_init() may still run + + # On Python 3.3 and older, objects with a destructor part of a reference + # cycle are never destroyed. It's not more the case on Python 3.4 thanks + # to the PEP 442. 
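+    # (Defining __del__ only on Python 3.4+ is deliberate: with PEP 442 a
+    # destructor no longer prevents a reference cycle containing the
+    # transport from being garbage-collected.)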
+    if compat.PY34:
+        def __del__(self):
+            if not self._closed:
+                warnings.warn("unclosed transport %r" % self, ResourceWarning)
+                self.close()
+
+    def get_pid(self):
+        return self._pid
+
+    def get_returncode(self):
+        return self._returncode
+
+    def get_pipe_transport(self, fd):
+        if fd in self._pipes:
+            return self._pipes[fd].pipe
+        else:
+            return None
+
+    def _check_proc(self):
+        if self._proc is None:
+            raise ProcessLookupError()
+
+    def send_signal(self, signal):
+        self._check_proc()
+        self._proc.send_signal(signal)
+
+    def terminate(self):
+        self._check_proc()
+        self._proc.terminate()
+
+    def kill(self):
+        self._check_proc()
+        self._proc.kill()
+
+    @coroutine
+    def _connect_pipes(self, waiter):
+        try:
+            proc = self._proc
+            loop = self._loop
+
+            if proc.stdin is not None:
+                _, pipe = yield From(loop.connect_write_pipe(
+                    lambda: WriteSubprocessPipeProto(self, 0),
+                    proc.stdin))
+                self._pipes[0] = pipe
+
+            if proc.stdout is not None:
+                _, pipe = yield From(loop.connect_read_pipe(
+                    lambda: ReadSubprocessPipeProto(self, 1),
+                    proc.stdout))
+                self._pipes[1] = pipe
+
+            if proc.stderr is not None:
+                _, pipe = yield From(loop.connect_read_pipe(
+                    lambda: ReadSubprocessPipeProto(self, 2),
+                    proc.stderr))
+                self._pipes[2] = pipe
+
+            assert self._pending_calls is not None
+
+            loop.call_soon(self._protocol.connection_made, self)
+            for callback, data in self._pending_calls:
+                loop.call_soon(callback, *data)
+            self._pending_calls = None
+        except Exception as exc:
+            if waiter is not None and not waiter.cancelled():
+                waiter.set_exception(exc)
+        else:
+            if waiter is not None and not waiter.cancelled():
+                waiter.set_result(None)
+
+    def _call(self, cb, *data):
+        if self._pending_calls is not None:
+            self._pending_calls.append((cb, data))
+        else:
+            self._loop.call_soon(cb, *data)
+
+    def _pipe_connection_lost(self, fd, exc):
+        self._call(self._protocol.pipe_connection_lost, fd, exc)
+        self._try_finish()
+
+    def _pipe_data_received(self, fd, data):
+        self._call(self._protocol.pipe_data_received, fd, data)
+
+    def _process_exited(self, returncode):
+        assert returncode is not None, returncode
+        assert self._returncode is None, self._returncode
+        if self._loop.get_debug():
+            logger.info('%r exited with return code %r',
+                        self, returncode)
+        self._returncode = returncode
+        self._call(self._protocol.process_exited)
+        self._try_finish()
+
+        # wake up futures waiting for wait()
+        for waiter in self._exit_waiters:
+            if not waiter.cancelled():
+                waiter.set_result(returncode)
+        self._exit_waiters = None
+
+    @coroutine
+    def _wait(self):
+        """Wait until the process exits and return the process return code.
+ + This method is a coroutine.""" + if self._returncode is not None: + raise Return(self._returncode) + + waiter = futures.Future(loop=self._loop) + self._exit_waiters.append(waiter) + returncode = yield From(waiter) + raise Return(returncode) + + def _try_finish(self): + assert not self._finished + if self._returncode is None: + return + if all(p is not None and p.disconnected + for p in self._pipes.values()): + self._finished = True + self._call(self._call_connection_lost, None) + + def _call_connection_lost(self, exc): + try: + self._protocol.connection_lost(exc) + finally: + self._loop = None + self._proc = None + self._protocol = None + + +class WriteSubprocessPipeProto(protocols.BaseProtocol): + + def __init__(self, proc, fd): + self.proc = proc + self.fd = fd + self.pipe = None + self.disconnected = False + + def connection_made(self, transport): + self.pipe = transport + + def __repr__(self): + return ('<%s fd=%s pipe=%r>' + % (self.__class__.__name__, self.fd, self.pipe)) + + def connection_lost(self, exc): + self.disconnected = True + self.proc._pipe_connection_lost(self.fd, exc) + self.proc = None + + def pause_writing(self): + self.proc._protocol.pause_writing() + + def resume_writing(self): + self.proc._protocol.resume_writing() + + +class ReadSubprocessPipeProto(WriteSubprocessPipeProto, + protocols.Protocol): + + def data_received(self, data): + self.proc._pipe_data_received(self.fd, data) diff --git a/trollius/compat.py b/trollius/compat.py new file mode 100644 index 00000000..df64abac --- /dev/null +++ b/trollius/compat.py @@ -0,0 +1,69 @@ +"""Compatibility helpers for the different Python versions.""" + +import six +import sys + +# Python 2.6 or older? +PY26 = (sys.version_info < (2, 7)) + +# Python 3.3 or newer? +PY33 = (sys.version_info >= (3, 3)) + +# Python 3.4 or newer? +PY34 = sys.version_info >= (3, 4) + +# Python 3.5 or newer? +PY35 = sys.version_info >= (3, 5) + +if six.PY3: + integer_types = (int,) + bytes_type = bytes + text_type = str + string_types = (bytes, str) + BYTES_TYPES = (bytes, bytearray, memoryview) +else: + integer_types = (int, long,) + bytes_type = str + text_type = unicode + string_types = basestring + if PY26: + BYTES_TYPES = (str, bytearray, buffer) + else: # Python 2.7 + BYTES_TYPES = (str, bytearray, memoryview, buffer) + + +if six.PY3: + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value +else: + exec("""def reraise(tp, value, tb=None): raise tp, value, tb""") + + +def flatten_bytes(data): + """ + Convert bytes-like objects (bytes, bytearray, memoryview, buffer) to + a bytes string. + """ + if not isinstance(data, BYTES_TYPES): + raise TypeError('data argument must be byte-ish (%r)', + type(data)) + if PY34: + # In Python 3.4, socket.send() and bytes.join() accept memoryview + # and bytearray + return data + if not data: + return b'' + if six.PY2 and isinstance(data, (buffer, bytearray)): + return str(data) + elif not PY26 and isinstance(data, memoryview): + return data.tobytes() + else: + return data + + +def flatten_list_bytes(data): + """Concatenate a sequence of bytes-like objects.""" + data = map(flatten_bytes, data) + return b''.join(data) diff --git a/trollius/constants.py b/trollius/constants.py new file mode 100644 index 00000000..f9e12328 --- /dev/null +++ b/trollius/constants.py @@ -0,0 +1,7 @@ +"""Constants.""" + +# After the connection is lost, log warnings after this many write()s. 
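+# (Once the connection is lost, transports drop further writes; after
+# this many dropped writes, each write() logs a warning so the situation
+# does not pass completely silently.)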
+LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5
+
+# Seconds to wait before retrying accept().
+ACCEPT_RETRY_DELAY = 1
diff --git a/trollius/coroutines.py b/trollius/coroutines.py
new file mode 100644
index 00000000..6a06989b
--- /dev/null
+++ b/trollius/coroutines.py
@@ -0,0 +1,496 @@
+__all__ = ['coroutine',
+           'iscoroutinefunction', 'iscoroutine',
+           'From', 'Return']
+
+import functools
+import inspect
+import opcode
+import os
+import sys
+import textwrap
+import traceback
+import types
+
+from . import compat
+from . import events
+from . import futures
+from .log import logger
+
+
+# Opcode of the "yield from" instruction
+_YIELD_FROM = opcode.opmap.get('YIELD_FROM', None)
+
+# If you set _DEBUG to true, @coroutine will wrap the resulting
+# generator objects in a CoroWrapper instance (defined below). That
+# instance will log a message when the generator is never iterated
+# over, which may happen when you forget to use "yield from" with a
+# coroutine call. Note that the value of the _DEBUG flag is taken
+# when the decorator is used, so to be of any use it must be set
+# before you define your coroutines. A downside of using this feature
+# is that tracebacks show entries for the CoroWrapper.__next__ method
+# when _DEBUG is true.
+_DEBUG = bool(os.environ.get('TROLLIUSDEBUG'))
+
+
+try:
+    _types_coroutine = types.coroutine
+except AttributeError:
+    _types_coroutine = None
+
+try:
+    _inspect_iscoroutinefunction = inspect.iscoroutinefunction
+except AttributeError:
+    _inspect_iscoroutinefunction = lambda func: False
+
+try:
+    from collections.abc import Coroutine as _CoroutineABC, \
+                                Awaitable as _AwaitableABC
+except ImportError:
+    _CoroutineABC = _AwaitableABC = None
+
+
+if _YIELD_FROM is not None:
+    # Check for CPython issue #21209
+    exec('''if 1:
+        def has_yield_from_bug():
+            class MyGen:
+                def __init__(self):
+                    self.send_args = None
+                def __iter__(self):
+                    return self
+                def __next__(self):
+                    return 42
+                def send(self, *what):
+                    self.send_args = what
+                    return None
+            def yield_from_gen(gen):
+                yield from gen
+            value = (1, 2, 3)
+            gen = MyGen()
+            coro = yield_from_gen(gen)
+            next(coro)
+            coro.send(value)
+            return gen.send_args != (value,)
+''')
+    _YIELD_FROM_BUG = has_yield_from_bug()
+    del has_yield_from_bug
+else:
+    _YIELD_FROM_BUG = False
+
+
+if compat.PY35:
+    return_base_class = Exception
+else:
+    return_base_class = StopIteration
+
+class ReturnException(return_base_class):
+    def __init__(self, *args):
+        return_base_class.__init__(self)
+        if not args:
+            self.value = None
+        elif len(args) == 1:
+            self.value = args[0]
+        else:
+            self.value = args
+        self.raised = False
+        if _DEBUG:
+            frame = sys._getframe(1)
+            self._source_traceback = traceback.extract_stack(frame)
+            # explicitly clear the reference to avoid reference cycles
+            frame = None
+        else:
+            self._source_traceback = None
+
+    def __del__(self):
+        if self.raised:
+            return
+
+        fmt = 'Return(%r) used without raise'
+        if self._source_traceback:
+            fmt += '\nReturn created at (most recent call last):\n'
+            tb = ''.join(traceback.format_list(self._source_traceback))
+            fmt += tb.rstrip()
+        logger.error(fmt, self.value)
+
+
+if compat.PY33 and not compat.PY35:
+    # Don't use the Return class on Python 3.3 and 3.4, to support asyncio
+    # coroutines (and to avoid the warning emitted in the Return destructor).
+    #
+    # The problem is that ReturnException inherits from StopIteration, which
+    # is consumed by "yield from trollius_coroutine": Task._step() never
+    # receives the Return exception, because "yield from" handles it
+    # internally. So it's not possible to set the raised attribute to True
+    # to avoid the warning in the Return destructor.
+    def Return(*args):
+        if not args:
+            value = None
+        elif len(args) == 1:
+            value = args[0]
+        else:
+            value = args
+        return StopIteration(value)
+else:
+    Return = ReturnException
+
+
+def debug_wrapper(gen):
+    # This function is called from 'sys.set_coroutine_wrapper'.
+    # We only wrap here coroutines defined via the 'async def' syntax.
+    # Generator-based coroutines are wrapped in the @coroutine
+    # decorator.
+    return CoroWrapper(gen, None)
+
+
+def _coroutine_at_yield_from(coro):
+    """Test if the last instruction of a coroutine is "yield from".
+
+    Return False if the coroutine completed.
+    """
+    frame = coro.gi_frame
+    if frame is None:
+        return False
+    code = coro.gi_code
+    assert frame.f_lasti >= 0
+    offset = frame.f_lasti + 1
+    instr = code.co_code[offset]
+    return (instr == _YIELD_FROM)
+
+
+class CoroWrapper:
+    # Wrapper for coroutine object in _DEBUG mode.
+
+    def __init__(self, gen, func=None):
+        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
+        self.gen = gen
+        self.func = func  # Used to unwrap @coroutine decorator
+        self._source_traceback = traceback.extract_stack(sys._getframe(1))
+        self.__name__ = getattr(gen, '__name__', None)
+        self.__qualname__ = getattr(gen, '__qualname__', None)
+
+    def __repr__(self):
+        coro_repr = _format_coroutine(self)
+        if self._source_traceback:
+            frame = self._source_traceback[-1]
+            coro_repr += ', created at %s:%s' % (frame[0], frame[1])
+        return '<%s %s>' % (self.__class__.__name__, coro_repr)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        return next(self.gen)
+    next = __next__
+
+    if _YIELD_FROM_BUG:
+        # For CPython issue #21209: using "yield from" and a custom
+        # generator, generator.send(tuple) unpacks the tuple instead of
+        # passing the tuple unchanged. Check if the caller is a generator
+        # using "yield from" to decide if the parameter should be unpacked
+        # or not.
+        def send(self, *value):
+            frame = sys._getframe()
+            caller = frame.f_back
+            assert caller.f_lasti >= 0
+            if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
+                value = value[0]
+            return self.gen.send(value)
+    else:
+        def send(self, value):
+            return self.gen.send(value)
+
+    def throw(self, exc_type, exc_value=None, exc_tb=None):
+        return self.gen.throw(exc_type, exc_value, exc_tb)
+
+    def close(self):
+        return self.gen.close()
+
+    @property
+    def gi_frame(self):
+        return self.gen.gi_frame
+
+    @property
+    def gi_running(self):
+        return self.gen.gi_running
+
+    @property
+    def gi_code(self):
+        return self.gen.gi_code
+
+    if compat.PY35:
+
+        __await__ = __iter__  # make compatible with 'await' expression
+
+        @property
+        def gi_yieldfrom(self):
+            return self.gen.gi_yieldfrom
+
+        @property
+        def cr_await(self):
+            return self.gen.cr_await
+
+        @property
+        def cr_running(self):
+            return self.gen.cr_running
+
+        @property
+        def cr_code(self):
+            return self.gen.cr_code
+
+        @property
+        def cr_frame(self):
+            return self.gen.cr_frame
+
+    def __del__(self):
+        # Be careful accessing self.gen.gi_frame -- self.gen might not exist.
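+        # (__init__ may have raised before self.gen was assigned, so every
+        # attribute access below goes through getattr() with a default.)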
+ gen = getattr(self, 'gen', None) + frame = getattr(gen, 'gi_frame', None) + if frame is None: + frame = getattr(gen, 'cr_frame', None) + if frame is not None and frame.f_lasti == -1: + msg = '%r was never yielded from' % self + tb = getattr(self, '_source_traceback', ()) + if tb: + tb = ''.join(traceback.format_list(tb)) + msg += ('\nCoroutine object created at ' + '(most recent call last):\n') + msg += tb.rstrip() + logger.error(msg) + +if not compat.PY34: + # Backport functools.update_wrapper() from Python 3.4: + # - Python 2.7 fails if assigned attributes don't exist + # - Python 2.7 and 3.1 don't set the __wrapped__ attribute + # - Python 3.2 and 3.3 set __wrapped__ before updating __dict__ + def _update_wrapper(wrapper, + wrapped, + assigned = functools.WRAPPER_ASSIGNMENTS, + updated = functools.WRAPPER_UPDATES): + """Update a wrapper function to look like the wrapped function + + wrapper is the function to be updated + wrapped is the original function + assigned is a tuple naming the attributes assigned directly + from the wrapped function to the wrapper function (defaults to + functools.WRAPPER_ASSIGNMENTS) + updated is a tuple naming the attributes of the wrapper that + are updated with the corresponding attribute from the wrapped + function (defaults to functools.WRAPPER_UPDATES) + """ + for attr in assigned: + try: + value = getattr(wrapped, attr) + except AttributeError: + pass + else: + setattr(wrapper, attr, value) + for attr in updated: + getattr(wrapper, attr).update(getattr(wrapped, attr, {})) + # Issue #17482: set __wrapped__ last so we don't inadvertently copy it + # from the wrapped function when updating __dict__ + wrapper.__wrapped__ = wrapped + # Return the wrapper so this can be used as a decorator via partial() + return wrapper + + def _wraps(wrapped, + assigned = functools.WRAPPER_ASSIGNMENTS, + updated = functools.WRAPPER_UPDATES): + """Decorator factory to apply update_wrapper() to a wrapper function + + Returns a decorator that invokes update_wrapper() with the decorated + function as the wrapper argument and the arguments to wraps() as the + remaining arguments. Default arguments are as for update_wrapper(). + This is a convenience function to simplify applying partial() to + update_wrapper(). + """ + return functools.partial(_update_wrapper, wrapped=wrapped, + assigned=assigned, updated=updated) +else: + _wraps = functools.wraps + +_PEP479 = (sys.version_info >= (3, 5)) +if _PEP479: + # Need exec() because yield+return raises a SyntaxError on Python 2 + exec(textwrap.dedent(''' + def pep479_wrapper(func, coro_func): + @_wraps(func) + def pep479_wrapped(*args, **kw): + coro = coro_func(*args, **kw) + value = None + error = None + while True: + try: + if error is not None: + value = coro.throw(error) + elif value is not None: + value = coro.send(value) + else: + value = next(coro) + except RuntimeError: + # FIXME: special case for + # FIXME: "isinstance(exc.__context__, StopIteration)"? + raise + except StopIteration as exc: + return exc.value + except Return as exc: + exc.raised = True + return exc.value + except BaseException as exc: + raise + + try: + value = yield value + error = None + except BaseException as exc: + value = None + error = exc + + return pep479_wrapped + ''')) + + +def coroutine(func): + """Decorator to mark coroutines. + + If the coroutine is not yielded from before it is destroyed, + an error message is logged. 
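+
+    A minimal usage sketch (the 'slow_add' name is illustrative, and it
+    assumes "from trollius import From, Return, sleep"):
+
+        @coroutine
+        def slow_add(x, y):
+            yield From(sleep(1.0))
+            raise Return(x + y)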
+ """ + if _inspect_iscoroutinefunction(func): + # In Python 3.5 that's all we need to do for coroutines + # defiend with "async def". + # Wrapping in CoroWrapper will happen via + # 'sys.set_coroutine_wrapper' function. + return func + + if inspect.isgeneratorfunction(func): + coro = func + else: + @_wraps(func) + def coro(*args, **kw): + res = func(*args, **kw) + if (isinstance(res, futures._FUTURE_CLASSES) + or inspect.isgenerator(res)): + res = yield From(res) + elif _AwaitableABC is not None: + # If 'func' returns an Awaitable (new in 3.5) we + # want to run it. + try: + await_meth = res.__await__ + except AttributeError: + pass + else: + if isinstance(res, _AwaitableABC): + res = yield From(await_meth()) + raise Return(res) + + if _PEP479: + # FIXME: use @_wraps + coro = pep479_wrapper(func, coro) + coro = _wraps(func)(coro) + + if not _DEBUG: + if _types_coroutine is None: + wrapper = coro + else: + wrapper = _types_coroutine(coro) + else: + @_wraps(func) + def wrapper(*args, **kwds): + w = CoroWrapper(coro(*args, **kwds), func=func) + if w._source_traceback: + del w._source_traceback[-1] + # Python < 3.5 does not implement __qualname__ + # on generator objects, so we set it manually. + # We use getattr as some callables (such as + # functools.partial may lack __qualname__). + w.__name__ = getattr(func, '__name__', None) + w.__qualname__ = getattr(func, '__qualname__', None) + return w + + wrapper._is_coroutine = True # For iscoroutinefunction(). + return wrapper + + +def iscoroutinefunction(func): + """Return True if func is a decorated coroutine function.""" + return (getattr(func, '_is_coroutine', False) or + _inspect_iscoroutinefunction(func)) + + +_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper) +if _CoroutineABC is not None: + _COROUTINE_TYPES += (_CoroutineABC,) +if events.asyncio is not None: + # Accept also asyncio CoroWrapper for interoperability + if hasattr(events.asyncio, 'coroutines'): + _COROUTINE_TYPES += (events.asyncio.coroutines.CoroWrapper,) + else: + # old asyncio/Python versions + _COROUTINE_TYPES += (events.asyncio.tasks.CoroWrapper,) + +def iscoroutine(obj): + """Return True if obj is a coroutine object.""" + return isinstance(obj, _COROUTINE_TYPES) + + +def _format_coroutine(coro): + assert iscoroutine(coro) + + coro_name = None + if isinstance(coro, CoroWrapper): + func = coro.func + coro_name = coro.__qualname__ + if coro_name is not None: + coro_name = '{0}()'.format(coro_name) + else: + func = coro + + if coro_name is None: + coro_name = events._format_callback(func, ()) + + try: + coro_code = coro.gi_code + except AttributeError: + coro_code = coro.cr_code + + try: + coro_frame = coro.gi_frame + except AttributeError: + coro_frame = coro.cr_frame + + filename = coro_code.co_filename + if (isinstance(coro, CoroWrapper) + and not inspect.isgeneratorfunction(coro.func) + and coro.func is not None): + filename, lineno = events._get_function_source(coro.func) + if coro_frame is None: + coro_repr = ('%s done, defined at %s:%s' + % (coro_name, filename, lineno)) + else: + coro_repr = ('%s running, defined at %s:%s' + % (coro_name, filename, lineno)) + elif coro_frame is not None: + lineno = coro_frame.f_lineno + coro_repr = ('%s running at %s:%s' + % (coro_name, filename, lineno)) + else: + lineno = coro_code.co_firstlineno + coro_repr = ('%s done, defined at %s:%s' + % (coro_name, filename, lineno)) + + return coro_repr + + +class FromWrapper(object): + __slots__ = ('obj',) + + def __init__(self, obj): + if isinstance(obj, FromWrapper): + obj = obj.obj + 
assert not isinstance(obj, FromWrapper) + self.obj = obj + +def From(obj): + if not _DEBUG: + return obj + else: + return FromWrapper(obj) diff --git a/trollius/events.py b/trollius/events.py new file mode 100644 index 00000000..52611612 --- /dev/null +++ b/trollius/events.py @@ -0,0 +1,626 @@ +"""Event loop and event loop policy.""" +from __future__ import absolute_import + +__all__ = ['AbstractEventLoopPolicy', + 'AbstractEventLoop', 'AbstractServer', + 'Handle', 'TimerHandle', + 'get_event_loop_policy', 'set_event_loop_policy', + 'get_event_loop', 'set_event_loop', 'new_event_loop', + 'get_child_watcher', 'set_child_watcher', + ] + +import functools +import inspect +import socket +import subprocess +import sys +import threading +import traceback +try: + import reprlib # Python 3 +except ImportError: + import repr as reprlib # Python 2 + +try: + import asyncio +except (ImportError, SyntaxError): + # ignore SyntaxError for convenience: ignore SyntaxError caused by "yield + # from" if asyncio module is in the Python path + asyncio = None + +from trollius import compat + + +def _get_function_source(func): + if compat.PY34: + func = inspect.unwrap(func) + elif hasattr(func, '__wrapped__'): + func = func.__wrapped__ + if inspect.isfunction(func): + code = func.__code__ + return (code.co_filename, code.co_firstlineno) + if isinstance(func, functools.partial): + return _get_function_source(func.func) + if compat.PY34 and isinstance(func, functools.partialmethod): + return _get_function_source(func.func) + return None + + +def _format_args(args): + """Format function arguments. + + Special case for a single parameter: ('hello',) is formatted as ('hello'). + """ + # use reprlib to limit the length of the output + args_repr = reprlib.repr(args) + if len(args) == 1 and args_repr.endswith(',)'): + args_repr = args_repr[:-2] + ')' + return args_repr + + +def _format_callback(func, args, suffix=''): + if isinstance(func, functools.partial): + if args is not None: + suffix = _format_args(args) + suffix + return _format_callback(func.func, func.args, suffix) + + if hasattr(func, '__qualname__'): + func_repr = getattr(func, '__qualname__') + elif hasattr(func, '__name__'): + func_repr = getattr(func, '__name__') + else: + func_repr = repr(func) + + if args is not None: + func_repr += _format_args(args) + if suffix: + func_repr += suffix + return func_repr + +def _format_callback_source(func, args): + func_repr = _format_callback(func, args) + source = _get_function_source(func) + if source: + func_repr += ' at %s:%s' % source + return func_repr + + +class Handle(object): + """Object returned by callback registration methods.""" + + __slots__ = ('_callback', '_args', '_cancelled', '_loop', + '_source_traceback', '_repr', '__weakref__') + + def __init__(self, callback, args, loop): + assert not isinstance(callback, Handle), 'A Handle is not a callback' + self._loop = loop + self._callback = callback + self._args = args + self._cancelled = False + self._repr = None + if self._loop.get_debug(): + self._source_traceback = traceback.extract_stack(sys._getframe(1)) + else: + self._source_traceback = None + + def _repr_info(self): + info = [self.__class__.__name__] + if self._cancelled: + info.append('cancelled') + if self._callback is not None: + info.append(_format_callback_source(self._callback, self._args)) + if self._source_traceback: + frame = self._source_traceback[-1] + info.append('created at %s:%s' % (frame[0], frame[1])) + return info + + def __repr__(self): + if self._repr is not None: + return 
self._repr + info = self._repr_info() + return '<%s>' % ' '.join(info) + + def cancel(self): + if not self._cancelled: + self._cancelled = True + if self._loop.get_debug(): + # Keep a representation in debug mode to keep callback and + # parameters. For example, to log the warning + # "Executing took 2.5 second" + self._repr = repr(self) + self._callback = None + self._args = None + + def _run(self): + try: + self._callback(*self._args) + except Exception as exc: + cb = _format_callback_source(self._callback, self._args) + msg = 'Exception in callback {0}'.format(cb) + context = { + 'message': msg, + 'exception': exc, + 'handle': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + self = None # Needed to break cycles when an exception occurs. + + +class TimerHandle(Handle): + """Object returned by timed callback registration methods.""" + + __slots__ = ['_scheduled', '_when'] + + def __init__(self, when, callback, args, loop): + assert when is not None + super(TimerHandle, self).__init__(callback, args, loop) + if self._source_traceback: + del self._source_traceback[-1] + self._when = when + self._scheduled = False + + def _repr_info(self): + info = super(TimerHandle, self)._repr_info() + pos = 2 if self._cancelled else 1 + info.insert(pos, 'when=%s' % self._when) + return info + + def __hash__(self): + return hash(self._when) + + def __lt__(self, other): + return self._when < other._when + + def __le__(self, other): + if self._when < other._when: + return True + return self.__eq__(other) + + def __gt__(self, other): + return self._when > other._when + + def __ge__(self, other): + if self._when > other._when: + return True + return self.__eq__(other) + + def __eq__(self, other): + if isinstance(other, TimerHandle): + return (self._when == other._when and + self._callback == other._callback and + self._args == other._args and + self._cancelled == other._cancelled) + return NotImplemented + + def __ne__(self, other): + equal = self.__eq__(other) + return NotImplemented if equal is NotImplemented else not equal + + def cancel(self): + if not self._cancelled: + self._loop._timer_handle_cancelled(self) + super(TimerHandle, self).cancel() + + +class AbstractServer(object): + """Abstract server returned by create_server().""" + + def close(self): + """Stop serving. This leaves existing connections open.""" + return NotImplemented + + def wait_closed(self): + """Coroutine to wait until service is closed.""" + return NotImplemented + + +if asyncio is not None: + # Reuse asyncio classes so asyncio.set_event_loop() and + # asyncio.set_event_loop_policy() accept Trollius event loop and trollius + # event loop policy + AbstractEventLoop = asyncio.AbstractEventLoop + AbstractEventLoopPolicy = asyncio.AbstractEventLoopPolicy +else: + class AbstractEventLoop(object): + """Abstract event loop.""" + + # Running and stopping the event loop. + + def run_forever(self): + """Run the event loop until stop() is called.""" + raise NotImplementedError + + def run_until_complete(self, future): + """Run the event loop until a Future is done. + + Return the Future's result, or raise its exception. + """ + raise NotImplementedError + + def stop(self): + """Stop the event loop as soon as reasonable. + + Exactly how soon that is may depend on the implementation, but + no more I/O callbacks should be scheduled. 
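+
+            A typical shutdown pattern (a sketch):
+
+                loop.call_later(5, loop.stop)
+                loop.run_forever()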
+ """ + raise NotImplementedError + + def is_running(self): + """Return whether the event loop is currently running.""" + raise NotImplementedError + + def is_closed(self): + """Returns True if the event loop was closed.""" + raise NotImplementedError + + def close(self): + """Close the loop. + + The loop should not be running. + + This is idempotent and irreversible. + + No other methods should be called after this one. + """ + raise NotImplementedError + + # Methods scheduling callbacks. All these return Handles. + + def _timer_handle_cancelled(self, handle): + """Notification that a TimerHandle has been cancelled.""" + raise NotImplementedError + + def call_soon(self, callback, *args): + return self.call_later(0, callback, *args) + + def call_later(self, delay, callback, *args): + raise NotImplementedError + + def call_at(self, when, callback, *args): + raise NotImplementedError + + def time(self): + raise NotImplementedError + + # Method scheduling a coroutine object: create a task. + + def create_task(self, coro): + raise NotImplementedError + + # Methods for interacting with threads. + + def call_soon_threadsafe(self, callback, *args): + raise NotImplementedError + + def run_in_executor(self, executor, func, *args): + raise NotImplementedError + + def set_default_executor(self, executor): + raise NotImplementedError + + # Network I/O methods returning Futures. + + def getaddrinfo(self, host, port, family=0, type=0, proto=0, flags=0): + raise NotImplementedError + + def getnameinfo(self, sockaddr, flags=0): + raise NotImplementedError + + def create_connection(self, protocol_factory, host=None, port=None, + ssl=None, family=0, proto=0, flags=0, sock=None, + local_addr=None, server_hostname=None): + raise NotImplementedError + + def create_server(self, protocol_factory, host=None, port=None, + family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, + sock=None, backlog=100, ssl=None, reuse_address=None): + """A coroutine which creates a TCP server bound to host and port. + + The return value is a Server object which can be used to stop + the service. + + If host is an empty string or None all interfaces are assumed + and a list of multiple sockets will be returned (most likely + one for IPv4 and another one for IPv6). + + family can be set to either AF_INET or AF_INET6 to force the + socket to use IPv4 or IPv6. If not set it will be determined + from host (defaults to AF_UNSPEC). + + flags is a bitmask for getaddrinfo(). + + sock can optionally be specified in order to use a preexisting + socket object. + + backlog is the maximum number of queued connections passed to + listen() (defaults to 100). + + ssl can be set to an SSLContext to enable SSL over the + accepted connections. + + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified will automatically be set to True on + UNIX. + """ + raise NotImplementedError + + def create_unix_connection(self, protocol_factory, path, + ssl=None, sock=None, + server_hostname=None): + raise NotImplementedError + + def create_unix_server(self, protocol_factory, path, + sock=None, backlog=100, ssl=None): + """A coroutine which creates a UNIX Domain Socket server. + + The return value is a Server object, which can be used to stop + the service. + + path is a str, representing a file systsem path to bind the + server socket to. + + sock can optionally be specified in order to use a preexisting + socket object. 
+
+            backlog is the maximum number of queued connections passed to
+            listen() (defaults to 100).
+
+            ssl can be set to an SSLContext to enable SSL over the
+            accepted connections.
+            """
+            raise NotImplementedError
+
+        def create_datagram_endpoint(self, protocol_factory,
+                                     local_addr=None, remote_addr=None,
+                                     family=0, proto=0, flags=0):
+            raise NotImplementedError
+
+        # Pipes and subprocesses.
+
+        def connect_read_pipe(self, protocol_factory, pipe):
+            """Register read pipe in event loop. Set the pipe to
+            non-blocking mode.
+
+            protocol_factory should instantiate an object with the Protocol
+            interface. pipe is a file-like object.
+            Return pair (transport, protocol), where transport supports the
+            ReadTransport interface."""
+            # The reason to accept a file-like object instead of just a file
+            # descriptor is that we need to own the pipe and close it when
+            # the transport is finished. You can get complicated errors if
+            # you pass f.fileno(), close the fd in the pipe transport and
+            # then close f, or vice versa.
+            raise NotImplementedError
+
+        def connect_write_pipe(self, protocol_factory, pipe):
+            """Register write pipe in event loop.
+
+            protocol_factory should instantiate an object with the
+            BaseProtocol interface. pipe is a file-like object already
+            switched to non-blocking mode.
+            Return pair (transport, protocol), where transport supports the
+            WriteTransport interface."""
+            # The reason to accept a file-like object instead of just a file
+            # descriptor is that we need to own the pipe and close it when
+            # the transport is finished. You can get complicated errors if
+            # you pass f.fileno(), close the fd in the pipe transport and
+            # then close f, or vice versa.
+            raise NotImplementedError
+
+        def subprocess_shell(self, protocol_factory, cmd, stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                             **kwargs):
+            raise NotImplementedError
+
+        def subprocess_exec(self, protocol_factory, *args, **kwargs):
+            raise NotImplementedError
+
+        # Ready-based callback registration methods.
+        # The add_*() methods return None.
+        # The remove_*() methods return True if something was removed,
+        # False if there was nothing to delete.
+
+        def add_reader(self, fd, callback, *args):
+            raise NotImplementedError
+
+        def remove_reader(self, fd):
+            raise NotImplementedError
+
+        def add_writer(self, fd, callback, *args):
+            raise NotImplementedError
+
+        def remove_writer(self, fd):
+            raise NotImplementedError
+
+        # Completion based I/O methods returning Futures.
+
+        def sock_recv(self, sock, nbytes):
+            raise NotImplementedError
+
+        def sock_sendall(self, sock, data):
+            raise NotImplementedError
+
+        def sock_connect(self, sock, address):
+            raise NotImplementedError
+
+        def sock_accept(self, sock):
+            raise NotImplementedError
+
+        # Signal handling.
+
+        def add_signal_handler(self, sig, callback, *args):
+            raise NotImplementedError
+
+        def remove_signal_handler(self, sig):
+            raise NotImplementedError
+
+        # Task factory.
+
+        def set_task_factory(self, factory):
+            raise NotImplementedError
+
+        def get_task_factory(self):
+            raise NotImplementedError
+
+        # Error handlers.
+
+        def set_exception_handler(self, handler):
+            raise NotImplementedError
+
+        def default_exception_handler(self, context):
+            raise NotImplementedError
+
+        def call_exception_handler(self, context):
+            raise NotImplementedError
+
+        # Debug flag management.
+
+        def get_debug(self):
+            raise NotImplementedError
+
+        def set_debug(self, enabled):
+            raise NotImplementedError
+
+
+    class AbstractEventLoopPolicy(object):
+        """Abstract policy for accessing the event loop."""
+
+        def get_event_loop(self):
+            """Get the event loop for the current context.
+ + Returns an event loop object implementing the BaseEventLoop interface, + or raises an exception in case no event loop has been set for the + current context and the current policy does not specify to create one. + + It should never return None.""" + raise NotImplementedError + + def set_event_loop(self, loop): + """Set the event loop for the current context to loop.""" + raise NotImplementedError + + def new_event_loop(self): + """Create and return a new event loop object according to this + policy's rules. If there's need to set this loop as the event loop for + the current context, set_event_loop must be called explicitly.""" + raise NotImplementedError + + # Child processes handling (Unix only). + + def get_child_watcher(self): + "Get the watcher for child processes." + raise NotImplementedError + + def set_child_watcher(self, watcher): + """Set the watcher for child processes.""" + raise NotImplementedError + + +class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy): + """Default policy implementation for accessing the event loop. + + In this policy, each thread has its own event loop. However, we + only automatically create an event loop by default for the main + thread; other threads by default have no event loop. + + Other policies may have different rules (e.g. a single global + event loop, or automatically creating an event loop per thread, or + using some other notion of context to which an event loop is + associated). + """ + + _loop_factory = None + + class _Local(threading.local): + _loop = None + _set_called = False + + def __init__(self): + self._local = self._Local() + + def get_event_loop(self): + """Get the event loop. + + This may be None or an instance of EventLoop. + """ + if (self._local._loop is None and + not self._local._set_called and + isinstance(threading.current_thread(), threading._MainThread)): + self.set_event_loop(self.new_event_loop()) + if self._local._loop is None: + raise RuntimeError('There is no current event loop in thread %r.' + % threading.current_thread().name) + return self._local._loop + + def set_event_loop(self, loop): + """Set the event loop.""" + self._local._set_called = True + assert loop is None or isinstance(loop, AbstractEventLoop) + self._local._loop = loop + + def new_event_loop(self): + """Create a new event loop. + + You must call set_event_loop() to make this the current event + loop. + """ + return self._loop_factory() + + +# Event loop policy. The policy itself is always global, even if the +# policy's rules say that there is an event loop per thread (or other +# notion of context). The default policy is installed by the first +# call to get_event_loop_policy(). +_event_loop_policy = None + +# Lock for protecting the on-the-fly creation of the event loop policy. +_lock = threading.Lock() + + +def _init_event_loop_policy(): + global _event_loop_policy + with _lock: + if _event_loop_policy is None: # pragma: no branch + from . import DefaultEventLoopPolicy + _event_loop_policy = DefaultEventLoopPolicy() + + +def get_event_loop_policy(): + """Get the current event loop policy.""" + if _event_loop_policy is None: + _init_event_loop_policy() + return _event_loop_policy + + +def set_event_loop_policy(policy): + """Set the current event loop policy. 
+ + If policy is None, the default policy is restored.""" + global _event_loop_policy + assert policy is None or isinstance(policy, AbstractEventLoopPolicy) + _event_loop_policy = policy + + +def get_event_loop(): + """Equivalent to calling get_event_loop_policy().get_event_loop().""" + return get_event_loop_policy().get_event_loop() + + +def set_event_loop(loop): + """Equivalent to calling get_event_loop_policy().set_event_loop(loop).""" + get_event_loop_policy().set_event_loop(loop) + + +def new_event_loop(): + """Equivalent to calling get_event_loop_policy().new_event_loop().""" + return get_event_loop_policy().new_event_loop() + + +def get_child_watcher(): + """Equivalent to calling get_event_loop_policy().get_child_watcher().""" + return get_event_loop_policy().get_child_watcher() + + +def set_child_watcher(watcher): + """Equivalent to calling + get_event_loop_policy().set_child_watcher(watcher).""" + return get_event_loop_policy().set_child_watcher(watcher) diff --git a/trollius/executor.py b/trollius/executor.py new file mode 100644 index 00000000..9e7fdd78 --- /dev/null +++ b/trollius/executor.py @@ -0,0 +1,84 @@ +from .log import logger + +__all__ = ( + 'CancelledError', 'TimeoutError', + 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED', + ) + +# Argument for default thread pool executor creation. +_MAX_WORKERS = 5 + +try: + import concurrent.futures + import concurrent.futures._base +except ImportError: + FIRST_COMPLETED = 'FIRST_COMPLETED' + FIRST_EXCEPTION = 'FIRST_EXCEPTION' + ALL_COMPLETED = 'ALL_COMPLETED' + + class Future(object): + def __init__(self, callback, args): + try: + self._result = callback(*args) + self._exception = None + except Exception as err: + self._result = None + self._exception = err + self.callbacks = [] + + def cancelled(self): + return False + + def done(self): + return True + + def exception(self): + return self._exception + + def result(self): + if self._exception is not None: + raise self._exception + else: + return self._result + + def add_done_callback(self, callback): + callback(self) + + class Error(Exception): + """Base class for all future-related exceptions.""" + pass + + class CancelledError(Error): + """The Future was cancelled.""" + pass + + class TimeoutError(Error): + """The operation exceeded the given deadline.""" + pass + + class SynchronousExecutor: + """ + Synchronous executor: submit() blocks until it gets the result. 
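+
+        This is a degraded fallback for platforms where the
+        concurrent.futures package is missing: the callback runs
+        immediately in the calling thread, so run_in_executor() provides
+        no real concurrency in this mode.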
+ """ + def submit(self, callback, *args): + return Future(callback, args) + + def shutdown(self, wait): + pass + + def get_default_executor(): + logger.error("concurrent.futures module is missing: " + "use a synchrounous executor as fallback!") + return SynchronousExecutor() +else: + FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED + FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION + ALL_COMPLETED = concurrent.futures.ALL_COMPLETED + + Future = concurrent.futures.Future + Error = concurrent.futures._base.Error + CancelledError = concurrent.futures.CancelledError + TimeoutError = concurrent.futures.TimeoutError + + def get_default_executor(): + return concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS) diff --git a/trollius/futures.py b/trollius/futures.py new file mode 100644 index 00000000..b77d1a11 --- /dev/null +++ b/trollius/futures.py @@ -0,0 +1,450 @@ +"""A Future class similar to the one in PEP 3148.""" + +__all__ = ['CancelledError', 'TimeoutError', + 'InvalidStateError', + 'Future', 'wrap_future', + ] + +import logging +import six +import sys +import traceback +try: + import reprlib # Python 3 +except ImportError: + import repr as reprlib # Python 2 + +from . import compat +from . import events +from . import executor + +# States for Future. +_PENDING = 'PENDING' +_CANCELLED = 'CANCELLED' +_FINISHED = 'FINISHED' + +Error = executor.Error +CancelledError = executor.CancelledError +TimeoutError = executor.TimeoutError + +STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging + + +class InvalidStateError(Error): + """The operation is not allowed in this state.""" + + +class _TracebackLogger(object): + """Helper to log a traceback upon destruction if not cleared. + + This solves a nasty problem with Futures and Tasks that have an + exception set: if nobody asks for the exception, the exception is + never logged. This violates the Zen of Python: 'Errors should + never pass silently. Unless explicitly silenced.' + + However, we don't want to log the exception as soon as + set_exception() is called: if the calling code is written + properly, it will get the exception and handle it properly. But + we *do* want to log it if result() or exception() was never called + -- otherwise developers waste a lot of time wondering why their + buggy code fails silently. + + An earlier attempt added a __del__() method to the Future class + itself, but this backfired because the presence of __del__() + prevents garbage collection from breaking cycles. A way out of + this catch-22 is to avoid having a __del__() method on the Future + class itself, but instead to have a reference to a helper object + with a __del__() method that logs the traceback, where we ensure + that the helper object doesn't participate in cycles, and only the + Future has a reference to it. + + The helper object is added when set_exception() is called. When + the Future is collected, and the helper is present, the helper + object is also collected, and its __del__() method will log the + traceback. When the Future's result() or exception() method is + called (and a helper object is present), it removes the helper + object, after calling its clear() method to prevent it from + logging. + + One downside is that we do a fair amount of work to extract the + traceback from the exception, even when it is never logged. 
It + would seem cheaper to just store the exception object, but that + references the traceback, which references stack frames, which may + reference the Future, which references the _TracebackLogger, and + then the _TracebackLogger would be included in a cycle, which is + what we're trying to avoid! As an optimization, we don't + immediately format the exception; we only do the work when + activate() is called, which call is delayed until after all the + Future's callbacks have run. Since usually a Future has at least + one callback (typically set by 'yield from') and usually that + callback extracts the callback, thereby removing the need to + format the exception. + + PS. I don't claim credit for this solution. I first heard of it + in a discussion about closing files when they are collected. + """ + + __slots__ = ('loop', 'source_traceback', 'exc', 'tb') + + def __init__(self, future, exc): + self.loop = future._loop + self.source_traceback = future._source_traceback + self.exc = exc + self.tb = None + + def activate(self): + exc = self.exc + if exc is not None: + self.exc = None + self.tb = traceback.format_exception(exc.__class__, exc, + exc.__traceback__) + + def clear(self): + self.exc = None + self.tb = None + + def __del__(self): + if self.tb: + msg = 'Future/Task exception was never retrieved\n' + if self.source_traceback: + src = ''.join(traceback.format_list(self.source_traceback)) + msg += 'Future/Task created at (most recent call last):\n' + msg += '%s\n' % src.rstrip() + msg += ''.join(self.tb).rstrip() + self.loop.call_exception_handler({'message': msg}) + + +class Future(object): + """This class is *almost* compatible with concurrent.futures.Future. + + Differences: + + - result() and exception() do not take a timeout argument and + raise an exception when the future isn't done yet. + + - Callbacks registered with add_done_callback() are always called + via the event loop's call_soon_threadsafe(). + + - This class is not compatible with the wait() and as_completed() + methods in the concurrent.futures package. + + (In Python 3.4 or later we may be able to unify the implementations.) + """ + + # Class variables serving as defaults for instance variables. + _state = _PENDING + _result = None + _exception = None + _loop = None + _source_traceback = None + + _blocking = False # proper use of future (yield vs yield from) + + # Used by Python 2 to raise the exception with the original traceback + # in the exception() method in debug mode + _exception_tb = None + + _log_traceback = False # Used for Python 3.4 and later + _tb_logger = None # Used for Python 3.3 only + + def __init__(self, loop=None): + """Initialize the future. + + The optional event_loop argument allows to explicitly set the event + loop object used by the future. If it's not provided, the future uses + the default event loop. 
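+
+        A small usage sketch (assumes "import trollius"):
+
+            loop = trollius.get_event_loop()
+            fut = Future(loop=loop)
+            loop.call_soon(fut.set_result, 'done')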
+ """ + if loop is None: + self._loop = events.get_event_loop() + else: + self._loop = loop + self._callbacks = [] + if self._loop.get_debug(): + self._source_traceback = traceback.extract_stack(sys._getframe(1)) + + def _format_callbacks(self): + cb = self._callbacks + size = len(cb) + if not size: + cb = '' + + def format_cb(callback): + return events._format_callback_source(callback, ()) + + if size == 1: + cb = format_cb(cb[0]) + elif size == 2: + cb = '{0}, {1}'.format(format_cb(cb[0]), format_cb(cb[1])) + elif size > 2: + cb = '{0}, <{1} more>, {2}'.format(format_cb(cb[0]), + size-2, + format_cb(cb[-1])) + return 'cb=[%s]' % cb + + def _repr_info(self): + info = [self._state.lower()] + if self._state == _FINISHED: + if self._exception is not None: + info.append('exception={0!r}'.format(self._exception)) + else: + # use reprlib to limit the length of the output, especially + # for very long strings + result = reprlib.repr(self._result) + info.append('result={0}'.format(result)) + if self._callbacks: + info.append(self._format_callbacks()) + if self._source_traceback: + frame = self._source_traceback[-1] + info.append('created at %s:%s' % (frame[0], frame[1])) + return info + + def __repr__(self): + info = self._repr_info() + return '<%s %s>' % (self.__class__.__name__, ' '.join(info)) + + # On Python 3.3 and older, objects with a destructor part of a reference + # cycle are never destroyed. It's not more the case on Python 3.4 thanks + # to the PEP 442. + if compat.PY34: + def __del__(self): + if not self._log_traceback: + # set_exception() was not called, or result() or exception() + # has consumed the exception + return + exc = self._exception + context = { + 'message': ('%s exception was never retrieved' + % self.__class__.__name__), + 'exception': exc, + 'future': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + + def cancel(self): + """Cancel the future and schedule callbacks. + + If the future is already done or cancelled, return False. Otherwise, + change the future's state to cancelled, schedule the callbacks and + return True. + """ + if self._state != _PENDING: + return False + self._state = _CANCELLED + self._schedule_callbacks() + return True + + def _schedule_callbacks(self): + """Internal: Ask the event loop to call all callbacks. + + The callbacks are scheduled to be called as soon as possible. Also + clears the callback list. + """ + callbacks = self._callbacks[:] + if not callbacks: + return + + self._callbacks[:] = [] + for callback in callbacks: + self._loop.call_soon(callback, self) + + def cancelled(self): + """Return True if the future was cancelled.""" + return self._state == _CANCELLED + + # Don't implement running(); see http://bugs.python.org/issue18699 + + def done(self): + """Return True if the future is done. + + Done means either that a result / exception are available, or that the + future was cancelled. + """ + return self._state != _PENDING + + def result(self): + """Return the result this future represents. + + If the future has been cancelled, raises CancelledError. If the + future's result isn't yet available, raises InvalidStateError. If + the future is done and has an exception set, this exception is raised. 
+ """ + if self._state == _CANCELLED: + raise CancelledError + if self._state != _FINISHED: + raise InvalidStateError('Result is not ready.') + self._log_traceback = False + if self._tb_logger is not None: + self._tb_logger.clear() + self._tb_logger = None + exc_tb = self._exception_tb + self._exception_tb = None + if self._exception is not None: + if exc_tb is not None: + compat.reraise(type(self._exception), self._exception, exc_tb) + else: + raise self._exception + return self._result + + def exception(self): + """Return the exception that was set on this future. + + The exception (or None if no exception was set) is returned only if + the future is done. If the future has been cancelled, raises + CancelledError. If the future isn't done yet, raises + InvalidStateError. + """ + if self._state == _CANCELLED: + raise CancelledError + if self._state != _FINISHED: + raise InvalidStateError('Exception is not set.') + self._log_traceback = False + if self._tb_logger is not None: + self._tb_logger.clear() + self._tb_logger = None + self._exception_tb = None + return self._exception + + def add_done_callback(self, fn): + """Add a callback to be run when the future becomes done. + + The callback is called with a single argument - the future object. If + the future is already done when this is called, the callback is + scheduled with call_soon. + """ + if self._state != _PENDING: + self._loop.call_soon(fn, self) + else: + self._callbacks.append(fn) + + # New method not in PEP 3148. + + def remove_done_callback(self, fn): + """Remove all instances of a callback from the "call when done" list. + + Returns the number of callbacks removed. + """ + filtered_callbacks = [f for f in self._callbacks if f != fn] + removed_count = len(self._callbacks) - len(filtered_callbacks) + if removed_count: + self._callbacks[:] = filtered_callbacks + return removed_count + + # So-called internal methods (note: no set_running_or_notify_cancel()). + + def _set_result_unless_cancelled(self, result): + """Helper setting the result only if the future was not cancelled.""" + if self.cancelled(): + return + self.set_result(result) + + def set_result(self, result): + """Mark the future done and set its result. + + If the future is already done when this method is called, raises + InvalidStateError. + """ + if self._state != _PENDING: + raise InvalidStateError('{0}: {1!r}'.format(self._state, self)) + self._result = result + self._state = _FINISHED + self._schedule_callbacks() + + def _get_exception_tb(self): + return self._exception_tb + + def set_exception(self, exception): + self._set_exception_with_tb(exception, None) + + def _set_exception_with_tb(self, exception, exc_tb): + """Mark the future done and set an exception. + + If the future is already done when this method is called, raises + InvalidStateError. + """ + if self._state != _PENDING: + raise InvalidStateError('{0}: {1!r}'.format(self._state, self)) + if isinstance(exception, type): + exception = exception() + self._exception = exception + if exc_tb is not None: + self._exception_tb = exc_tb + exc_tb = None + elif not six.PY3: + self._exception_tb = sys.exc_info()[2] + self._state = _FINISHED + self._schedule_callbacks() + if compat.PY34: + self._log_traceback = True + else: + self._tb_logger = _TracebackLogger(self, exception) + if hasattr(exception, '__traceback__'): + # Python 3: exception contains a link to the traceback + + # Arrange for the logger to be activated after all callbacks + # have had a chance to call result() or exception(). 
+ self._loop.call_soon(self._tb_logger.activate) + else: + if self._loop.get_debug(): + frame = sys._getframe(1) + tb = ['Traceback (most recent call last):\n'] + if self._exception_tb is not None: + tb += traceback.format_tb(self._exception_tb) + else: + tb += traceback.format_stack(frame) + tb += traceback.format_exception_only(type(exception), exception) + self._tb_logger.tb = tb + else: + self._tb_logger.tb = traceback.format_exception_only( + type(exception), + exception) + + self._tb_logger.exc = None + + # Truly internal methods. + + def _copy_state(self, other): + """Internal helper to copy state from another Future. + + The other Future may be a concurrent.futures.Future. + """ + assert other.done() + if self.cancelled(): + return + assert not self.done() + if other.cancelled(): + self.cancel() + else: + exception = other.exception() + if exception is not None: + self.set_exception(exception) + else: + result = other.result() + self.set_result(result) + +if events.asyncio is not None: + # Accept also asyncio Future objects for interoperability + _FUTURE_CLASSES = (Future, events.asyncio.Future) +else: + _FUTURE_CLASSES = Future + +def wrap_future(fut, loop=None): + """Wrap concurrent.futures.Future object.""" + if isinstance(fut, _FUTURE_CLASSES): + return fut + assert isinstance(fut, executor.Future), \ + 'concurrent.futures.Future is expected, got {0!r}'.format(fut) + if loop is None: + loop = events.get_event_loop() + new_future = Future(loop=loop) + + def _check_cancel_other(f): + if f.cancelled(): + fut.cancel() + + new_future.add_done_callback(_check_cancel_other) + fut.add_done_callback( + lambda future: loop.call_soon_threadsafe( + new_future._copy_state, future)) + return new_future diff --git a/trollius/locks.py b/trollius/locks.py new file mode 100644 index 00000000..03b4daac --- /dev/null +++ b/trollius/locks.py @@ -0,0 +1,462 @@ +"""Synchronization primitives.""" + +__all__ = ['Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore'] + +import collections + +from . import compat +from . import events +from . import futures +from .coroutines import coroutine, From, Return + + +class _ContextManager: + """Context manager. + + This enables the following idiom for acquiring and releasing a + lock around a block: + + with (yield From(lock)): + + + while failing loudly when accidentally using: + + with lock: + + """ + + def __init__(self, lock): + self._lock = lock + + def __enter__(self): + # We have no use for the "as ..." clause in the with + # statement for locks. + return None + + def __exit__(self, *args): + try: + self._lock.release() + finally: + self._lock = None # Crudely prevent reuse. + + +class _ContextManagerMixin(object): + def __enter__(self): + raise RuntimeError( + '"yield From" should be used as context manager expression') + + def __exit__(self, *args): + # This must exist because __enter__ exists, even though that + # always raises; that's how the with-statement works. + pass + + # FIXME: support PEP 492? + # if compat.PY35: + + # def __await__(self): + # # To make "with await lock" work. + # yield from self.acquire() + # return _ContextManager(self) + + # @coroutine + # def __aenter__(self): + # yield from self.acquire() + # # We have no use for the "as ..." clause in the with + # # statement for locks. + # return None + + # @coroutine + # def __aexit__(self, exc_type, exc, tb): + # self.release() + + +class Lock(_ContextManagerMixin): + """Primitive lock objects. 
+
+    A primitive lock is a synchronization primitive that is not owned
+    by a particular coroutine when locked. A primitive lock is in one
+    of two states, 'locked' or 'unlocked'.
+
+    It is created in the unlocked state. It has two basic methods,
+    acquire() and release(). When the state is unlocked, acquire()
+    changes the state to locked and returns immediately. When the
+    state is locked, acquire() blocks until a call to release() in
+    another coroutine changes it to unlocked, then the acquire() call
+    resets it to locked and returns. The release() method should only
+    be called in the locked state; it changes the state to unlocked
+    and returns immediately. If an attempt is made to release an
+    unlocked lock, a RuntimeError will be raised.
+
+    When more than one coroutine is blocked in acquire() waiting for
+    the state to turn to unlocked, only one coroutine proceeds when a
+    release() call resets the state to unlocked: the first coroutine
+    that blocked in acquire() is resumed.
+
+    acquire() is a coroutine and should be called with 'yield From'.
+
+    Locks also support the context management protocol. '(yield From(lock))'
+    should be used as the context manager expression.
+
+    Usage:
+
+        lock = Lock()
+        ...
+        yield From(lock)
+        try:
+            ...
+        finally:
+            lock.release()
+
+    Context manager usage:
+
+        lock = Lock()
+        ...
+        with (yield From(lock)):
+            ...
+
+    Lock objects can be tested for locking state:
+
+        if not lock.locked():
+            yield From(lock)
+        else:
+            # lock is acquired
+            ...
+
+    """
+
+    def __init__(self, loop=None):
+        self._waiters = collections.deque()
+        self._locked = False
+        if loop is not None:
+            self._loop = loop
+        else:
+            self._loop = events.get_event_loop()
+
+    def __repr__(self):
+        res = super(Lock, self).__repr__()
+        extra = 'locked' if self._locked else 'unlocked'
+        if self._waiters:
+            extra = '{0},waiters:{1}'.format(extra, len(self._waiters))
+        return '<{0} [{1}]>'.format(res[1:-1], extra)
+
+    def locked(self):
+        """Return True if lock is acquired."""
+        return self._locked
+
+    @coroutine
+    def acquire(self):
+        """Acquire a lock.
+
+        This method blocks until the lock is unlocked, then sets it to
+        locked and returns True.
+        """
+        if not self._waiters and not self._locked:
+            self._locked = True
+            raise Return(True)
+
+        fut = futures.Future(loop=self._loop)
+        self._waiters.append(fut)
+        try:
+            yield From(fut)
+            self._locked = True
+            raise Return(True)
+        finally:
+            self._waiters.remove(fut)
+
+    def release(self):
+        """Release a lock.
+
+        When the lock is locked, reset it to unlocked, and return.
+        If any other coroutines are blocked waiting for the lock to become
+        unlocked, allow exactly one of them to proceed.
+
+        When invoked on an unlocked lock, a RuntimeError is raised.
+
+        There is no return value.
+        """
+        if self._locked:
+            self._locked = False
+            # Wake up the first waiter who isn't cancelled.
+            for fut in self._waiters:
+                if not fut.done():
+                    fut.set_result(True)
+                    break
+        else:
+            raise RuntimeError('Lock is not acquired.')
+
+
+class Event(object):
+    """Asynchronous equivalent to threading.Event.
+
+    Class implementing event objects. An event manages a flag that can be set
+    to true with the set() method and reset to false with the clear() method.
+    The wait() method blocks until the flag is true. The flag is initially
+    false.
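+
+    Illustrative usage (a sketch; assumes some other coroutine eventually
+    calls event.set()):
+
+        event = Event(loop=loop)
+        yield From(event.wait())  # returns once the flag is set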
+ """ + + def __init__(self, loop=None): + self._waiters = collections.deque() + self._value = False + if loop is not None: + self._loop = loop + else: + self._loop = events.get_event_loop() + + def __repr__(self): + res = super(Event, self).__repr__() + extra = 'set' if self._value else 'unset' + if self._waiters: + extra = '{0},waiters:{1}'.format(extra, len(self._waiters)) + return '<{0} [{1}]>'.format(res[1:-1], extra) + + def is_set(self): + """Return True if and only if the internal flag is true.""" + return self._value + + def set(self): + """Set the internal flag to true. All coroutines waiting for it to + become true are awakened. Coroutine that call wait() once the flag is + true will not block at all. + """ + if not self._value: + self._value = True + + for fut in self._waiters: + if not fut.done(): + fut.set_result(True) + + def clear(self): + """Reset the internal flag to false. Subsequently, coroutines calling + wait() will block until set() is called to set the internal flag + to true again.""" + self._value = False + + @coroutine + def wait(self): + """Block until the internal flag is true. + + If the internal flag is true on entry, return True + immediately. Otherwise, block until another coroutine calls + set() to set the flag to true, then return True. + """ + if self._value: + raise Return(True) + + fut = futures.Future(loop=self._loop) + self._waiters.append(fut) + try: + yield From(fut) + raise Return(True) + finally: + self._waiters.remove(fut) + + +class Condition(_ContextManagerMixin): + """Asynchronous equivalent to threading.Condition. + + This class implements condition variable objects. A condition variable + allows one or more coroutines to wait until they are notified by another + coroutine. + + A new Lock object is created and used as the underlying lock. + """ + + def __init__(self, lock=None, loop=None): + if loop is not None: + self._loop = loop + else: + self._loop = events.get_event_loop() + + if lock is None: + lock = Lock(loop=self._loop) + elif lock._loop is not self._loop: + raise ValueError("loop argument must agree with lock") + + self._lock = lock + # Export the lock's locked(), acquire() and release() methods. + self.locked = lock.locked + self.acquire = lock.acquire + self.release = lock.release + + self._waiters = collections.deque() + + def __repr__(self): + res = super(Condition, self).__repr__() + extra = 'locked' if self.locked() else 'unlocked' + if self._waiters: + extra = '{0},waiters:{1}'.format(extra, len(self._waiters)) + return '<{0} [{1}]>'.format(res[1:-1], extra) + + @coroutine + def wait(self): + """Wait until notified. + + If the calling coroutine has not acquired the lock when this + method is called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks + until it is awakened by a notify() or notify_all() call for + the same condition variable in another coroutine. Once + awakened, it re-acquires the lock and returns True. 
+ """ + if not self.locked(): + raise RuntimeError('cannot wait on un-acquired lock') + + self.release() + try: + fut = futures.Future(loop=self._loop) + self._waiters.append(fut) + try: + yield From(fut) + raise Return(True) + finally: + self._waiters.remove(fut) + + except Exception as exc: + # Workaround CPython bug #23353: using yield/yield-from in an + # except block of a generator doesn't clear properly + # sys.exc_info() + err = exc + else: + err = None + + if err is not None: + yield From(self.acquire()) + raise err + + yield From(self.acquire()) + + @coroutine + def wait_for(self, predicate): + """Wait until a predicate becomes true. + + The predicate should be a callable which result will be + interpreted as a boolean value. The final predicate value is + the return value. + """ + result = predicate() + while not result: + yield From(self.wait()) + result = predicate() + raise Return(result) + + def notify(self, n=1): + """By default, wake up one coroutine waiting on this condition, if any. + If the calling coroutine has not acquired the lock when this method + is called, a RuntimeError is raised. + + This method wakes up at most n of the coroutines waiting for the + condition variable; it is a no-op if no coroutines are waiting. + + Note: an awakened coroutine does not actually return from its + wait() call until it can reacquire the lock. Since notify() does + not release the lock, its caller should. + """ + if not self.locked(): + raise RuntimeError('cannot notify on un-acquired lock') + + idx = 0 + for fut in self._waiters: + if idx >= n: + break + + if not fut.done(): + idx += 1 + fut.set_result(False) + + def notify_all(self): + """Wake up all threads waiting on this condition. This method acts + like notify(), but wakes up all waiting threads instead of one. If the + calling thread has not acquired the lock when this method is called, + a RuntimeError is raised. + """ + self.notify(len(self._waiters)) + + +class Semaphore(_ContextManagerMixin): + """A Semaphore implementation. + + A semaphore manages an internal counter which is decremented by each + acquire() call and incremented by each release() call. The counter + can never go below zero; when acquire() finds that it is zero, it blocks, + waiting until some other thread calls release(). + + Semaphores also support the context management protocol. + + The optional argument gives the initial value for the internal + counter; it defaults to 1. If the value given is less than 0, + ValueError is raised. + """ + + def __init__(self, value=1, loop=None): + if value < 0: + raise ValueError("Semaphore initial value must be >= 0") + self._value = value + self._waiters = collections.deque() + if loop is not None: + self._loop = loop + else: + self._loop = events.get_event_loop() + + def __repr__(self): + res = super(Semaphore, self).__repr__() + extra = 'locked' if self.locked() else 'unlocked,value:{0}'.format( + self._value) + if self._waiters: + extra = '{0},waiters:{1}'.format(extra, len(self._waiters)) + return '<{0} [{1}]>'.format(res[1:-1], extra) + + def locked(self): + """Returns True if semaphore can not be acquired immediately.""" + return self._value == 0 + + @coroutine + def acquire(self): + """Acquire a semaphore. + + If the internal counter is larger than zero on entry, + decrement it by one and return True immediately. If it is + zero on entry, block, waiting until some other coroutine has + called release() to make it larger than 0, and then return + True. 
+ """ + if not self._waiters and self._value > 0: + self._value -= 1 + raise Return(True) + + fut = futures.Future(loop=self._loop) + self._waiters.append(fut) + try: + yield From(fut) + self._value -= 1 + raise Return(True) + finally: + self._waiters.remove(fut) + + def release(self): + """Release a semaphore, incrementing the internal counter by one. + When it was zero on entry and another coroutine is waiting for it to + become larger than zero again, wake up that coroutine. + """ + self._value += 1 + for waiter in self._waiters: + if not waiter.done(): + waiter.set_result(True) + break + + +class BoundedSemaphore(Semaphore): + """A bounded semaphore implementation. + + This raises ValueError in release() if it would increase the value + above the initial value. + """ + + def __init__(self, value=1, loop=None): + self._bound_value = value + super(BoundedSemaphore, self).__init__(value, loop=loop) + + def release(self): + if self._value >= self._bound_value: + raise ValueError('BoundedSemaphore released too many times') + super(BoundedSemaphore, self).release() diff --git a/trollius/log.py b/trollius/log.py new file mode 100644 index 00000000..23a7074a --- /dev/null +++ b/trollius/log.py @@ -0,0 +1,7 @@ +"""Logging configuration.""" + +import logging + + +# Name the logger after the package. +logger = logging.getLogger(__package__) diff --git a/trollius/proactor_events.py b/trollius/proactor_events.py new file mode 100644 index 00000000..66b4caf1 --- /dev/null +++ b/trollius/proactor_events.py @@ -0,0 +1,549 @@ +"""Event loop using a proactor and related classes. + +A proactor is a "notify-on-completion" multiplexer. Currently a +proactor is only implemented on Windows with IOCP. +""" + +__all__ = ['BaseProactorEventLoop'] + +import socket +import warnings + +from . import base_events +from . import compat +from . import constants +from . import futures +from . import sslproto +from . import transports +from .log import logger +from .compat import flatten_bytes +from .py33_exceptions import (BrokenPipeError, + ConnectionAbortedError, ConnectionResetError) + + +class _ProactorBasePipeTransport(transports._FlowControlMixin, + transports.BaseTransport): + """Base class for pipe and socket transports.""" + + def __init__(self, loop, sock, protocol, waiter=None, + extra=None, server=None): + super(_ProactorBasePipeTransport, self).__init__(extra, loop) + self._set_extra(sock) + self._sock = sock + self._protocol = protocol + self._server = server + self._buffer = None # None or bytearray. + self._read_fut = None + self._write_fut = None + self._pending_write = 0 + self._conn_lost = 0 + self._closing = False # Set when close() called. 
+        self._eof_written = False
+        if self._server is not None:
+            self._server._attach()
+        self._loop.call_soon(self._protocol.connection_made, self)
+        if waiter is not None:
+            # only wake up the waiter when connection_made() has been called
+            self._loop.call_soon(waiter._set_result_unless_cancelled, None)
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self._sock is None:
+            info.append('closed')
+        elif self._closing:
+            info.append('closing')
+        if self._sock is not None:
+            info.append('fd=%s' % self._sock.fileno())
+        if self._read_fut is not None:
+            info.append('read=%s' % self._read_fut)
+        if self._write_fut is not None:
+            info.append("write=%r" % self._write_fut)
+        if self._buffer:
+            bufsize = len(self._buffer)
+            info.append('write_bufsize=%s' % bufsize)
+        if self._eof_written:
+            info.append('EOF written')
+        return '<%s>' % ' '.join(info)
+
+    def _set_extra(self, sock):
+        self._extra['pipe'] = sock
+
+    def close(self):
+        if self._closing:
+            return
+        self._closing = True
+        self._conn_lost += 1
+        if not self._buffer and self._write_fut is None:
+            self._loop.call_soon(self._call_connection_lost, None)
+        if self._read_fut is not None:
+            self._read_fut.cancel()
+            self._read_fut = None
+
+    # On Python 3.3 and older, objects with a destructor that are part of a
+    # reference cycle are never destroyed. This is no longer the case on
+    # Python 3.4, thanks to PEP 442.
+    if compat.PY34:
+        def __del__(self):
+            if self._sock is not None:
+                warnings.warn("unclosed transport %r" % self, ResourceWarning)
+                self.close()
+
+    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
+        if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
+            if self._loop.get_debug():
+                logger.debug("%r: %s", self, message, exc_info=True)
+        else:
+            self._loop.call_exception_handler({
+                'message': message,
+                'exception': exc,
+                'transport': self,
+                'protocol': self._protocol,
+            })
+        self._force_close(exc)
+
+    def _force_close(self, exc):
+        if self._closing:
+            return
+        self._closing = True
+        self._conn_lost += 1
+        if self._write_fut:
+            self._write_fut.cancel()
+            self._write_fut = None
+        if self._read_fut:
+            self._read_fut.cancel()
+            self._read_fut = None
+        self._pending_write = 0
+        self._buffer = None
+        self._loop.call_soon(self._call_connection_lost, exc)
+
+    def _call_connection_lost(self, exc):
+        try:
+            self._protocol.connection_lost(exc)
+        finally:
+            # XXX If there is a pending overlapped read on the other
+            # end then it may fail with ERROR_NETNAME_DELETED if we
+            # just close our end. First calling shutdown() seems to
+            # cure it, but maybe using DisconnectEx() would be better.
+ if hasattr(self._sock, 'shutdown'): + self._sock.shutdown(socket.SHUT_RDWR) + self._sock.close() + self._sock = None + server = self._server + if server is not None: + server._detach() + self._server = None + + def get_write_buffer_size(self): + size = self._pending_write + if self._buffer is not None: + size += len(self._buffer) + return size + + +class _ProactorReadPipeTransport(_ProactorBasePipeTransport, + transports.ReadTransport): + """Transport for read pipes.""" + + def __init__(self, loop, sock, protocol, waiter=None, + extra=None, server=None): + super(_ProactorReadPipeTransport, self).__init__(loop, sock, protocol, + waiter, extra, server) + self._paused = False + self._loop.call_soon(self._loop_reading) + + def pause_reading(self): + if self._closing: + raise RuntimeError('Cannot pause_reading() when closing') + if self._paused: + raise RuntimeError('Already paused') + self._paused = True + if self._loop.get_debug(): + logger.debug("%r pauses reading", self) + + def resume_reading(self): + if not self._paused: + raise RuntimeError('Not paused') + self._paused = False + if self._closing: + return + self._loop.call_soon(self._loop_reading, self._read_fut) + if self._loop.get_debug(): + logger.debug("%r resumes reading", self) + + def _loop_reading(self, fut=None): + if self._paused: + return + data = None + + try: + if fut is not None: + assert self._read_fut is fut or (self._read_fut is None and + self._closing) + self._read_fut = None + data = fut.result() # deliver data later in "finally" clause + + if self._closing: + # since close() has been called we ignore any read data + data = None + return + + if data == b'': + # we got end-of-file so no need to reschedule a new read + return + + # reschedule a new read + self._read_fut = self._loop._proactor.recv(self._sock, 4096) + except ConnectionAbortedError as exc: + if not self._closing: + self._fatal_error(exc, 'Fatal read error on pipe transport') + elif self._loop.get_debug(): + logger.debug("Read error on pipe transport while closing", + exc_info=True) + except ConnectionResetError as exc: + self._force_close(exc) + except OSError as exc: + self._fatal_error(exc, 'Fatal read error on pipe transport') + except futures.CancelledError: + if not self._closing: + raise + else: + self._read_fut.add_done_callback(self._loop_reading) + finally: + if data: + self._protocol.data_received(data) + elif data is not None: + if self._loop.get_debug(): + logger.debug("%r received EOF", self) + keep_open = self._protocol.eof_received() + if not keep_open: + self.close() + + +class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport, + transports.WriteTransport): + """Transport for write pipes.""" + + def write(self, data): + data = flatten_bytes(data) + if self._eof_written: + raise RuntimeError('write_eof() already called') + + if not data: + return + + if self._conn_lost: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + # Observable states: + # 1. IDLE: _write_fut and _buffer both None + # 2. WRITING: _write_fut set; _buffer None + # 3. BACKED UP: _write_fut set; _buffer a bytearray + # We always copy the data, so the caller can't modify it + # while we're still waiting for the I/O to happen. + if self._write_fut is None: # IDLE -> WRITING + assert self._buffer is None + # Pass a copy, except if it's already immutable. 
+ self._loop_writing(data=bytes(data)) + elif not self._buffer: # WRITING -> BACKED UP + # Make a mutable copy which we can extend. + self._buffer = bytearray(data) + self._maybe_pause_protocol() + else: # BACKED UP + # Append to buffer (also copies). + self._buffer.extend(data) + self._maybe_pause_protocol() + + def _loop_writing(self, f=None, data=None): + try: + assert f is self._write_fut + self._write_fut = None + self._pending_write = 0 + if f: + f.result() + if data is None: + data = self._buffer + self._buffer = None + if not data: + if self._closing: + self._loop.call_soon(self._call_connection_lost, None) + if self._eof_written: + self._sock.shutdown(socket.SHUT_WR) + # Now that we've reduced the buffer size, tell the + # protocol to resume writing if it was paused. Note that + # we do this last since the callback is called immediately + # and it may add more data to the buffer (even causing the + # protocol to be paused again). + self._maybe_resume_protocol() + else: + self._write_fut = self._loop._proactor.send(self._sock, data) + if not self._write_fut.done(): + assert self._pending_write == 0 + self._pending_write = len(data) + self._write_fut.add_done_callback(self._loop_writing) + self._maybe_pause_protocol() + else: + self._write_fut.add_done_callback(self._loop_writing) + except ConnectionResetError as exc: + self._force_close(exc) + except OSError as exc: + self._fatal_error(exc, 'Fatal write error on pipe transport') + + def can_write_eof(self): + return True + + def write_eof(self): + self.close() + + def abort(self): + self._force_close(None) + + +class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport): + def __init__(self, *args, **kw): + super(_ProactorWritePipeTransport, self).__init__(*args, **kw) + self._read_fut = self._loop._proactor.recv(self._sock, 16) + self._read_fut.add_done_callback(self._pipe_closed) + + def _pipe_closed(self, fut): + if fut.cancelled(): + # the transport has been closed + return + assert fut.result() == b'' + if self._closing: + assert self._read_fut is None + return + assert fut is self._read_fut, (fut, self._read_fut) + self._read_fut = None + if self._write_fut is not None: + self._force_close(BrokenPipeError()) + else: + self.close() + + +class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport, + _ProactorBaseWritePipeTransport, + transports.Transport): + """Transport for duplex pipes.""" + + def can_write_eof(self): + return False + + def write_eof(self): + raise NotImplementedError + + +class _ProactorSocketTransport(_ProactorReadPipeTransport, + _ProactorBaseWritePipeTransport, + transports.Transport): + """Transport for connected sockets.""" + + def _set_extra(self, sock): + self._extra['socket'] = sock + try: + self._extra['sockname'] = sock.getsockname() + except (socket.error, AttributeError): + if self._loop.get_debug(): + logger.warning("getsockname() failed on %r", + sock, exc_info=True) + if 'peername' not in self._extra: + try: + self._extra['peername'] = sock.getpeername() + except (socket.error, AttributeError): + if self._loop.get_debug(): + logger.warning("getpeername() failed on %r", + sock, exc_info=True) + + def can_write_eof(self): + return True + + def write_eof(self): + if self._closing or self._eof_written: + return + self._eof_written = True + if self._write_fut is None: + self._sock.shutdown(socket.SHUT_WR) + + +class BaseProactorEventLoop(base_events.BaseEventLoop): + + def __init__(self, proactor): + super(BaseProactorEventLoop, self).__init__() + logger.debug('Using proactor: %s', 
proactor.__class__.__name__) + self._proactor = proactor + self._selector = proactor # convenient alias + self._self_reading_future = None + self._accept_futures = {} # socket file descriptor => Future + proactor.set_loop(self) + self._make_self_pipe() + + def _make_socket_transport(self, sock, protocol, waiter=None, + extra=None, server=None): + return _ProactorSocketTransport(self, sock, protocol, waiter, + extra, server) + + def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None, + server_side=False, server_hostname=None, + extra=None, server=None): + if not sslproto._is_sslproto_available(): + raise NotImplementedError("Proactor event loop requires Python 3.5" + " or newer (ssl.MemoryBIO) to support " + "SSL") + + ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter, + server_side, server_hostname) + _ProactorSocketTransport(self, rawsock, ssl_protocol, + extra=extra, server=server) + return ssl_protocol._app_transport + + def _make_duplex_pipe_transport(self, sock, protocol, waiter=None, + extra=None): + return _ProactorDuplexPipeTransport(self, + sock, protocol, waiter, extra) + + def _make_read_pipe_transport(self, sock, protocol, waiter=None, + extra=None): + return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra) + + def _make_write_pipe_transport(self, sock, protocol, waiter=None, + extra=None): + # We want connection_lost() to be called when other end closes + return _ProactorWritePipeTransport(self, + sock, protocol, waiter, extra) + + def close(self): + if self.is_running(): + raise RuntimeError("Cannot close a running event loop") + if self.is_closed(): + return + + # Call these methods before closing the event loop (before calling + # BaseEventLoop.close), because they can schedule callbacks with + # call_soon(), which is forbidden when the event loop is closed. + self._stop_accept_futures() + self._close_self_pipe() + self._proactor.close() + self._proactor = None + self._selector = None + + # Close the event loop + super(BaseProactorEventLoop, self).close() + + def sock_recv(self, sock, n): + return self._proactor.recv(sock, n) + + def sock_sendall(self, sock, data): + return self._proactor.send(sock, data) + + def sock_connect(self, sock, address): + try: + if self._debug: + base_events._check_resolved_address(sock, address) + except ValueError as err: + fut = futures.Future(loop=self) + fut.set_exception(err) + return fut + else: + return self._proactor.connect(sock, address) + + def sock_accept(self, sock): + return self._proactor.accept(sock) + + def _socketpair(self): + raise NotImplementedError + + def _close_self_pipe(self): + if self._self_reading_future is not None: + self._self_reading_future.cancel() + self._self_reading_future = None + self._ssock.close() + self._ssock = None + self._csock.close() + self._csock = None + self._internal_fds -= 1 + + def _make_self_pipe(self): + # A self-socket, really. 
:-)
+        self._ssock, self._csock = self._socketpair()
+        self._ssock.setblocking(False)
+        self._csock.setblocking(False)
+        self._internal_fds += 1
+        self.call_soon(self._loop_self_reading)
+
+    def _loop_self_reading(self, f=None):
+        try:
+            if f is not None:
+                f.result()  # may raise
+            f = self._proactor.recv(self._ssock, 4096)
+        except futures.CancelledError:
+            # _close_self_pipe() has been called, stop waiting for data
+            return
+        except Exception as exc:
+            self.call_exception_handler({
+                'message': 'Error on reading from the event loop self pipe',
+                'exception': exc,
+                'loop': self,
+            })
+        else:
+            self._self_reading_future = f
+            f.add_done_callback(self._loop_self_reading)
+
+    def _write_to_self(self):
+        self._csock.send(b'\0')
+
+    def _start_serving(self, protocol_factory, sock,
+                       sslcontext=None, server=None):
+
+        def loop(f=None):
+            try:
+                if f is not None:
+                    conn, addr = f.result()
+                    if self._debug:
+                        logger.debug("%r got a new connection from %r: %r",
+                                     server, addr, conn)
+                    protocol = protocol_factory()
+                    if sslcontext is not None:
+                        self._make_ssl_transport(
+                            conn, protocol, sslcontext, server_side=True,
+                            extra={'peername': addr}, server=server)
+                    else:
+                        self._make_socket_transport(
+                            conn, protocol,
+                            extra={'peername': addr}, server=server)
+                if self.is_closed():
+                    return
+                f = self._proactor.accept(sock)
+            except OSError as exc:
+                if sock.fileno() != -1:
+                    self.call_exception_handler({
+                        'message': 'Accept failed on a socket',
+                        'exception': exc,
+                        'socket': sock,
+                    })
+                    sock.close()
+                elif self._debug:
+                    logger.debug("Accept failed on socket %r",
+                                 sock, exc_info=True)
+            except futures.CancelledError:
+                sock.close()
+            else:
+                self._accept_futures[sock.fileno()] = f
+                f.add_done_callback(loop)
+
+        self.call_soon(loop)
+
+    def _process_events(self, event_list):
+        # Events are processed in the IocpProactor._poll() method
+        pass
+
+    def _stop_accept_futures(self):
+        for future in self._accept_futures.values():
+            future.cancel()
+        self._accept_futures.clear()
+
+    def _stop_serving(self, sock):
+        self._stop_accept_futures()
+        self._proactor._stop_serving(sock)
+        sock.close()
diff --git a/trollius/protocols.py b/trollius/protocols.py
new file mode 100644
index 00000000..2c18287b
--- /dev/null
+++ b/trollius/protocols.py
@@ -0,0 +1,134 @@
+"""Abstract Protocol class."""
+
+__all__ = ['BaseProtocol', 'Protocol', 'DatagramProtocol',
+           'SubprocessProtocol']
+
+
+class BaseProtocol(object):
+    """Common base class for protocol interfaces.
+
+    Usually the user implements protocols derived from BaseProtocol,
+    such as Protocol or ProcessProtocol.
+
+    The only case where BaseProtocol should be implemented directly is a
+    write-only transport, such as a write pipe.
+    """
+
+    def connection_made(self, transport):
+        """Called when a connection is made.
+
+        The argument is the transport representing the pipe connection.
+        To receive data, wait for data_received() calls.
+        When the connection is closed, connection_lost() is called.
+        """
+
+    def connection_lost(self, exc):
+        """Called when the connection is lost or closed.
+
+        The argument is an exception object or None (the latter
+        meaning a regular EOF was received, or the connection was
+        aborted or closed).
+        """
+
+    def pause_writing(self):
+        """Called when the transport's buffer goes over the high-water mark.
+
+        Pause and resume calls are paired -- pause_writing() is called
+        once when the buffer goes strictly over the high-water mark
+        (even if subsequent writes increase the buffer size even
+        more), and eventually resume_writing() is called once when the
+        buffer size reaches the low-water mark.
+
+        Note that if the buffer size equals the high-water mark,
+        pause_writing() is not called -- it must go strictly over.
+        Conversely, resume_writing() is called when the buffer size is
+        equal to or lower than the low-water mark. These end conditions
+        are important to ensure that things go as expected when either
+        mark is zero.
+
+        NOTE: This is the only Protocol callback that is not called
+        through EventLoop.call_soon() -- if it were, it would have no
+        effect when it's most needed (when the app keeps writing
+        without yielding until pause_writing() is called).
+        """
+
+    def resume_writing(self):
+        """Called when the transport's buffer drains below the low-water mark.
+
+        See pause_writing() for details.
+        """
+
+
+class Protocol(BaseProtocol):
+    """Interface for stream protocol.
+
+    The user should implement this interface. They can inherit from
+    this class but don't need to. The implementations here do
+    nothing (they don't raise exceptions).
+
+    When the user wants to request a transport, they pass a protocol
+    factory to a utility function (e.g., EventLoop.create_connection()).
+
+    When the connection is made successfully, connection_made() is
+    called with a suitable transport object. Then data_received()
+    will be called 0 or more times with data (bytes) received from the
+    transport; finally, connection_lost() will be called exactly once
+    with either an exception object or None as an argument.
+
+    State machine of calls:
+
+      start -> CM [-> DR*] [-> ER?] -> CL -> end
+
+    * CM: connection_made()
+    * DR: data_received()
+    * ER: eof_received()
+    * CL: connection_lost()
+    """
+
+    def data_received(self, data):
+        """Called when some data is received.
+
+        The argument is a bytes object.
+        """
+
+    def eof_received(self):
+        """Called when the other end calls write_eof() or equivalent.
+
+        If this returns a false value (including None), the transport
+        will close itself. If it returns a true value, closing the
+        transport is up to the protocol.
+        """
+
+
+class DatagramProtocol(BaseProtocol):
+    """Interface for datagram protocol."""
+
+    def datagram_received(self, data, addr):
+        """Called when some datagram is received."""
+
+    def error_received(self, exc):
+        """Called when a send or receive operation raises an OSError.
+
+        (Other than BlockingIOError or InterruptedError.)
+        """
+
+
+class SubprocessProtocol(BaseProtocol):
+    """Interface for protocol for subprocess calls."""
+
+    def pipe_data_received(self, fd, data):
+        """Called when the subprocess writes data into stdout/stderr pipe.
+
+        fd is the int file descriptor.
+        data is a bytes object.
+        """
+
+    def pipe_connection_lost(self, fd, exc):
+        """Called when a file descriptor associated with the child process is
+        closed.
+
+        fd is the int file descriptor that was closed.
+        """
+
+    def process_exited(self):
+        """Called when subprocess has exited."""
diff --git a/trollius/py27_weakrefset.py b/trollius/py27_weakrefset.py
new file mode 100644
index 00000000..990c3a6b
--- /dev/null
+++ b/trollius/py27_weakrefset.py
@@ -0,0 +1,202 @@
+# Access WeakSet through the weakref module.
+# This code is separated-out because it is needed
+# by abc.py to load everything else at startup.
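+#
+# Illustrative behaviour (a sketch; 'C' is a placeholder class):
+#
+#     class C(object):
+#         pass
+#     obj = C()
+#     ws = WeakSet([obj])
+#     assert obj in ws
+#     del obj  # once 'obj' is collected, its entry silently disappears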
+ +from _weakref import ref + +__all__ = ['WeakSet'] + + +class _IterationGuard(object): + # This context manager registers itself in the current iterators of the + # weak container, such as to delay all removals until the context manager + # exits. + # This technique should be relatively thread-safe (since sets are). + + def __init__(self, weakcontainer): + # Don't create cycles + self.weakcontainer = ref(weakcontainer) + + def __enter__(self): + w = self.weakcontainer() + if w is not None: + w._iterating.add(self) + return self + + def __exit__(self, e, t, b): + w = self.weakcontainer() + if w is not None: + s = w._iterating + s.remove(self) + if not s: + w._commit_removals() + + +class WeakSet(object): + def __init__(self, data=None): + self.data = set() + def _remove(item, selfref=ref(self)): + self = selfref() + if self is not None: + if self._iterating: + self._pending_removals.append(item) + else: + self.data.discard(item) + self._remove = _remove + # A list of keys to be removed + self._pending_removals = [] + self._iterating = set() + if data is not None: + self.update(data) + + def _commit_removals(self): + l = self._pending_removals + discard = self.data.discard + while l: + discard(l.pop()) + + def __iter__(self): + with _IterationGuard(self): + for itemref in self.data: + item = itemref() + if item is not None: + yield item + + def __len__(self): + return len(self.data) - len(self._pending_removals) + + def __contains__(self, item): + try: + wr = ref(item) + except TypeError: + return False + return wr in self.data + + def __reduce__(self): + return (self.__class__, (list(self),), + getattr(self, '__dict__', None)) + + __hash__ = None + + def add(self, item): + if self._pending_removals: + self._commit_removals() + self.data.add(ref(item, self._remove)) + + def clear(self): + if self._pending_removals: + self._commit_removals() + self.data.clear() + + def copy(self): + return self.__class__(self) + + def pop(self): + if self._pending_removals: + self._commit_removals() + while True: + try: + itemref = self.data.pop() + except KeyError: + raise KeyError('pop from empty WeakSet') + item = itemref() + if item is not None: + return item + + def remove(self, item): + if self._pending_removals: + self._commit_removals() + self.data.remove(ref(item)) + + def discard(self, item): + if self._pending_removals: + self._commit_removals() + self.data.discard(ref(item)) + + def update(self, other): + if self._pending_removals: + self._commit_removals() + for element in other: + self.add(element) + + def __ior__(self, other): + self.update(other) + return self + + def difference(self, other): + newset = self.copy() + newset.difference_update(other) + return newset + __sub__ = difference + + def difference_update(self, other): + self.__isub__(other) + def __isub__(self, other): + if self._pending_removals: + self._commit_removals() + if self is other: + self.data.clear() + else: + self.data.difference_update(ref(item) for item in other) + return self + + def intersection(self, other): + return self.__class__(item for item in other if item in self) + __and__ = intersection + + def intersection_update(self, other): + self.__iand__(other) + def __iand__(self, other): + if self._pending_removals: + self._commit_removals() + self.data.intersection_update(ref(item) for item in other) + return self + + def issubset(self, other): + return self.data.issubset(ref(item) for item in other) + __le__ = issubset + + def __lt__(self, other): + return self.data < set(ref(item) for item in other) + + def 
issuperset(self, other): + return self.data.issuperset(ref(item) for item in other) + __ge__ = issuperset + + def __gt__(self, other): + return self.data > set(ref(item) for item in other) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.data == set(ref(item) for item in other) + + def __ne__(self, other): + opposite = self.__eq__(other) + if opposite is NotImplemented: + return NotImplemented + return not opposite + + def symmetric_difference(self, other): + newset = self.copy() + newset.symmetric_difference_update(other) + return newset + __xor__ = symmetric_difference + + def symmetric_difference_update(self, other): + self.__ixor__(other) + def __ixor__(self, other): + if self._pending_removals: + self._commit_removals() + if self is other: + self.data.clear() + else: + self.data.symmetric_difference_update(ref(item, self._remove) for item in other) + return self + + def union(self, other): + return self.__class__(e for s in (self, other) for e in s) + __or__ = union + + def isdisjoint(self, other): + return len(self.intersection(other)) == 0 diff --git a/trollius/py33_exceptions.py b/trollius/py33_exceptions.py new file mode 100644 index 00000000..477aaddf --- /dev/null +++ b/trollius/py33_exceptions.py @@ -0,0 +1,147 @@ +__all__ = ['BlockingIOError', 'BrokenPipeError', 'ChildProcessError', + 'ConnectionRefusedError', 'ConnectionResetError', + 'InterruptedError', 'ConnectionAbortedError', 'PermissionError', + 'FileNotFoundError', 'ProcessLookupError', + ] + +import errno +import select +import socket +import sys +try: + import ssl +except ImportError: + ssl = None + +from .compat import PY33 + +if PY33: + import builtins + BlockingIOError = builtins.BlockingIOError + BrokenPipeError = builtins.BrokenPipeError + ChildProcessError = builtins.ChildProcessError + ConnectionRefusedError = builtins.ConnectionRefusedError + ConnectionResetError = builtins.ConnectionResetError + InterruptedError = builtins.InterruptedError + ConnectionAbortedError = builtins.ConnectionAbortedError + PermissionError = builtins.PermissionError + FileNotFoundError = builtins.FileNotFoundError + ProcessLookupError = builtins.ProcessLookupError + +else: + # Python < 3.3 + class BlockingIOError(OSError): + pass + + class BrokenPipeError(OSError): + pass + + class ChildProcessError(OSError): + pass + + class ConnectionRefusedError(OSError): + pass + + class InterruptedError(OSError): + pass + + class ConnectionResetError(OSError): + pass + + class ConnectionAbortedError(OSError): + pass + + class PermissionError(OSError): + pass + + class FileNotFoundError(OSError): + pass + + class ProcessLookupError(OSError): + pass + + +_MAP_ERRNO = { + errno.EACCES: PermissionError, + errno.EAGAIN: BlockingIOError, + errno.EALREADY: BlockingIOError, + errno.ECHILD: ChildProcessError, + errno.ECONNABORTED: ConnectionAbortedError, + errno.ECONNREFUSED: ConnectionRefusedError, + errno.ECONNRESET: ConnectionResetError, + errno.EINPROGRESS: BlockingIOError, + errno.EINTR: InterruptedError, + errno.ENOENT: FileNotFoundError, + errno.EPERM: PermissionError, + errno.EPIPE: BrokenPipeError, + errno.ESHUTDOWN: BrokenPipeError, + errno.EWOULDBLOCK: BlockingIOError, + errno.ESRCH: ProcessLookupError, +} + +if hasattr(errno, 'EBADF') and errno.EBADF not in _MAP_ERRNO: + _MAP_ERRNO[errno.EBADF] = OSError + +if sys.platform == 'win32': + from trollius import _overlapped + _MAP_ERRNO.update({ + _overlapped.ERROR_CONNECTION_REFUSED: ConnectionRefusedError, + 
_overlapped.ERROR_CONNECTION_ABORTED: ConnectionAbortedError, + _overlapped.ERROR_NETNAME_DELETED: ConnectionResetError, + }) + + +def get_error_class(key, default): + return _MAP_ERRNO.get(key, default) + + +if sys.version_info >= (3,): + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value +else: + exec("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + +def _wrap_error(exc, mapping, key): + if key not in mapping: + return + new_err_cls = mapping[key] + new_err = new_err_cls(*exc.args) + + # raise a new exception with the original traceback + if hasattr(exc, '__traceback__'): + traceback = exc.__traceback__ + else: + traceback = sys.exc_info()[2] + reraise(new_err_cls, new_err, traceback) + + +if not PY33: + def wrap_error(func, *args, **kw): + """ + Wrap socket.error, IOError, OSError, select.error to raise new specialized + exceptions of Python 3.3 like InterruptedError (PEP 3151). + """ + try: + return func(*args, **kw) + except (socket.error, IOError, OSError) as exc: + if ssl is not None and isinstance(exc, ssl.SSLError): + raise + if hasattr(exc, 'winerror'): + _wrap_error(exc, _MAP_ERRNO, exc.winerror) + # _MAP_ERRNO does not contain all Windows errors. + # For some errors like "file not found", exc.errno should + # be used (ex: ENOENT). + _wrap_error(exc, _MAP_ERRNO, exc.errno) + raise + except select.error as exc: + if exc.args: + _wrap_error(exc, _MAP_ERRNO, exc.args[0]) + raise +else: + def wrap_error(func, *args, **kw): + return func(*args, **kw) diff --git a/trollius/py33_winapi.py b/trollius/py33_winapi.py new file mode 100644 index 00000000..792bc459 --- /dev/null +++ b/trollius/py33_winapi.py @@ -0,0 +1,75 @@ + +__all__ = [ + 'CloseHandle', 'CreateNamedPipe', 'CreateFile', 'ConnectNamedPipe', + 'NULL', + 'GENERIC_READ', 'GENERIC_WRITE', 'OPEN_EXISTING', 'INFINITE', + 'PIPE_ACCESS_INBOUND', + 'PIPE_ACCESS_DUPLEX', 'PIPE_TYPE_MESSAGE', 'PIPE_READMODE_MESSAGE', + 'PIPE_WAIT', 'PIPE_UNLIMITED_INSTANCES', 'NMPWAIT_WAIT_FOREVER', + 'FILE_FLAG_OVERLAPPED', 'FILE_FLAG_FIRST_PIPE_INSTANCE', + 'WaitForMultipleObjects', 'WaitForSingleObject', + 'WAIT_OBJECT_0', 'ERROR_IO_PENDING', + ] + +try: + # FIXME: use _overlapped on Python 3.3? 
see windows_utils.pipe() + from _winapi import ( + CloseHandle, CreateNamedPipe, CreateFile, ConnectNamedPipe, + NULL, + GENERIC_READ, GENERIC_WRITE, OPEN_EXISTING, INFINITE, + PIPE_ACCESS_INBOUND, + PIPE_ACCESS_DUPLEX, PIPE_TYPE_MESSAGE, PIPE_READMODE_MESSAGE, + PIPE_WAIT, PIPE_UNLIMITED_INSTANCES, NMPWAIT_WAIT_FOREVER, + FILE_FLAG_OVERLAPPED, FILE_FLAG_FIRST_PIPE_INSTANCE, + WaitForMultipleObjects, WaitForSingleObject, + WAIT_OBJECT_0, ERROR_IO_PENDING, + ) +except ImportError: + # Python < 3.3 + from _multiprocessing import win32 + import _subprocess + + from trollius import _overlapped + + CloseHandle = win32.CloseHandle + CreateNamedPipe = win32.CreateNamedPipe + CreateFile = win32.CreateFile + NULL = win32.NULL + + GENERIC_READ = win32.GENERIC_READ + GENERIC_WRITE = win32.GENERIC_WRITE + OPEN_EXISTING = win32.OPEN_EXISTING + INFINITE = win32.INFINITE + + PIPE_ACCESS_INBOUND = win32.PIPE_ACCESS_INBOUND + PIPE_ACCESS_DUPLEX = win32.PIPE_ACCESS_DUPLEX + PIPE_READMODE_MESSAGE = win32.PIPE_READMODE_MESSAGE + PIPE_TYPE_MESSAGE = win32.PIPE_TYPE_MESSAGE + PIPE_WAIT = win32.PIPE_WAIT + PIPE_UNLIMITED_INSTANCES = win32.PIPE_UNLIMITED_INSTANCES + NMPWAIT_WAIT_FOREVER = win32.NMPWAIT_WAIT_FOREVER + + FILE_FLAG_OVERLAPPED = 0x40000000 + FILE_FLAG_FIRST_PIPE_INSTANCE = 0x00080000 + + WAIT_OBJECT_0 = _subprocess.WAIT_OBJECT_0 + WaitForSingleObject = _subprocess.WaitForSingleObject + ERROR_IO_PENDING = _overlapped.ERROR_IO_PENDING + + def ConnectNamedPipe(handle, overlapped): + ov = _overlapped.Overlapped() + ov.ConnectNamedPipe(handle) + return ov + + def WaitForMultipleObjects(events, wait_all, timeout): + if not wait_all: + raise NotImplementedError() + + for ev in events: + res = WaitForSingleObject(ev, timeout) + if res != WAIT_OBJECT_0: + err = win32.GetLastError() + msg = _overlapped.FormatMessage(err) + raise WindowsError(err, msg) + + return WAIT_OBJECT_0 diff --git a/trollius/py3_ssl.py b/trollius/py3_ssl.py new file mode 100644 index 00000000..c592ee66 --- /dev/null +++ b/trollius/py3_ssl.py @@ -0,0 +1,149 @@ +""" +Backport SSL functions and exceptions: +- BACKPORT_SSL_ERRORS (bool) +- SSLWantReadError, SSLWantWriteError, SSLEOFError +- BACKPORT_SSL_CONTEXT (bool) +- SSLContext +- wrap_socket() +- wrap_ssl_error() +""" +import errno +import ssl +import sys +from trollius.py33_exceptions import _wrap_error + +__all__ = ["SSLContext", "BACKPORT_SSL_ERRORS", "BACKPORT_SSL_CONTEXT", + "SSLWantReadError", "SSLWantWriteError", "SSLEOFError", + ] + +try: + SSLWantReadError = ssl.SSLWantReadError + SSLWantWriteError = ssl.SSLWantWriteError + SSLEOFError = ssl.SSLEOFError + BACKPORT_SSL_ERRORS = False +except AttributeError: + # Python < 3.3 + BACKPORT_SSL_ERRORS = True + + class SSLWantReadError(ssl.SSLError): + pass + + class SSLWantWriteError(ssl.SSLError): + pass + + class SSLEOFError(ssl.SSLError): + pass + + +try: + SSLContext = ssl.SSLContext + BACKPORT_SSL_CONTEXT = False + wrap_socket = ssl.wrap_socket +except AttributeError: + # Python < 3.2 + BACKPORT_SSL_CONTEXT = True + + if (sys.version_info < (2, 6, 6)): + # SSLSocket constructor has bugs in Python older than 2.6.6: + # http://bugs.python.org/issue5103 + # http://bugs.python.org/issue7943 + from socket import socket, error as socket_error, _delegate_methods + import _ssl + + class BackportSSLSocket(ssl.SSLSocket): + # Override SSLSocket.__init__() + def __init__(self, sock, keyfile=None, certfile=None, + server_side=False, cert_reqs=ssl.CERT_NONE, + ssl_version=ssl.PROTOCOL_SSLv23, ca_certs=None, + do_handshake_on_connect=True, + 
suppress_ragged_eofs=True):
+                socket.__init__(self, _sock=sock._sock)
+                # The initializer for socket overrides the methods send(),
+                # recv(), etc. in the instance, which we don't need -- but we
+                # want to provide the methods defined in SSLSocket.
+                for attr in _delegate_methods:
+                    try:
+                        delattr(self, attr)
+                    except AttributeError:
+                        pass
+
+                if certfile and not keyfile:
+                    keyfile = certfile
+                # see if it's connected
+                try:
+                    socket.getpeername(self)
+                except socket_error as e:
+                    if e.errno != errno.ENOTCONN:
+                        raise
+                    # no, no connection yet
+                    self._connected = False
+                    self._sslobj = None
+                else:
+                    # yes, create the SSL object
+                    self._connected = True
+                    self._sslobj = _ssl.sslwrap(self._sock, server_side,
+                                                keyfile, certfile,
+                                                cert_reqs, ssl_version, ca_certs)
+                    if do_handshake_on_connect:
+                        self.do_handshake()
+                self.keyfile = keyfile
+                self.certfile = certfile
+                self.cert_reqs = cert_reqs
+                self.ssl_version = ssl_version
+                self.ca_certs = ca_certs
+                self.do_handshake_on_connect = do_handshake_on_connect
+                self.suppress_ragged_eofs = suppress_ragged_eofs
+                self._makefile_refs = 0
+
+        def wrap_socket(sock, server_hostname=None, **kwargs):
+            # ignore server_hostname parameter, not supported
+            kwargs.pop('server_hostname', None)
+            return BackportSSLSocket(sock, **kwargs)
+    else:
+        _wrap_socket = ssl.wrap_socket
+
+        def wrap_socket(sock, **kwargs):
+            # ignore server_hostname parameter, not supported
+            kwargs.pop('server_hostname', None)
+            return _wrap_socket(sock, **kwargs)
+
+
+    class SSLContext(object):
+        def __init__(self, protocol=ssl.PROTOCOL_SSLv23):
+            self.protocol = protocol
+            self.certfile = None
+            self.keyfile = None
+
+        def load_cert_chain(self, certfile, keyfile):
+            self.certfile = certfile
+            self.keyfile = keyfile
+
+        def wrap_socket(self, sock, **kwargs):
+            return wrap_socket(sock,
+                               ssl_version=self.protocol,
+                               certfile=self.certfile,
+                               keyfile=self.keyfile,
+                               **kwargs)
+
+        @property
+        def verify_mode(self):
+            return ssl.CERT_NONE
+
+
+if BACKPORT_SSL_ERRORS:
+    _MAP_ERRORS = {
+        ssl.SSL_ERROR_WANT_READ: SSLWantReadError,
+        ssl.SSL_ERROR_WANT_WRITE: SSLWantWriteError,
+        ssl.SSL_ERROR_EOF: SSLEOFError,
+    }
+
+    def wrap_ssl_error(func, *args, **kw):
+        try:
+            return func(*args, **kw)
+        except ssl.SSLError as exc:
+            if exc.args:
+                _wrap_error(exc, _MAP_ERRORS, exc.args[0])
+            raise
+else:
+    def wrap_ssl_error(func, *args, **kw):
+        return func(*args, **kw)
diff --git a/trollius/queues.py b/trollius/queues.py
new file mode 100644
index 00000000..18167ab7
--- /dev/null
+++ b/trollius/queues.py
@@ -0,0 +1,325 @@
+"""Queues"""
+
+__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']
+
+import collections
+import heapq
+
+from . import compat
+from . import events
+from . import futures
+from . import locks
+from .coroutines import coroutine, From, Return
+
+
+class QueueEmpty(Exception):
+    """Exception raised when Queue.get_nowait() is called on a Queue object
+    which is empty.
+    """
+    pass
+
+
+class QueueFull(Exception):
+    """Exception raised when the Queue.put_nowait() method is called on a
+    Queue object which is full.
+    """
+    pass
+
+
+class Queue(object):
+    """A queue, useful for coordinating producer and consumer coroutines.
+
+    If maxsize is less than or equal to zero, the queue size is infinite. If
+    it is an integer greater than 0, then "yield From(put())" will block when
+    the queue reaches maxsize, until an item is removed by get().
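+
+    A minimal producer/consumer sketch (illustrative; 'loop' and 'item' are
+    placeholders):
+
+        q = Queue(maxsize=10, loop=loop)
+        yield From(q.put(item))     # blocks while the queue is full
+        item = yield From(q.get())  # blocks while the queue is empty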
+
+    Unlike the standard library Queue, you can reliably know this Queue's size
+    with qsize(), since your single-threaded asyncio application won't be
+    interrupted between calling qsize() and doing an operation on the Queue.
+    """
+
+    def __init__(self, maxsize=0, loop=None):
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+        self._maxsize = maxsize
+
+        # Futures.
+        self._getters = collections.deque()
+        # Futures
+        self._putters = collections.deque()
+        self._unfinished_tasks = 0
+        self._finished = locks.Event(loop=self._loop)
+        self._finished.set()
+        self._init(maxsize)
+
+    # These three are overridable in subclasses.
+
+    def _init(self, maxsize):
+        self._queue = collections.deque()
+
+    def _get(self):
+        return self._queue.popleft()
+
+    def _put(self, item):
+        self._queue.append(item)
+
+    # End of the overridable methods.
+
+    def __put_internal(self, item):
+        self._put(item)
+        self._unfinished_tasks += 1
+        self._finished.clear()
+
+    def __repr__(self):
+        return '<{0} at {1:#x} {2}>'.format(
+            type(self).__name__, id(self), self._format())
+
+    def __str__(self):
+        return '<{0} {1}>'.format(type(self).__name__, self._format())
+
+    def _format(self):
+        result = 'maxsize={0!r}'.format(self._maxsize)
+        if getattr(self, '_queue', None):
+            result += ' _queue={0!r}'.format(list(self._queue))
+        if self._getters:
+            result += ' _getters[{0}]'.format(len(self._getters))
+        if self._putters:
+            result += ' _putters[{0}]'.format(len(self._putters))
+        if self._unfinished_tasks:
+            result += ' tasks={0}'.format(self._unfinished_tasks)
+        return result
+
+    def _consume_done_getters(self):
+        # Delete waiters at the head of the get() queue who've timed out.
+        while self._getters and self._getters[0].done():
+            self._getters.popleft()
+
+    def _consume_done_putters(self):
+        # Delete waiters at the head of the put() queue who've timed out.
+        while self._putters and self._putters[0].done():
+            self._putters.popleft()
+
+    def qsize(self):
+        """Number of items in the queue."""
+        return len(self._queue)
+
+    @property
+    def maxsize(self):
+        """Number of items allowed in the queue."""
+        return self._maxsize
+
+    def empty(self):
+        """Return True if the queue is empty, False otherwise."""
+        return not self._queue
+
+    def full(self):
+        """Return True if there are maxsize items in the queue.
+
+        Note: if the Queue was initialized with maxsize=0 (the default),
+        then full() is never True.
+        """
+        if self._maxsize <= 0:
+            return False
+        else:
+            return self.qsize() >= self._maxsize
+
+    @coroutine
+    def put(self, item):
+        """Put an item into the queue.
+
+        If the queue is full, wait until a free slot is available before
+        adding the item.
+
+        This method is a coroutine.
+        """
+        self._consume_done_getters()
+        if self._getters:
+            assert not self._queue, (
+                'queue non-empty, why are getters waiting?')
+
+            getter = self._getters.popleft()
+            self.__put_internal(item)
+
+            # getter cannot be cancelled, we just removed done getters
+            getter.set_result(self._get())
+
+        elif self._maxsize > 0 and self._maxsize <= self.qsize():
+            waiter = futures.Future(loop=self._loop)
+
+            self._putters.append(waiter)
+            yield From(waiter)
+            self._put(item)
+
+        else:
+            self.__put_internal(item)
+
+    def put_nowait(self, item):
+        """Put an item into the queue without blocking.
+
+        If no free slot is immediately available, raise QueueFull.
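+
+        Illustrative usage (a sketch):
+
+            try:
+                q.put_nowait(item)
+            except QueueFull:
+                pass  # queue is at maxsize; back off or drop the item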
+ """ + self._consume_done_getters() + if self._getters: + assert not self._queue, ( + 'queue non-empty, why are getters waiting?') + + getter = self._getters.popleft() + self.__put_internal(item) + + # getter cannot be cancelled, we just removed done getters + getter.set_result(self._get()) + + elif self._maxsize > 0 and self._maxsize <= self.qsize(): + raise QueueFull + else: + self.__put_internal(item) + + @coroutine + def get(self): + """Remove and return an item from the queue. + + If queue is empty, wait until an item is available. + + This method is a coroutine. + """ + self._consume_done_putters() + if self._putters: + assert self.full(), 'queue not full, why are putters waiting?' + putter = self._putters.popleft() + + # When a getter runs and frees up a slot so this putter can + # run, we need to defer the put for a tick to ensure that + # getters and putters alternate perfectly. See + # ChannelTest.test_wait. + self._loop.call_soon(putter._set_result_unless_cancelled, None) + + raise Return(self._get()) + + elif self.qsize(): + raise Return(self._get()) + else: + waiter = futures.Future(loop=self._loop) + self._getters.append(waiter) + try: + value = (yield From(waiter)) + raise Return(value) + except futures.CancelledError: + # if we get CancelledError, it means someone cancelled this + # get() coroutine. But there is a chance that the waiter + # already is ready and contains an item that has just been + # removed from the queue. In this case, we need to put the item + # back into the front of the queue. This get() must either + # succeed without fault or, if it gets cancelled, it must be as + # if it never happened. + if waiter.done(): + self._put_it_back(waiter.result()) + raise + + def _put_it_back(self, item): + """ + This is called when we have a waiter to get() an item and this waiter + gets cancelled. In this case, we put the item back: wake up another + waiter or put it in the _queue. + """ + self._consume_done_getters() + if self._getters: + assert not self._queue, ( + 'queue non-empty, why are getters waiting?') + + getter = self._getters.popleft() + self.__put_internal(item) + + # getter cannot be cancelled, we just removed done getters + getter.set_result(item) + else: + self._queue.appendleft(item) + + def get_nowait(self): + """Remove and return an item from the queue. + + Return an item if one is immediately available, else raise QueueEmpty. + """ + self._consume_done_putters() + if self._putters: + assert self.full(), 'queue not full, why are putters waiting?' + putter = self._putters.popleft() + # Wake putter on next tick. + + # getter cannot be cancelled, we just removed done putters + putter.set_result(None) + + return self._get() + + elif self.qsize(): + return self._get() + else: + raise QueueEmpty + + def task_done(self): + """Indicate that a formerly enqueued task is complete. + + Used by queue consumers. For each get() used to fetch a task, + a subsequent call to task_done() tells the queue that the processing + on the task is complete. + + If a join() is currently blocking, it will resume when all items have + been processed (meaning that a task_done() call was received for every + item that had been put() into the queue). + + Raises ValueError if called more times than there were items placed in + the queue. 
+ """ + if self._unfinished_tasks <= 0: + raise ValueError('task_done() called too many times') + self._unfinished_tasks -= 1 + if self._unfinished_tasks == 0: + self._finished.set() + + @coroutine + def join(self): + """Block until all items in the queue have been gotten and processed. + + The count of unfinished tasks goes up whenever an item is added to the + queue. The count goes down whenever a consumer calls task_done() to + indicate that the item was retrieved and all work on it is complete. + When the count of unfinished tasks drops to zero, join() unblocks. + """ + if self._unfinished_tasks > 0: + yield From(self._finished.wait()) + + +class PriorityQueue(Queue): + """A subclass of Queue; retrieves entries in priority order (lowest first). + + Entries are typically tuples of the form: (priority number, data). + """ + + def _init(self, maxsize): + self._queue = [] + + def _put(self, item, heappush=heapq.heappush): + heappush(self._queue, item) + + def _get(self, heappop=heapq.heappop): + return heappop(self._queue) + + +class LifoQueue(Queue): + """A subclass of Queue that retrieves most recently added entries first.""" + + def _init(self, maxsize): + self._queue = [] + + def _put(self, item): + self._queue.append(item) + + def _get(self): + return self._queue.pop() + + +if not compat.PY35: + JoinableQueue = Queue + """Deprecated alias for Queue.""" + __all__.append('JoinableQueue') diff --git a/trollius/selector_events.py b/trollius/selector_events.py new file mode 100644 index 00000000..67ef26e6 --- /dev/null +++ b/trollius/selector_events.py @@ -0,0 +1,1092 @@ +"""Event loop using a selector and related classes. + +A selector is a "notify-when-ready" multiplexer. For a subclass which +also includes support for signal handling, see the unix_events sub-module. +""" + +__all__ = ['BaseSelectorEventLoop'] + +import collections +import errno +import functools +import socket +import sys +import warnings +try: + import ssl + from .py3_ssl import wrap_ssl_error, SSLWantReadError, SSLWantWriteError +except ImportError: # pragma: no cover + ssl = None + +from . import base_events +from . import compat +from . import constants +from . import events +from . import futures +from . import selectors +from . import sslproto +from . import transports +from .compat import flatten_bytes +from .coroutines import coroutine, From +from .log import logger +from .py33_exceptions import (wrap_error, + BlockingIOError, InterruptedError, ConnectionAbortedError, BrokenPipeError, + ConnectionResetError) + +# On Mac OS 10.6 with Python 2.6.1 or OpenIndiana 148 with Python 2.6.4, +# _SelectorSslTransport._read_ready() hangs if the socket has no data. +# Example: test_events.test_create_server_ssl() +_SSL_REQUIRES_SELECT = (sys.version_info < (2, 6, 6)) +if _SSL_REQUIRES_SELECT: + import select + + +def _get_socket_error(sock, address): + err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + # Jump to the except clause below. + raise OSError(err, 'Connect call failed %s' % (address,)) + + +def _test_selector_event(selector, fd, event): + # Test if the selector is monitoring 'event' events + # for the file descriptor 'fd'. + try: + key = selector.get_key(fd) + except KeyError: + return False + else: + return bool(key.events & event) + + +class BaseSelectorEventLoop(base_events.BaseEventLoop): + """Selector event loop. + + See events.EventLoop for API specification. 
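+
+    A minimal usage sketch (``sock`` and ``on_readable`` are illustrative;
+    applications normally get a concrete subclass from the event loop
+    policy):
+
+        loop = trollius.get_event_loop()
+        loop.add_reader(sock.fileno(), on_readable, sock)
+        loop.run_forever()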
+ """ + + def __init__(self, selector=None): + super(BaseSelectorEventLoop, self).__init__() + + if selector is None: + selector = selectors.DefaultSelector() + logger.debug('Using selector: %s', selector.__class__.__name__) + self._selector = selector + self._make_self_pipe() + + def _make_socket_transport(self, sock, protocol, waiter=None, + extra=None, server=None): + return _SelectorSocketTransport(self, sock, protocol, waiter, + extra, server) + + def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None, + server_side=False, server_hostname=None, + extra=None, server=None): + if not sslproto._is_sslproto_available(): + return self._make_legacy_ssl_transport( + rawsock, protocol, sslcontext, waiter, + server_side=server_side, server_hostname=server_hostname, + extra=extra, server=server) + + ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter, + server_side, server_hostname) + _SelectorSocketTransport(self, rawsock, ssl_protocol, + extra=extra, server=server) + return ssl_protocol._app_transport + + def _make_legacy_ssl_transport(self, rawsock, protocol, sslcontext, + waiter, + server_side=False, server_hostname=None, + extra=None, server=None): + # Use the legacy API: SSL_write, SSL_read, etc. The legacy API is used + # on Python 3.4 and older, when ssl.MemoryBIO is not available. + return _SelectorSslTransport( + self, rawsock, protocol, sslcontext, waiter, + server_side, server_hostname, extra, server) + + def _make_datagram_transport(self, sock, protocol, + address=None, waiter=None, extra=None): + return _SelectorDatagramTransport(self, sock, protocol, + address, waiter, extra) + + def close(self): + if self.is_running(): + raise RuntimeError("Cannot close a running event loop") + if self.is_closed(): + return + self._close_self_pipe() + super(BaseSelectorEventLoop, self).close() + if self._selector is not None: + self._selector.close() + self._selector = None + + def _socketpair(self): + raise NotImplementedError + + def _close_self_pipe(self): + self.remove_reader(self._ssock.fileno()) + self._ssock.close() + self._ssock = None + self._csock.close() + self._csock = None + self._internal_fds -= 1 + + def _make_self_pipe(self): + # A self-socket, really. :-) + self._ssock, self._csock = self._socketpair() + self._ssock.setblocking(False) + self._csock.setblocking(False) + self._internal_fds += 1 + self.add_reader(self._ssock.fileno(), self._read_from_self) + + def _process_self_data(self, data): + pass + + def _read_from_self(self): + while True: + try: + data = wrap_error(self._ssock.recv, 4096) + if not data: + break + self._process_self_data(data) + except InterruptedError: + continue + except BlockingIOError: + break + + def _write_to_self(self): + # This may be called from a different thread, possibly after + # _close_self_pipe() has been called or even while it is + # running. Guard for self._csock being None or closed. When + # a socket is closed, send() raises OSError (with errno set to + # EBADF, but let's not rely on the exact error code). 
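+        # (This is the classic self-pipe trick: call_soon_threadsafe()
+        # wakes up a loop blocked in select()/poll() by writing a single
+        # byte here; the read end was registered in _make_self_pipe().)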
+        csock = self._csock
+        if csock is not None:
+            try:
+                wrap_error(csock.send, b'\0')
+            except OSError:
+                if self._debug:
+                    logger.debug("Failed to write a null byte into the "
+                                 "self-pipe socket",
+                                 exc_info=True)
+
+    def _start_serving(self, protocol_factory, sock,
+                       sslcontext=None, server=None):
+        self.add_reader(sock.fileno(), self._accept_connection,
+                        protocol_factory, sock, sslcontext, server)
+
+    def _accept_connection(self, protocol_factory, sock,
+                           sslcontext=None, server=None):
+        try:
+            conn, addr = wrap_error(sock.accept)
+            if self._debug:
+                logger.debug("%r got a new connection from %r: %r",
+                             server, addr, conn)
+            conn.setblocking(False)
+        except (BlockingIOError, InterruptedError, ConnectionAbortedError):
+            pass  # False alarm.
+        except socket.error as exc:
+            # There's nowhere to send the error, so just log it.
+            if exc.errno in (errno.EMFILE, errno.ENFILE,
+                             errno.ENOBUFS, errno.ENOMEM):
+                # Some platforms (e.g. Linux) keep reporting the FD as
+                # ready, so we remove the read handler temporarily.
+                # We'll try again in a while.
+                self.call_exception_handler({
+                    'message': 'socket.accept() out of system resources',
+                    'exception': exc,
+                    'socket': sock,
+                })
+                self.remove_reader(sock.fileno())
+                self.call_later(constants.ACCEPT_RETRY_DELAY,
+                                self._start_serving,
+                                protocol_factory, sock, sslcontext, server)
+            else:
+                raise  # The event loop will catch, log and ignore it.
+        else:
+            extra = {'peername': addr}
+            accept = self._accept_connection2(protocol_factory, conn, extra,
+                                              sslcontext, server)
+            self.create_task(accept)
+
+    @coroutine
+    def _accept_connection2(self, protocol_factory, conn, extra,
+                            sslcontext=None, server=None):
+        protocol = None
+        transport = None
+        try:
+            protocol = protocol_factory()
+            waiter = futures.Future(loop=self)
+            if sslcontext:
+                transport = self._make_ssl_transport(
+                    conn, protocol, sslcontext, waiter=waiter,
+                    server_side=True, extra=extra, server=server)
+            else:
+                transport = self._make_socket_transport(
+                    conn, protocol, waiter=waiter, extra=extra,
+                    server=server)
+
+            try:
+                yield From(waiter)
+            except:
+                transport.close()
+                raise
+
+            # It's now up to the protocol to handle the connection.
+        except Exception as exc:
+            if self._debug:
+                context = {
+                    'message': ('Error on transport creation '
+                                'for incoming connection'),
+                    'exception': exc,
+                }
+                if protocol is not None:
+                    context['protocol'] = protocol
+                if transport is not None:
+                    context['transport'] = transport
+                self.call_exception_handler(context)
+
+    def add_reader(self, fd, callback, *args):
+        """Add a reader callback."""
+        self._check_closed()
+        handle = events.Handle(callback, args, self)
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            self._selector.register(fd, selectors.EVENT_READ,
+                                    (handle, None))
+        else:
+            mask, (reader, writer) = key.events, key.data
+            self._selector.modify(fd, mask | selectors.EVENT_READ,
+                                  (handle, writer))
+            if reader is not None:
+                reader.cancel()
+
+    def remove_reader(self, fd):
+        """Remove a reader callback."""
+        if self.is_closed():
+            return False
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            return False
+        else:
+            mask, (reader, writer) = key.events, key.data
+            mask &= ~selectors.EVENT_READ
+            if not mask:
+                self._selector.unregister(fd)
+            else:
+                self._selector.modify(fd, mask, (None, writer))
+
+            if reader is not None:
+                reader.cancel()
+                return True
+            else:
+                return False
+
+    def add_writer(self, fd, callback, *args):
+        """Add a writer callback."""
+        self._check_closed()
+        handle = events.Handle(callback, args, self)
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            self._selector.register(fd, selectors.EVENT_WRITE,
+                                    (None, handle))
+        else:
+            mask, (reader, writer) = key.events, key.data
+            self._selector.modify(fd, mask | selectors.EVENT_WRITE,
+                                  (reader, handle))
+            if writer is not None:
+                writer.cancel()
+
+    def remove_writer(self, fd):
+        """Remove a writer callback."""
+        if self.is_closed():
+            return False
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            return False
+        else:
+            mask, (reader, writer) = key.events, key.data
+            # Remove both writer and connector.
+            mask &= ~selectors.EVENT_WRITE
+            if not mask:
+                self._selector.unregister(fd)
+            else:
+                self._selector.modify(fd, mask, (reader, None))
+
+            if writer is not None:
+                writer.cancel()
+                return True
+            else:
+                return False
+
+    def sock_recv(self, sock, n):
+        """Receive data from the socket.
+
+        The return value is a bytes object representing the data received.
+        The maximum amount of data to be received at once is specified by n.
+
+        This method is a coroutine.
+        """
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        fut = futures.Future(loop=self)
+        self._sock_recv(fut, False, sock, n)
+        return fut
+
+    def _sock_recv(self, fut, registered, sock, n):
+        # _sock_recv() can add itself as an I/O callback if the operation
+        # can't be done immediately. Don't use it directly, call sock_recv().
+        fd = sock.fileno()
+        if registered:
+            # Remove the callback early. It should be rare that the
+            # selector says the fd is ready but the call still returns
+            # EAGAIN, and I am willing to take a hit in that case in
+            # order to simplify the common case.
+            self.remove_reader(fd)
+        if fut.cancelled():
+            return
+        try:
+            data = wrap_error(sock.recv, n)
+        except (BlockingIOError, InterruptedError):
+            self.add_reader(fd, self._sock_recv, fut, True, sock, n)
+        except Exception as exc:
+            fut.set_exception(exc)
+        else:
+            fut.set_result(data)
+
+    def sock_sendall(self, sock, data):
+        """Send data to the socket.
+
+        The socket must be connected to a remote socket.
+        This method continues to send data from *data* until either all data
+        has been sent or an error occurs. None is returned on success. On
+        error, an exception is raised, and there is no way to determine how
+        much data, if any, was successfully processed by the receiving end of
+        the connection.
+
+        This method is a coroutine.
+        """
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        fut = futures.Future(loop=self)
+        if data:
+            self._sock_sendall(fut, False, sock, data)
+        else:
+            fut.set_result(None)
+        return fut
+
+    def _sock_sendall(self, fut, registered, sock, data):
+        fd = sock.fileno()
+
+        if registered:
+            self.remove_writer(fd)
+        if fut.cancelled():
+            return
+
+        try:
+            n = wrap_error(sock.send, data)
+        except (BlockingIOError, InterruptedError):
+            n = 0
+        except Exception as exc:
+            fut.set_exception(exc)
+            return
+
+        if n == len(data):
+            fut.set_result(None)
+        else:
+            if n:
+                data = data[n:]
+            self.add_writer(fd, self._sock_sendall, fut, True, sock, data)
+
+    def sock_connect(self, sock, address):
+        """Connect to a remote socket at address.
+
+        The address must already be resolved to avoid the trap of hanging the
+        entire event loop when the address requires doing a DNS lookup. For
+        example, it must be an IP address, not a hostname, for AF_INET and
+        AF_INET6 address families. Use getaddrinfo() to resolve the hostname
+        asynchronously.
+
+        This method is a coroutine.
+        """
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        fut = futures.Future(loop=self)
+        try:
+            if self._debug:
+                base_events._check_resolved_address(sock, address)
+        except ValueError as err:
+            fut.set_exception(err)
+        else:
+            self._sock_connect(fut, sock, address)
+        return fut
+
+    def _sock_connect(self, fut, sock, address):
+        fd = sock.fileno()
+        try:
+            wrap_error(sock.connect, address)
+        except (BlockingIOError, InterruptedError):
+            # Issue #23618: When the C function connect() fails with EINTR,
+            # the connection runs in the background. We have to wait until
+            # the socket becomes writable to be notified when the connection
+            # succeeds or fails.
+            fut.add_done_callback(functools.partial(self._sock_connect_done,
+                                                    fd))
+            self.add_writer(fd, self._sock_connect_cb, fut, sock, address)
+        except Exception as exc:
+            fut.set_exception(exc)
+        else:
+            fut.set_result(None)
+
+    def _sock_connect_done(self, fd, fut):
+        self.remove_writer(fd)
+
+    def _sock_connect_cb(self, fut, sock, address):
+        if fut.cancelled():
+            return
+
+        try:
+            wrap_error(_get_socket_error, sock, address)
+        except (BlockingIOError, InterruptedError):
+            # socket is still registered, the callback will be retried later
+            pass
+        except Exception as exc:
+            fut.set_exception(exc)
+        else:
+            fut.set_result(None)
+
+    def sock_accept(self, sock):
+        """Accept a connection.
+
+        The socket must be bound to an address and listening for connections.
+        The return value is a pair (conn, address) where conn is a new socket
+        object usable to send and receive data on the connection, and address
+        is the address bound to the socket on the other end of the connection.
+
+        This method is a coroutine.
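+
+        Usage sketch from inside a coroutine (``server_sock`` is an
+        illustrative non-blocking, listening socket):
+
+            conn, addr = yield From(loop.sock_accept(server_sock))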
+        """
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        fut = futures.Future(loop=self)
+        self._sock_accept(fut, False, sock)
+        return fut
+
+    def _sock_accept(self, fut, registered, sock):
+        fd = sock.fileno()
+        if registered:
+            self.remove_reader(fd)
+        if fut.cancelled():
+            return
+        try:
+            conn, address = wrap_error(sock.accept)
+            conn.setblocking(False)
+        except (BlockingIOError, InterruptedError):
+            self.add_reader(fd, self._sock_accept, fut, True, sock)
+        except Exception as exc:
+            fut.set_exception(exc)
+        else:
+            fut.set_result((conn, address))
+
+    def _process_events(self, event_list):
+        for key, mask in event_list:
+            fileobj, (reader, writer) = key.fileobj, key.data
+            if mask & selectors.EVENT_READ and reader is not None:
+                if reader._cancelled:
+                    self.remove_reader(fileobj)
+                else:
+                    self._add_callback(reader)
+            if mask & selectors.EVENT_WRITE and writer is not None:
+                if writer._cancelled:
+                    self.remove_writer(fileobj)
+                else:
+                    self._add_callback(writer)
+
+    def _stop_serving(self, sock):
+        self.remove_reader(sock.fileno())
+        sock.close()
+
+
+class _SelectorTransport(transports._FlowControlMixin,
+                         transports.Transport):
+
+    max_size = 256 * 1024  # Buffer size passed to recv().
+
+    _buffer_factory = bytearray  # Constructs initial value for self._buffer.
+
+    # Attribute used in the destructor: it must be set even if the constructor
+    # is not called (see _SelectorSslTransport which may start by raising an
+    # exception)
+    _sock = None
+
+    def __init__(self, loop, sock, protocol, extra=None, server=None):
+        super(_SelectorTransport, self).__init__(extra, loop)
+        self._extra['socket'] = sock
+        self._extra['sockname'] = sock.getsockname()
+        if 'peername' not in self._extra:
+            try:
+                self._extra['peername'] = sock.getpeername()
+            except socket.error:
+                self._extra['peername'] = None
+        self._sock = sock
+        self._sock_fd = sock.fileno()
+        self._protocol = protocol
+        self._protocol_connected = True
+        self._server = server
+        self._buffer = self._buffer_factory()
+        self._conn_lost = 0  # Set when call to connection_lost scheduled.
+        self._closing = False  # Set when close() called.
+        if self._server is not None:
+            self._server._attach()
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self._sock is None:
+            info.append('closed')
+        elif self._closing:
+            info.append('closing')
+        info.append('fd=%s' % self._sock_fd)
+        # test if the transport was closed
+        if self._loop is not None and not self._loop.is_closed():
+            polling = _test_selector_event(self._loop._selector,
+                                           self._sock_fd,
+                                           selectors.EVENT_READ)
+            if polling:
+                info.append('read=polling')
+            else:
+                info.append('read=idle')
+
+            polling = _test_selector_event(self._loop._selector,
+                                           self._sock_fd,
+                                           selectors.EVENT_WRITE)
+            if polling:
+                state = 'polling'
+            else:
+                state = 'idle'
+
+            bufsize = self.get_write_buffer_size()
+            info.append('write=<%s, bufsize=%s>' % (state, bufsize))
+        return '<%s>' % ' '.join(info)
+
+    def abort(self):
+        self._force_close(None)
+
+    def close(self):
+        if self._closing:
+            return
+        self._closing = True
+        self._loop.remove_reader(self._sock_fd)
+        if not self._buffer:
+            self._conn_lost += 1
+            self._loop.call_soon(self._call_connection_lost, None)
+
+    # On Python 3.3 and older, objects with a destructor that are part of a
+    # reference cycle are never destroyed. This is no longer the case on
+    # Python 3.4, thanks to PEP 442.
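+    # (Running under "python -W always", a leaked transport therefore
+    # surfaces as the ResourceWarning emitted below.)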
+ if compat.PY34: + def __del__(self): + if self._sock is not None: + warnings.warn("unclosed transport %r" % self, ResourceWarning) + self._sock.close() + + def _fatal_error(self, exc, message='Fatal error on transport'): + # Should be called from exception handler only. + if isinstance(exc, (BrokenPipeError, + ConnectionResetError, ConnectionAbortedError)): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + self._force_close(exc) + + def _force_close(self, exc): + if self._conn_lost: + return + if self._buffer: + del self._buffer[:] + self._loop.remove_writer(self._sock_fd) + if not self._closing: + self._closing = True + self._loop.remove_reader(self._sock_fd) + self._conn_lost += 1 + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + try: + if self._protocol_connected: + self._protocol.connection_lost(exc) + finally: + self._sock.close() + self._sock = None + self._protocol = None + self._loop = None + server = self._server + if server is not None: + server._detach() + self._server = None + + def get_write_buffer_size(self): + return len(self._buffer) + + +class _SelectorSocketTransport(_SelectorTransport): + + def __init__(self, loop, sock, protocol, waiter=None, + extra=None, server=None): + super(_SelectorSocketTransport, self).__init__(loop, sock, protocol, extra, server) + self._eof = False + self._paused = False + + self._loop.call_soon(self._protocol.connection_made, self) + # only start reading when connection_made() has been called + self._loop.call_soon(self._loop.add_reader, + self._sock_fd, self._read_ready) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(waiter._set_result_unless_cancelled, None) + + def pause_reading(self): + if self._closing: + raise RuntimeError('Cannot pause_reading() when closing') + if self._paused: + raise RuntimeError('Already paused') + self._paused = True + self._loop.remove_reader(self._sock_fd) + if self._loop.get_debug(): + logger.debug("%r pauses reading", self) + + def resume_reading(self): + if not self._paused: + raise RuntimeError('Not paused') + self._paused = False + if self._closing: + return + self._loop.add_reader(self._sock_fd, self._read_ready) + if self._loop.get_debug(): + logger.debug("%r resumes reading", self) + + def _read_ready(self): + try: + data = wrap_error(self._sock.recv, self.max_size) + except (BlockingIOError, InterruptedError): + pass + except Exception as exc: + self._fatal_error(exc, 'Fatal read error on socket transport') + else: + if data: + self._protocol.data_received(data) + else: + if self._loop.get_debug(): + logger.debug("%r received EOF", self) + keep_open = self._protocol.eof_received() + if keep_open: + # We're keeping the connection open so the + # protocol can write more, but we still can't + # receive more, so remove the reader callback. + self._loop.remove_reader(self._sock_fd) + else: + self.close() + + def write(self, data): + data = flatten_bytes(data) + if self._eof: + raise RuntimeError('Cannot call write() after write_eof()') + if not data: + return + + if self._conn_lost: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + # Optimization: try to send now. 
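+            # (If the kernel accepts only part of the payload, the remainder
+            # falls through to the buffering path below and is flushed from
+            # _write_ready() once the selector reports the fd as writable.)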
+ try: + n = wrap_error(self._sock.send, data) + except (BlockingIOError, InterruptedError): + pass + except Exception as exc: + self._fatal_error(exc, 'Fatal write error on socket transport') + return + else: + data = data[n:] + if not data: + return + # Not all was written; register write handler. + self._loop.add_writer(self._sock_fd, self._write_ready) + + # Add it to the buffer. + self._buffer.extend(data) + self._maybe_pause_protocol() + + def _write_ready(self): + assert self._buffer, 'Data should not be empty' + + data = flatten_bytes(self._buffer) + try: + n = wrap_error(self._sock.send, data) + except (BlockingIOError, InterruptedError): + pass + except Exception as exc: + self._loop.remove_writer(self._sock_fd) + del self._buffer[:] + self._fatal_error(exc, 'Fatal write error on socket transport') + else: + if n: + del self._buffer[:n] + self._maybe_resume_protocol() # May append to buffer. + if not self._buffer: + self._loop.remove_writer(self._sock_fd) + if self._closing: + self._call_connection_lost(None) + elif self._eof: + self._sock.shutdown(socket.SHUT_WR) + + def write_eof(self): + if self._eof: + return + self._eof = True + if not self._buffer: + self._sock.shutdown(socket.SHUT_WR) + + def can_write_eof(self): + return True + + +class _SelectorSslTransport(_SelectorTransport): + + _buffer_factory = bytearray + + def __init__(self, loop, rawsock, protocol, sslcontext, waiter=None, + server_side=False, server_hostname=None, + extra=None, server=None): + if ssl is None: + raise RuntimeError('stdlib ssl module not available') + + if not sslcontext: + sslcontext = sslproto._create_transport_context(server_side, server_hostname) + + wrap_kwargs = { + 'server_side': server_side, + 'do_handshake_on_connect': False, + } + if server_hostname and not server_side: + wrap_kwargs['server_hostname'] = server_hostname + sslsock = sslcontext.wrap_socket(rawsock, **wrap_kwargs) + + super(_SelectorSslTransport, self).__init__(loop, sslsock, protocol, extra, server) + # the protocol connection is only made after the SSL handshake + self._protocol_connected = False + + self._server_hostname = server_hostname + self._waiter = waiter + self._sslcontext = sslcontext + self._paused = False + + # SSL-specific extra info. 
(peercert is set later) + self._extra.update(sslcontext=sslcontext) + + if self._loop.get_debug(): + logger.debug("%r starts SSL handshake", self) + start_time = self._loop.time() + else: + start_time = None + self._on_handshake(start_time) + + def _wakeup_waiter(self, exc=None): + if self._waiter is None: + return + if not self._waiter.cancelled(): + if exc is not None: + self._waiter.set_exception(exc) + else: + self._waiter.set_result(None) + self._waiter = None + + def _on_handshake(self, start_time): + try: + wrap_ssl_error(self._sock.do_handshake) + except SSLWantReadError: + self._loop.add_reader(self._sock_fd, + self._on_handshake, start_time) + return + except SSLWantWriteError: + self._loop.add_writer(self._sock_fd, + self._on_handshake, start_time) + return + except BaseException as exc: + if self._loop.get_debug(): + logger.warning("%r: SSL handshake failed", + self, exc_info=True) + self._loop.remove_reader(self._sock_fd) + self._loop.remove_writer(self._sock_fd) + self._sock.close() + self._wakeup_waiter(exc) + if isinstance(exc, Exception): + return + else: + raise + + self._loop.remove_reader(self._sock_fd) + self._loop.remove_writer(self._sock_fd) + + peercert = self._sock.getpeercert() + if not hasattr(self._sslcontext, 'check_hostname'): + # Verify hostname if requested, Python 3.4+ uses check_hostname + # and checks the hostname in do_handshake() + if (self._server_hostname and + self._sslcontext.verify_mode != ssl.CERT_NONE): + try: + ssl.match_hostname(peercert, self._server_hostname) + except Exception as exc: + if self._loop.get_debug(): + logger.warning("%r: SSL handshake failed " + "on matching the hostname", + self, exc_info=True) + self._sock.close() + self._wakeup_waiter(exc) + return + + # Add extra info that becomes available after handshake. + self._extra.update(peercert=peercert, + cipher=self._sock.cipher(), + ) + if hasattr(self._sock, 'compression'): + self._extra['compression'] = self._sock.compression() + + self._read_wants_write = False + self._write_wants_read = False + self._loop.add_reader(self._sock_fd, self._read_ready) + self._protocol_connected = True + self._loop.call_soon(self._protocol.connection_made, self) + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(self._wakeup_waiter) + + if self._loop.get_debug(): + dt = self._loop.time() - start_time + logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3) + + def pause_reading(self): + # XXX This is a bit icky, given the comment at the top of + # _read_ready(). Is it possible to evoke a deadlock? I don't + # know, although it doesn't look like it; write() will still + # accept more data for the buffer and eventually the app will + # call resume_reading() again, and things will flow again. 
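+        # (A related subtlety: during an SSL renegotiation a read may first
+        # require a write and vice versa; _read_ready() and _write_ready()
+        # swap the selector registration via the _read_wants_write and
+        # _write_wants_read flags set after the handshake.)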
+ + if self._closing: + raise RuntimeError('Cannot pause_reading() when closing') + if self._paused: + raise RuntimeError('Already paused') + self._paused = True + self._loop.remove_reader(self._sock_fd) + if self._loop.get_debug(): + logger.debug("%r pauses reading", self) + + def resume_reading(self): + if not self._paused: + raise RuntimeError('Not paused') + self._paused = False + if self._closing: + return + self._loop.add_reader(self._sock_fd, self._read_ready) + if self._loop.get_debug(): + logger.debug("%r resumes reading", self) + + def _sock_recv(self): + return wrap_ssl_error(self._sock.recv, self.max_size) + + def _read_ready(self): + if self._write_wants_read: + self._write_wants_read = False + self._write_ready() + + if self._buffer: + self._loop.add_writer(self._sock_fd, self._write_ready) + + try: + if _SSL_REQUIRES_SELECT: + rfds = (self._sock.fileno(),) + rfds = select.select(rfds, (), (), 0.0)[0] + if not rfds: + # False alarm. + return + data = wrap_error(self._sock_recv) + except (BlockingIOError, InterruptedError, SSLWantReadError): + pass + except SSLWantWriteError: + self._read_wants_write = True + self._loop.remove_reader(self._sock_fd) + self._loop.add_writer(self._sock_fd, self._write_ready) + except Exception as exc: + self._fatal_error(exc, 'Fatal read error on SSL transport') + else: + if data: + self._protocol.data_received(data) + else: + try: + if self._loop.get_debug(): + logger.debug("%r received EOF", self) + keep_open = self._protocol.eof_received() + if keep_open: + logger.warning('returning true from eof_received() ' + 'has no effect when using ssl') + finally: + self.close() + + def _write_ready(self): + if self._read_wants_write: + self._read_wants_write = False + self._read_ready() + + if not (self._paused or self._closing): + self._loop.add_reader(self._sock_fd, self._read_ready) + + if self._buffer: + data = flatten_bytes(self._buffer) + try: + n = wrap_error(self._sock.send, data) + except (BlockingIOError, InterruptedError, SSLWantWriteError): + n = 0 + except SSLWantReadError: + n = 0 + self._loop.remove_writer(self._sock_fd) + self._write_wants_read = True + except Exception as exc: + self._loop.remove_writer(self._sock_fd) + del self._buffer[:] + self._fatal_error(exc, 'Fatal write error on SSL transport') + return + + if n: + del self._buffer[:n] + + self._maybe_resume_protocol() # May append to buffer. + + if not self._buffer: + self._loop.remove_writer(self._sock_fd) + if self._closing: + self._call_connection_lost(None) + + def write(self, data): + data = flatten_bytes(data) + if not data: + return + + if self._conn_lost: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + self._loop.add_writer(self._sock_fd, self._write_ready) + + # Add it to the buffer. 
+ self._buffer.extend(data) + self._maybe_pause_protocol() + + def can_write_eof(self): + return False + + +class _SelectorDatagramTransport(_SelectorTransport): + + _buffer_factory = collections.deque + + def __init__(self, loop, sock, protocol, address=None, + waiter=None, extra=None): + super(_SelectorDatagramTransport, self).__init__(loop, sock, + protocol, extra) + self._address = address + self._loop.call_soon(self._protocol.connection_made, self) + # only start reading when connection_made() has been called + self._loop.call_soon(self._loop.add_reader, + self._sock_fd, self._read_ready) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(waiter._set_result_unless_cancelled, None) + + def get_write_buffer_size(self): + return sum(len(data) for data, _ in self._buffer) + + def _read_ready(self): + try: + data, addr = wrap_error(self._sock.recvfrom, self.max_size) + except (BlockingIOError, InterruptedError): + pass + except OSError as exc: + self._protocol.error_received(exc) + except Exception as exc: + self._fatal_error(exc, 'Fatal read error on datagram transport') + else: + self._protocol.datagram_received(data, addr) + + def sendto(self, data, addr=None): + data = flatten_bytes(data) + if not data: + return + + if self._address and addr not in (None, self._address): + raise ValueError('Invalid address: must be None or %s' % + (self._address,)) + + if self._conn_lost and self._address: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + # Attempt to send it right away first. + try: + if self._address: + wrap_error(self._sock.send, data) + else: + wrap_error(self._sock.sendto, data, addr) + return + except (BlockingIOError, InterruptedError): + self._loop.add_writer(self._sock_fd, self._sendto_ready) + except OSError as exc: + self._protocol.error_received(exc) + return + except Exception as exc: + self._fatal_error(exc, + 'Fatal write error on datagram transport') + return + + # Ensure that what we buffer is immutable. + self._buffer.append((bytes(data), addr)) + self._maybe_pause_protocol() + + def _sendto_ready(self): + while self._buffer: + data, addr = self._buffer.popleft() + try: + if self._address: + wrap_error(self._sock.send, data) + else: + wrap_error(self._sock.sendto, data, addr) + except (BlockingIOError, InterruptedError): + self._buffer.appendleft((data, addr)) # Try again later. + break + except OSError as exc: + self._protocol.error_received(exc) + return + except Exception as exc: + self._fatal_error(exc, + 'Fatal write error on datagram transport') + return + + self._maybe_resume_protocol() # May append to buffer. + if not self._buffer: + self._loop.remove_writer(self._sock_fd) + if self._closing: + self._call_connection_lost(None) diff --git a/trollius/selectors.py b/trollius/selectors.py new file mode 100644 index 00000000..2b24e5e0 --- /dev/null +++ b/trollius/selectors.py @@ -0,0 +1,601 @@ +"""Selectors module. + +This module allows high-level and efficient I/O multiplexing, built upon the +`select` module primitives. 
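+
+A minimal usage sketch (``sock`` and ``on_readable`` are illustrative):
+
+    sel = DefaultSelector()
+    sel.register(sock, EVENT_READ, data=on_readable)
+    for key, events in sel.select(timeout=1.0):
+        key.data(key.fileobj)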
+""" + + +from abc import ABCMeta, abstractmethod +from collections import namedtuple, Mapping +import math +import select +import sys + +from .py33_exceptions import wrap_error, InterruptedError +from .compat import integer_types + + +# generic events, that must be mapped to implementation-specific ones +EVENT_READ = (1 << 0) +EVENT_WRITE = (1 << 1) + + +def _fileobj_to_fd(fileobj): + """Return a file descriptor from a file object. + + Parameters: + fileobj -- file object or file descriptor + + Returns: + corresponding file descriptor + + Raises: + ValueError if the object is invalid + """ + if isinstance(fileobj, integer_types): + fd = fileobj + else: + try: + fd = int(fileobj.fileno()) + except (AttributeError, TypeError, ValueError): + raise ValueError("Invalid file object: " + "{0!r}".format(fileobj)) + if fd < 0: + raise ValueError("Invalid file descriptor: {0}".format(fd)) + return fd + + +SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data']) +"""Object used to associate a file object to its backing file descriptor, +selected event mask and attached data.""" + + +class _SelectorMapping(Mapping): + """Mapping of file objects to selector keys.""" + + def __init__(self, selector): + self._selector = selector + + def __len__(self): + return len(self._selector._fd_to_key) + + def __getitem__(self, fileobj): + try: + fd = self._selector._fileobj_lookup(fileobj) + return self._selector._fd_to_key[fd] + except KeyError: + raise KeyError("{0!r} is not registered".format(fileobj)) + + def __iter__(self): + return iter(self._selector._fd_to_key) + + +class BaseSelector(object): + """Selector abstract base class. + + A selector supports registering file objects to be monitored for specific + I/O events. + + A file object is a file descriptor or any object with a `fileno()` method. + An arbitrary object can be attached to the file object, which can be used + for example to store context information, a callback, etc. + + A selector can use various implementations (select(), poll(), epoll()...) + depending on the platform. The default `Selector` class uses the most + efficient implementation on the current platform. + """ + __metaclass__ = ABCMeta + + @abstractmethod + def register(self, fileobj, events, data=None): + """Register a file object. + + Parameters: + fileobj -- file object or file descriptor + events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) + data -- attached data + + Returns: + SelectorKey instance + + Raises: + ValueError if events is invalid + KeyError if fileobj is already registered + OSError if fileobj is closed or otherwise is unacceptable to + the underlying system call (if a system call is made) + + Note: + OSError may or may not be raised + """ + raise NotImplementedError + + @abstractmethod + def unregister(self, fileobj): + """Unregister a file object. + + Parameters: + fileobj -- file object or file descriptor + + Returns: + SelectorKey instance + + Raises: + KeyError if fileobj is not registered + + Note: + If fileobj is registered but has since been closed this does + *not* raise OSError (even if the wrapped syscall does) + """ + raise NotImplementedError + + def modify(self, fileobj, events, data=None): + """Change a registered file object monitored events or attached data. 
+ + Parameters: + fileobj -- file object or file descriptor + events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) + data -- attached data + + Returns: + SelectorKey instance + + Raises: + Anything that unregister() or register() raises + """ + self.unregister(fileobj) + return self.register(fileobj, events, data) + + @abstractmethod + def select(self, timeout=None): + """Perform the actual selection, until some monitored file objects are + ready or a timeout expires. + + Parameters: + timeout -- if timeout > 0, this specifies the maximum wait time, in + seconds + if timeout <= 0, the select() call won't block, and will + report the currently ready file objects + if timeout is None, select() will block until a monitored + file object becomes ready + + Returns: + list of (key, events) for ready file objects + `events` is a bitwise mask of EVENT_READ|EVENT_WRITE + """ + raise NotImplementedError + + def close(self): + """Close the selector. + + This must be called to make sure that any underlying resource is freed. + """ + pass + + def get_key(self, fileobj): + """Return the key associated to a registered file object. + + Returns: + SelectorKey for this file object + """ + mapping = self.get_map() + if mapping is None: + raise RuntimeError('Selector is closed') + try: + return mapping[fileobj] + except KeyError: + raise KeyError("{0!r} is not registered".format(fileobj)) + + @abstractmethod + def get_map(self): + """Return a mapping of file objects to selector keys.""" + raise NotImplementedError + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + +class _BaseSelectorImpl(BaseSelector): + """Base selector implementation.""" + + def __init__(self): + # this maps file descriptors to keys + self._fd_to_key = {} + # read-only mapping returned by get_map() + self._map = _SelectorMapping(self) + + def _fileobj_lookup(self, fileobj): + """Return a file descriptor from a file object. + + This wraps _fileobj_to_fd() to do an exhaustive search in case + the object is invalid but we still have it in our map. This + is used by unregister() so we can unregister an object that + was previously registered even if it is closed. It is also + used by _SelectorMapping. + """ + try: + return _fileobj_to_fd(fileobj) + except ValueError: + # Do an exhaustive search. + for key in self._fd_to_key.values(): + if key.fileobj is fileobj: + return key.fd + # Raise ValueError after all. + raise + + def register(self, fileobj, events, data=None): + if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)): + raise ValueError("Invalid events: {0!r}".format(events)) + + key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) + + if key.fd in self._fd_to_key: + raise KeyError("{0!r} (FD {1}) is already registered" + .format(fileobj, key.fd)) + + self._fd_to_key[key.fd] = key + return key + + def unregister(self, fileobj): + try: + key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) + except KeyError: + raise KeyError("{0!r} is not registered".format(fileobj)) + return key + + def modify(self, fileobj, events, data=None): + # TODO: Subclasses can probably optimize this even further. + try: + key = self._fd_to_key[self._fileobj_lookup(fileobj)] + except KeyError: + raise KeyError("{0!r} is not registered".format(fileobj)) + if events != key.events: + self.unregister(fileobj) + key = self.register(fileobj, events, data) + elif data != key.data: + # Use a shortcut to update the data. 
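+            # (SelectorKey is a namedtuple, so _replace() returns a new key
+            # that keeps fileobj, fd and events while swapping in the new
+            # data.)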
+ key = key._replace(data=data) + self._fd_to_key[key.fd] = key + return key + + def close(self): + self._fd_to_key.clear() + self._map = None + + def get_map(self): + return self._map + + def _key_from_fd(self, fd): + """Return the key associated to a given file descriptor. + + Parameters: + fd -- file descriptor + + Returns: + corresponding key, or None if not found + """ + try: + return self._fd_to_key[fd] + except KeyError: + return None + + +class SelectSelector(_BaseSelectorImpl): + """Select-based selector.""" + + def __init__(self): + super(SelectSelector, self).__init__() + self._readers = set() + self._writers = set() + + def register(self, fileobj, events, data=None): + key = super(SelectSelector, self).register(fileobj, events, data) + if events & EVENT_READ: + self._readers.add(key.fd) + if events & EVENT_WRITE: + self._writers.add(key.fd) + return key + + def unregister(self, fileobj): + key = super(SelectSelector, self).unregister(fileobj) + self._readers.discard(key.fd) + self._writers.discard(key.fd) + return key + + if sys.platform == 'win32': + def _select(self, r, w, _, timeout=None): + r, w, x = select.select(r, w, w, timeout) + return r, w + x, [] + else: + def _select(self, r, w, x, timeout=None): + return select.select(r, w, x, timeout) + + def select(self, timeout=None): + timeout = None if timeout is None else max(timeout, 0) + ready = [] + try: + r, w, _ = wrap_error(self._select, + self._readers, self._writers, [], timeout) + except InterruptedError: + return ready + r = set(r) + w = set(w) + for fd in r | w: + events = 0 + if fd in r: + events |= EVENT_READ + if fd in w: + events |= EVENT_WRITE + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + return ready + + +if hasattr(select, 'poll'): + + class PollSelector(_BaseSelectorImpl): + """Poll-based selector.""" + + def __init__(self): + super(PollSelector, self).__init__() + self._poll = select.poll() + + def register(self, fileobj, events, data=None): + key = super(PollSelector, self).register(fileobj, events, data) + poll_events = 0 + if events & EVENT_READ: + poll_events |= select.POLLIN + if events & EVENT_WRITE: + poll_events |= select.POLLOUT + self._poll.register(key.fd, poll_events) + return key + + def unregister(self, fileobj): + key = super(PollSelector, self).unregister(fileobj) + self._poll.unregister(key.fd) + return key + + def select(self, timeout=None): + if timeout is None: + timeout = None + elif timeout <= 0: + timeout = 0 + else: + # poll() has a resolution of 1 millisecond, round away from + # zero to wait *at least* timeout seconds. 
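+                # (For example, timeout=0.0004 gives ceil(0.4) == 1 ms
+                # instead of truncating to 0, which would turn the call
+                # into a non-blocking poll.)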
+ timeout = int(math.ceil(timeout * 1e3)) + ready = [] + try: + fd_event_list = wrap_error(self._poll.poll, timeout) + except InterruptedError: + return ready + for fd, event in fd_event_list: + events = 0 + if event & ~select.POLLIN: + events |= EVENT_WRITE + if event & ~select.POLLOUT: + events |= EVENT_READ + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + return ready + + +if hasattr(select, 'epoll'): + + class EpollSelector(_BaseSelectorImpl): + """Epoll-based selector.""" + + def __init__(self): + super(EpollSelector, self).__init__() + self._epoll = select.epoll() + + def fileno(self): + return self._epoll.fileno() + + def register(self, fileobj, events, data=None): + key = super(EpollSelector, self).register(fileobj, events, data) + epoll_events = 0 + if events & EVENT_READ: + epoll_events |= select.EPOLLIN + if events & EVENT_WRITE: + epoll_events |= select.EPOLLOUT + self._epoll.register(key.fd, epoll_events) + return key + + def unregister(self, fileobj): + key = super(EpollSelector, self).unregister(fileobj) + try: + self._epoll.unregister(key.fd) + except IOError: + # This can happen if the FD was closed since it + # was registered. + pass + return key + + def select(self, timeout=None): + if timeout is None: + timeout = -1 + elif timeout <= 0: + timeout = 0 + else: + # epoll_wait() has a resolution of 1 millisecond, round away + # from zero to wait *at least* timeout seconds. + timeout = math.ceil(timeout * 1e3) * 1e-3 + + # epoll_wait() expects `maxevents` to be greater than zero; + # we want to make sure that `select()` can be called when no + # FD is registered. + max_ev = max(len(self._fd_to_key), 1) + + ready = [] + try: + fd_event_list = wrap_error(self._epoll.poll, timeout, max_ev) + except InterruptedError: + return ready + for fd, event in fd_event_list: + events = 0 + if event & ~select.EPOLLIN: + events |= EVENT_WRITE + if event & ~select.EPOLLOUT: + events |= EVENT_READ + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + return ready + + def close(self): + self._epoll.close() + super(EpollSelector, self).close() + + +if hasattr(select, 'devpoll'): + + class DevpollSelector(_BaseSelectorImpl): + """Solaris /dev/poll selector.""" + + def __init__(self): + super(DevpollSelector, self).__init__() + self._devpoll = select.devpoll() + + def fileno(self): + return self._devpoll.fileno() + + def register(self, fileobj, events, data=None): + key = super(DevpollSelector, self).register(fileobj, events, data) + poll_events = 0 + if events & EVENT_READ: + poll_events |= select.POLLIN + if events & EVENT_WRITE: + poll_events |= select.POLLOUT + self._devpoll.register(key.fd, poll_events) + return key + + def unregister(self, fileobj): + key = super(DevpollSelector, self).unregister(fileobj) + self._devpoll.unregister(key.fd) + return key + + def select(self, timeout=None): + if timeout is None: + timeout = None + elif timeout <= 0: + timeout = 0 + else: + # devpoll() has a resolution of 1 millisecond, round away from + # zero to wait *at least* timeout seconds. 
+ timeout = math.ceil(timeout * 1e3) + ready = [] + try: + fd_event_list = self._devpoll.poll(timeout) + except InterruptedError: + return ready + for fd, event in fd_event_list: + events = 0 + if event & ~select.POLLIN: + events |= EVENT_WRITE + if event & ~select.POLLOUT: + events |= EVENT_READ + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + return ready + + def close(self): + self._devpoll.close() + super(DevpollSelector, self).close() + + +if hasattr(select, 'kqueue'): + + class KqueueSelector(_BaseSelectorImpl): + """Kqueue-based selector.""" + + def __init__(self): + super(KqueueSelector, self).__init__() + self._kqueue = select.kqueue() + + def fileno(self): + return self._kqueue.fileno() + + def register(self, fileobj, events, data=None): + key = super(KqueueSelector, self).register(fileobj, events, data) + if events & EVENT_READ: + kev = select.kevent(key.fd, select.KQ_FILTER_READ, + select.KQ_EV_ADD) + self._kqueue.control([kev], 0, 0) + if events & EVENT_WRITE: + kev = select.kevent(key.fd, select.KQ_FILTER_WRITE, + select.KQ_EV_ADD) + self._kqueue.control([kev], 0, 0) + return key + + def unregister(self, fileobj): + key = super(KqueueSelector, self).unregister(fileobj) + if key.events & EVENT_READ: + kev = select.kevent(key.fd, select.KQ_FILTER_READ, + select.KQ_EV_DELETE) + try: + self._kqueue.control([kev], 0, 0) + except OSError: + # This can happen if the FD was closed since it + # was registered. + pass + if key.events & EVENT_WRITE: + kev = select.kevent(key.fd, select.KQ_FILTER_WRITE, + select.KQ_EV_DELETE) + try: + self._kqueue.control([kev], 0, 0) + except OSError: + # See comment above. + pass + return key + + def select(self, timeout=None): + timeout = None if timeout is None else max(timeout, 0) + max_ev = len(self._fd_to_key) + ready = [] + try: + kev_list = wrap_error(self._kqueue.control, + None, max_ev, timeout) + except InterruptedError: + return ready + for kev in kev_list: + fd = kev.ident + flag = kev.filter + events = 0 + if flag == select.KQ_FILTER_READ: + events |= EVENT_READ + if flag == select.KQ_FILTER_WRITE: + events |= EVENT_WRITE + + key = self._key_from_fd(fd) + if key: + ready.append((key, events & key.events)) + return ready + + def close(self): + self._kqueue.close() + super(KqueueSelector, self).close() + + +# Choose the best implementation, roughly: +# epoll|kqueue|devpoll > poll > select. +# select() also can't accept a FD > FD_SETSIZE (usually around 1024) +if 'KqueueSelector' in globals(): + DefaultSelector = KqueueSelector +elif 'EpollSelector' in globals(): + DefaultSelector = EpollSelector +elif 'DevpollSelector' in globals(): + DefaultSelector = DevpollSelector +elif 'PollSelector' in globals(): + DefaultSelector = PollSelector +else: + DefaultSelector = SelectSelector diff --git a/trollius/sslproto.py b/trollius/sslproto.py new file mode 100644 index 00000000..1404fd79 --- /dev/null +++ b/trollius/sslproto.py @@ -0,0 +1,683 @@ +import collections +import warnings +try: + import ssl + from .py3_ssl import BACKPORT_SSL_CONTEXT +except ImportError: # pragma: no cover + ssl = None + +from . import compat +from . import protocols +from . import transports +from .log import logger +from .py33_exceptions import BrokenPipeError, ConnectionResetError + + +def _create_transport_context(server_side, server_hostname): + if server_side: + raise ValueError('Server side SSL needs a valid SSLContext') + + # Client side may pass ssl=True to use a default + # context; in that case the sslcontext passed is None. 
+ # The default is secure for client connections. + if hasattr(ssl, 'create_default_context'): + # Python 3.4+: use up-to-date strong settings. + sslcontext = ssl.create_default_context() + if not server_hostname: + sslcontext.check_hostname = False + else: + # Fallback for Python 3.3. + sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + if not BACKPORT_SSL_CONTEXT: + sslcontext.options |= ssl.OP_NO_SSLv2 + sslcontext.options |= ssl.OP_NO_SSLv3 + sslcontext.set_default_verify_paths() + sslcontext.verify_mode = ssl.CERT_REQUIRED + return sslcontext + + +def _is_sslproto_available(): + return hasattr(ssl, "MemoryBIO") + + +# States of an _SSLPipe. +_UNWRAPPED = "UNWRAPPED" +_DO_HANDSHAKE = "DO_HANDSHAKE" +_WRAPPED = "WRAPPED" +_SHUTDOWN = "SHUTDOWN" + +if ssl is not None: + if hasattr(ssl, 'CertificateError'): + _SSL_ERRORS = (ssl.SSLError, ssl.CertificateError) + else: + _SSL_ERRORS = ssl.SSLError + + +class _SSLPipe(object): + """An SSL "Pipe". + + An SSL pipe allows you to communicate with an SSL/TLS protocol instance + through memory buffers. It can be used to implement a security layer for an + existing connection where you don't have access to the connection's file + descriptor, or for some reason you don't want to use it. + + An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode, + data is passed through untransformed. In wrapped mode, application level + data is encrypted to SSL record level data and vice versa. The SSL record + level is the lowest level in the SSL protocol suite and is what travels + as-is over the wire. + + An SslPipe initially is in "unwrapped" mode. To start SSL, call + do_handshake(). To shutdown SSL again, call unwrap(). + """ + + max_size = 256 * 1024 # Buffer size passed to read() + + def __init__(self, context, server_side, server_hostname=None): + """ + The *context* argument specifies the ssl.SSLContext to use. + + The *server_side* argument indicates whether this is a server side or + client side transport. + + The optional *server_hostname* argument can be used to specify the + hostname you are connecting to. You may only specify this parameter if + the _ssl module supports Server Name Indication (SNI). + """ + self._context = context + self._server_side = server_side + self._server_hostname = server_hostname + self._state = _UNWRAPPED + self._incoming = ssl.MemoryBIO() + self._outgoing = ssl.MemoryBIO() + self._sslobj = None + self._need_ssldata = False + self._handshake_cb = None + self._shutdown_cb = None + + @property + def context(self): + """The SSL context passed to the constructor.""" + return self._context + + @property + def ssl_object(self): + """The internal ssl.SSLObject instance. + + Return None if the pipe is not wrapped. + """ + return self._sslobj + + @property + def need_ssldata(self): + """Whether more record level data is needed to complete a handshake + that is currently in progress.""" + return self._need_ssldata + + @property + def wrapped(self): + """ + Whether a security layer is currently in effect. + + Return False during handshake. + """ + return self._state == _WRAPPED + + def do_handshake(self, callback=None): + """Start the SSL handshake. + + Return a list of ssldata. A ssldata element is a list of buffers + + The optional *callback* argument can be used to install a callback that + will be called when the handshake is complete. The callback will be + called with None if successful, else an exception instance. 
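+
+        A rough sketch of driving the handshake (``pipe`` and
+        ``raw_transport`` are illustrative):
+
+            def on_handshake(exc):
+                if exc is not None:
+                    raise exc
+
+            for chunk in pipe.do_handshake(on_handshake):
+                raw_transport.write(chunk)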
+ """ + if self._state != _UNWRAPPED: + raise RuntimeError('handshake in progress or completed') + self._sslobj = self._context.wrap_bio( + self._incoming, self._outgoing, + server_side=self._server_side, + server_hostname=self._server_hostname) + self._state = _DO_HANDSHAKE + self._handshake_cb = callback + ssldata, appdata = self.feed_ssldata(b'', only_handshake=True) + assert len(appdata) == 0 + return ssldata + + def shutdown(self, callback=None): + """Start the SSL shutdown sequence. + + Return a list of ssldata. A ssldata element is a list of buffers + + The optional *callback* argument can be used to install a callback that + will be called when the shutdown is complete. The callback will be + called without arguments. + """ + if self._state == _UNWRAPPED: + raise RuntimeError('no security layer present') + if self._state == _SHUTDOWN: + raise RuntimeError('shutdown in progress') + assert self._state in (_WRAPPED, _DO_HANDSHAKE) + self._state = _SHUTDOWN + self._shutdown_cb = callback + ssldata, appdata = self.feed_ssldata(b'') + assert appdata == [] or appdata == [b''] + return ssldata + + def feed_eof(self): + """Send a potentially "ragged" EOF. + + This method will raise an SSL_ERROR_EOF exception if the EOF is + unexpected. + """ + self._incoming.write_eof() + ssldata, appdata = self.feed_ssldata(b'') + assert appdata == [] or appdata == [b''] + + def feed_ssldata(self, data, only_handshake=False): + """Feed SSL record level data into the pipe. + + The data must be a bytes instance. It is OK to send an empty bytes + instance. This can be used to get ssldata for a handshake initiated by + this endpoint. + + Return a (ssldata, appdata) tuple. The ssldata element is a list of + buffers containing SSL data that needs to be sent to the remote SSL. + + The appdata element is a list of buffers containing plaintext data that + needs to be forwarded to the application. The appdata list may contain + an empty buffer indicating an SSL "close_notify" alert. This alert must + be acknowledged by calling shutdown(). + """ + if self._state == _UNWRAPPED: + # If unwrapped, pass plaintext data straight through. + if data: + appdata = [data] + else: + appdata = [] + return ([], appdata) + + self._need_ssldata = False + if data: + self._incoming.write(data) + + ssldata = [] + appdata = [] + try: + if self._state == _DO_HANDSHAKE: + # Call do_handshake() until it doesn't raise anymore. + self._sslobj.do_handshake() + self._state = _WRAPPED + if self._handshake_cb: + self._handshake_cb(None) + if only_handshake: + return (ssldata, appdata) + # Handshake done: execute the wrapped block + + if self._state == _WRAPPED: + # Main state: read data from SSL until close_notify + while True: + chunk = self._sslobj.read(self.max_size) + appdata.append(chunk) + if not chunk: # close_notify + break + + elif self._state == _SHUTDOWN: + # Call shutdown() until it doesn't raise anymore. + self._sslobj.unwrap() + self._sslobj = None + self._state = _UNWRAPPED + if self._shutdown_cb: + self._shutdown_cb() + + elif self._state == _UNWRAPPED: + # Drain possible plaintext data after close_notify. + appdata.append(self._incoming.read()) + except _SSL_ERRORS as exc: + if getattr(exc, 'errno', None) not in ( + ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE, + ssl.SSL_ERROR_SYSCALL): + if self._state == _DO_HANDSHAKE and self._handshake_cb: + self._handshake_cb(exc) + raise + self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ) + + # Check for record level data that needs to be sent back. 
+        # Happens for the initial handshake and renegotiations.
+        if self._outgoing.pending:
+            ssldata.append(self._outgoing.read())
+        return (ssldata, appdata)
+
+    def feed_appdata(self, data, offset=0):
+        """Feed plaintext data into the pipe.
+
+        Return an (ssldata, offset) tuple. The ssldata element is a list of
+        buffers containing record level data that needs to be sent to the
+        remote SSL instance. The offset is the number of plaintext bytes that
+        were processed, which may be less than the length of data.
+
+        NOTE: In case of short writes, this call MUST be retried with the SAME
+        buffer passed into the *data* argument (i.e. the id() must be the
+        same). This is an OpenSSL requirement. A further particularity is that
+        a short write will always have offset == 0, because the _ssl module
+        does not enable partial writes. And even though the offset is zero,
+        there will still be encrypted data in ssldata.
+        """
+        assert 0 <= offset <= len(data)
+        if self._state == _UNWRAPPED:
+            # pass through data in unwrapped mode
+            if offset < len(data):
+                ssldata = [data[offset:]]
+            else:
+                ssldata = []
+            return (ssldata, len(data))
+
+        ssldata = []
+        view = memoryview(data)
+        while True:
+            self._need_ssldata = False
+            try:
+                if offset < len(view):
+                    offset += self._sslobj.write(view[offset:])
+            except ssl.SSLError as exc:
+                # It is not allowed to call write() after unwrap() until the
+                # close_notify is acknowledged. We return the condition to the
+                # caller as a short write.
+                if exc.reason == 'PROTOCOL_IS_SHUTDOWN':
+                    exc.errno = ssl.SSL_ERROR_WANT_READ
+                if exc.errno not in (ssl.SSL_ERROR_WANT_READ,
+                                     ssl.SSL_ERROR_WANT_WRITE,
+                                     ssl.SSL_ERROR_SYSCALL):
+                    raise
+                self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)
+
+            # See if there's any record level data back for us.
+            if self._outgoing.pending:
+                ssldata.append(self._outgoing.read())
+            if offset == len(view) or self._need_ssldata:
+                break
+        return (ssldata, offset)
+
+
+class _SSLProtocolTransport(transports._FlowControlMixin,
+                            transports.Transport):
+
+    def __init__(self, loop, ssl_protocol, app_protocol):
+        self._loop = loop
+        self._ssl_protocol = ssl_protocol
+        self._app_protocol = app_protocol
+        self._closed = False
+
+    def get_extra_info(self, name, default=None):
+        """Get optional transport information."""
+        return self._ssl_protocol._get_extra_info(name, default)
+
+    def close(self):
+        """Close the transport.
+
+        Buffered data will be flushed asynchronously. No more data
+        will be received. After all buffered data is flushed, the
+        protocol's connection_lost() method will (eventually) be called
+        with None as its argument.
+        """
+        self._closed = True
+        self._ssl_protocol._start_shutdown()
+
+    # On Python 3.3 and older, objects with a destructor that are part of a
+    # reference cycle are never destroyed. This is no longer the case on
+    # Python 3.4, thanks to PEP 442.
+    if compat.PY34:
+        def __del__(self):
+            if not self._closed:
+                warnings.warn("unclosed transport %r" % self, ResourceWarning)
+                self.close()
+
+    def pause_reading(self):
+        """Pause the receiving end.
+
+        No data will be passed to the protocol's data_received()
+        method until resume_reading() is called.
+        """
+        self._ssl_protocol._transport.pause_reading()
+
+    def resume_reading(self):
+        """Resume the receiving end.
+
+        Data received will once again be passed to the protocol's
+        data_received() method.
+        """
+        self._ssl_protocol._transport.resume_reading()
+
+    def set_write_buffer_limits(self, high=None, low=None):
+        """Set the high- and low-water limits for write flow control.
+    def set_write_buffer_limits(self, high=None, low=None):
+        """Set the high- and low-water limits for write flow control.
+
+        These two values control when to call the protocol's
+        pause_writing() and resume_writing() methods. If specified,
+        the low-water limit must be less than or equal to the
+        high-water limit. Neither value can be negative.
+
+        The defaults are implementation-specific. If only the
+        high-water limit is given, the low-water limit defaults to an
+        implementation-specific value less than or equal to the
+        high-water limit. Setting high to zero forces low to zero as
+        well, and causes pause_writing() to be called whenever the
+        buffer becomes non-empty. Setting low to zero causes
+        resume_writing() to be called only once the buffer is empty.
+        Use of zero for either limit is generally sub-optimal as it
+        reduces opportunities for doing I/O and computation
+        concurrently.
+        """
+        self._ssl_protocol._transport.set_write_buffer_limits(high, low)
+
+    def get_write_buffer_size(self):
+        """Return the current size of the write buffer."""
+        return self._ssl_protocol._transport.get_write_buffer_size()
+
+    def write(self, data):
+        """Write some data bytes to the transport.
+
+        This does not block; it buffers the data and arranges for it
+        to be sent out asynchronously.
+        """
+        if not isinstance(data, (bytes, bytearray, memoryview)):
+            raise TypeError("data: expecting a bytes-like instance, got {!r}"
+                            .format(type(data).__name__))
+        if not data:
+            return
+        self._ssl_protocol._write_appdata(data)
+
+    def can_write_eof(self):
+        """Return True if this transport supports write_eof(), False if not."""
+        return False
+
+    def abort(self):
+        """Close the transport immediately.
+
+        Buffered data will be lost. No more data will be received.
+        The protocol's connection_lost() method will (eventually) be
+        called with None as its argument.
+        """
+        self._ssl_protocol._abort()
+
+
+class SSLProtocol(protocols.Protocol):
+    """SSL protocol.
+
+    Implementation of SSL on top of a socket using incoming and outgoing
+    buffers which are ssl.MemoryBIO objects.
+    """
+
+    def __init__(self, loop, app_protocol, sslcontext, waiter,
+                 server_side=False, server_hostname=None):
+        if ssl is None:
+            raise RuntimeError('stdlib ssl module not available')
+
+        if not sslcontext:
+            sslcontext = _create_transport_context(server_side, server_hostname)
+
+        self._server_side = server_side
+        if server_hostname and not server_side:
+            self._server_hostname = server_hostname
+        else:
+            self._server_hostname = None
+        self._sslcontext = sslcontext
+        # SSL-specific extra info. More info is set when the handshake
+        # completes.
+        self._extra = dict(sslcontext=sslcontext)
+
+        # App data write buffering
+        self._write_backlog = collections.deque()
+        self._write_buffer_size = 0
+
+        self._waiter = waiter
+        self._loop = loop
+        self._app_protocol = app_protocol
+        self._app_transport = _SSLProtocolTransport(self._loop,
+                                                    self, self._app_protocol)
+        self._sslpipe = None
+        self._session_established = False
+        self._in_handshake = False
+        self._in_shutdown = False
+        self._transport = None
+
+    def _wakeup_waiter(self, exc=None):
+        if self._waiter is None:
+            return
+        if not self._waiter.cancelled():
+            if exc is not None:
+                self._waiter.set_exception(exc)
+            else:
+                self._waiter.set_result(None)
+        self._waiter = None
+
+    def connection_made(self, transport):
+        """Called when the low-level connection is made.
+
+        Start the SSL handshake.
+ """ + self._transport = transport + self._sslpipe = _SSLPipe(self._sslcontext, + self._server_side, + self._server_hostname) + self._start_handshake() + + def connection_lost(self, exc): + """Called when the low-level connection is lost or closed. + + The argument is an exception object or None (the latter + meaning a regular EOF is received or the connection was + aborted or closed). + """ + if self._session_established: + self._session_established = False + self._loop.call_soon(self._app_protocol.connection_lost, exc) + self._transport = None + self._app_transport = None + + def pause_writing(self): + """Called when the low-level transport's buffer goes over + the high-water mark. + """ + self._app_protocol.pause_writing() + + def resume_writing(self): + """Called when the low-level transport's buffer drains below + the low-water mark. + """ + self._app_protocol.resume_writing() + + def data_received(self, data): + """Called when some SSL data is received. + + The argument is a bytes object. + """ + try: + ssldata, appdata = self._sslpipe.feed_ssldata(data) + except ssl.SSLError as e: + if self._loop.get_debug(): + logger.warning('%r: SSL error %s (reason %s)', + self, e.errno, e.reason) + self._abort() + return + + for chunk in ssldata: + self._transport.write(chunk) + + for chunk in appdata: + if chunk: + self._app_protocol.data_received(chunk) + else: + self._start_shutdown() + break + + def eof_received(self): + """Called when the other end of the low-level stream + is half-closed. + + If this returns a false value (including None), the transport + will close itself. If it returns a true value, closing the + transport is up to the protocol. + """ + try: + if self._loop.get_debug(): + logger.debug("%r received EOF", self) + + self._wakeup_waiter(ConnectionResetError) + + if not self._in_handshake: + keep_open = self._app_protocol.eof_received() + if keep_open: + logger.warning('returning true from eof_received() ' + 'has no effect when using ssl') + finally: + self._transport.close() + + def _get_extra_info(self, name, default=None): + if name in self._extra: + return self._extra[name] + else: + return self._transport.get_extra_info(name, default) + + def _start_shutdown(self): + if self._in_shutdown: + return + self._in_shutdown = True + self._write_appdata(b'') + + def _write_appdata(self, data): + self._write_backlog.append((data, 0)) + self._write_buffer_size += len(data) + self._process_write_backlog() + + def _start_handshake(self): + if self._loop.get_debug(): + logger.debug("%r starts SSL handshake", self) + self._handshake_start_time = self._loop.time() + else: + self._handshake_start_time = None + self._in_handshake = True + # (b'', 1) is a special value in _process_write_backlog() to do + # the SSL handshake + self._write_backlog.append((b'', 1)) + self._loop.call_soon(self._process_write_backlog) + + def _on_handshake_complete(self, handshake_exc): + self._in_handshake = False + + sslobj = self._sslpipe.ssl_object + try: + if handshake_exc is not None: + raise handshake_exc + + peercert = sslobj.getpeercert() + if not hasattr(self._sslcontext, 'check_hostname'): + # Verify hostname if requested, Python 3.4+ uses check_hostname + # and checks the hostname in do_handshake() + if (self._server_hostname + and self._sslcontext.verify_mode != ssl.CERT_NONE): + ssl.match_hostname(peercert, self._server_hostname) + except BaseException as exc: + if self._loop.get_debug(): + if (hasattr(ssl, 'CertificateError') + and isinstance(exc, ssl.CertificateError)): + logger.warning("%r: 
SSL handshake failed "
+                                   "on verifying the certificate",
+                                   self, exc_info=True)
+                else:
+                    logger.warning("%r: SSL handshake failed",
+                                   self, exc_info=True)
+            self._transport.close()
+            if isinstance(exc, Exception):
+                self._wakeup_waiter(exc)
+                return
+            else:
+                raise
+
+        if self._loop.get_debug():
+            dt = self._loop.time() - self._handshake_start_time
+            logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
+
+        # Add extra info that becomes available after handshake.
+        self._extra.update(peercert=peercert,
+                           cipher=sslobj.cipher(),
+                           compression=sslobj.compression(),
+                           )
+        self._app_protocol.connection_made(self._app_transport)
+        self._wakeup_waiter()
+        self._session_established = True
+        # In case transport.write() was already called. Don't call
+        # _process_write_backlog() immediately, but schedule it:
+        # _on_handshake_complete() can be called indirectly from
+        # _process_write_backlog(), and _process_write_backlog() is not
+        # reentrant.
+        self._loop.call_soon(self._process_write_backlog)
+
+    def _process_write_backlog(self):
+        # Try to make progress on the write backlog.
+        if self._transport is None:
+            return
+
+        try:
+            for i in range(len(self._write_backlog)):
+                data, offset = self._write_backlog[0]
+                if data:
+                    ssldata, offset = self._sslpipe.feed_appdata(data, offset)
+                elif offset:
+                    ssldata = self._sslpipe.do_handshake(
+                        self._on_handshake_complete)
+                    offset = 1
+                else:
+                    ssldata = self._sslpipe.shutdown(self._finalize)
+                    offset = 1
+
+                for chunk in ssldata:
+                    self._transport.write(chunk)
+
+                if offset < len(data):
+                    self._write_backlog[0] = (data, offset)
+                    # A short write means that a write is blocked on a read.
+                    # We need to enable reading if it is paused!
+                    assert self._sslpipe.need_ssldata
+                    if self._transport._paused:
+                        self._transport.resume_reading()
+                    break
+
+                # An entire chunk from the backlog was processed. We can
+                # delete it and reduce the outstanding buffer size.
+                del self._write_backlog[0]
+                self._write_buffer_size -= len(data)
+        except BaseException as exc:
+            if self._in_handshake:
+                # BaseExceptions will be re-raised in _on_handshake_complete.
+                self._on_handshake_complete(exc)
+            else:
+                self._fatal_error(exc, 'Fatal error on SSL transport')
+            if not isinstance(exc, Exception):
+                # BaseException
+                raise
+
+    def _fatal_error(self, exc, message='Fatal error on transport'):
+        # Should be called from exception handler only.
+        if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
+            if self._loop.get_debug():
+                logger.debug("%r: %s", self, message, exc_info=True)
+        else:
+            self._loop.call_exception_handler({
+                'message': message,
+                'exception': exc,
+                'transport': self._transport,
+                'protocol': self,
+            })
+        if self._transport:
+            self._transport._force_close(exc)
+
+    def _finalize(self):
+        if self._transport is not None:
+            self._transport.close()
+
+    def _abort(self):
+        if self._transport is not None:
+            try:
+                self._transport.abort()
+            finally:
+                self._finalize()
diff --git a/trollius/streams.py b/trollius/streams.py
new file mode 100644
index 00000000..cde58fb8
--- /dev/null
+++ b/trollius/streams.py
@@ -0,0 +1,530 @@
+"""Stream-related things."""
+
+__all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol',
+           'open_connection', 'start_server',
+           'IncompleteReadError',
+           ]
+
+import socket
+
+if hasattr(socket, 'AF_UNIX'):
+    __all__.extend(['open_unix_connection', 'start_unix_server'])
+
+from . import coroutines
+from . import compat
+from . import events
+from . import futures
+from . 
import protocols +from .coroutines import coroutine, From, Return +from .py33_exceptions import ConnectionResetError +from .log import logger + + +_DEFAULT_LIMIT = 2**16 + + +class IncompleteReadError(EOFError): + """ + Incomplete read error. Attributes: + + - partial: read bytes string before the end of stream was reached + - expected: total number of expected bytes + """ + def __init__(self, partial, expected): + EOFError.__init__(self, "%s bytes read on a total of %s expected bytes" + % (len(partial), expected)) + self.partial = partial + self.expected = expected + + +@coroutine +def open_connection(host=None, port=None, + loop=None, limit=_DEFAULT_LIMIT, **kwds): + """A wrapper for create_connection() returning a (reader, writer) pair. + + The reader returned is a StreamReader instance; the writer is a + StreamWriter instance. + + The arguments are all the usual arguments to create_connection() + except protocol_factory; most common are positional host and port, + with various optional keyword arguments following. + + Additional optional keyword arguments are loop (to set the event loop + instance to use) and limit (to set the buffer limit passed to the + StreamReader). + + (If you want to customize the StreamReader and/or + StreamReaderProtocol classes, just copy the code -- there's + really nothing special here except some convenience.) + """ + if loop is None: + loop = events.get_event_loop() + reader = StreamReader(limit=limit, loop=loop) + protocol = StreamReaderProtocol(reader, loop=loop) + transport, _ = yield From(loop.create_connection( + lambda: protocol, host, port, **kwds)) + writer = StreamWriter(transport, protocol, reader, loop) + raise Return(reader, writer) + + +@coroutine +def start_server(client_connected_cb, host=None, port=None, + loop=None, limit=_DEFAULT_LIMIT, **kwds): + """Start a socket server, call back for each client connected. + + The first parameter, `client_connected_cb`, takes two parameters: + client_reader, client_writer. client_reader is a StreamReader + object, while client_writer is a StreamWriter object. This + parameter can either be a plain callback function or a coroutine; + if it is a coroutine, it will be automatically converted into a + Task. + + The rest of the arguments are all the usual arguments to + loop.create_server() except protocol_factory; most common are + positional host and port, with various optional keyword arguments + following. The return value is the same as loop.create_server(). + + Additional optional keyword arguments are loop (to set the event loop + instance to use) and limit (to set the buffer limit passed to the + StreamReader). + + The return value is the same as loop.create_server(), i.e. a + Server object which can be used to stop the service. 
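+
+    Sketch of a typical call, in trollius style (handler body elided):
+
+        def handle(client_reader, client_writer):
+            ...
+        server = yield From(start_server(handle, '127.0.0.1', 8888))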
+
+    """
+    if loop is None:
+        loop = events.get_event_loop()
+
+    def factory():
+        reader = StreamReader(limit=limit, loop=loop)
+        protocol = StreamReaderProtocol(reader, client_connected_cb,
+                                        loop=loop)
+        return protocol
+
+    server = yield From(loop.create_server(factory, host, port, **kwds))
+    raise Return(server)
+
+
+if hasattr(socket, 'AF_UNIX'):
+    # UNIX Domain Sockets are supported on this platform
+
+    @coroutine
+    def open_unix_connection(path=None,
+                             loop=None, limit=_DEFAULT_LIMIT, **kwds):
+        """Similar to `open_connection` but works with UNIX Domain Sockets."""
+        if loop is None:
+            loop = events.get_event_loop()
+        reader = StreamReader(limit=limit, loop=loop)
+        protocol = StreamReaderProtocol(reader, loop=loop)
+        transport, _ = yield From(loop.create_unix_connection(
+            lambda: protocol, path, **kwds))
+        writer = StreamWriter(transport, protocol, reader, loop)
+        raise Return(reader, writer)
+
+
+    @coroutine
+    def start_unix_server(client_connected_cb, path=None,
+                          loop=None, limit=_DEFAULT_LIMIT, **kwds):
+        """Similar to `start_server` but works with UNIX Domain Sockets."""
+        if loop is None:
+            loop = events.get_event_loop()
+
+        def factory():
+            reader = StreamReader(limit=limit, loop=loop)
+            protocol = StreamReaderProtocol(reader, client_connected_cb,
+                                            loop=loop)
+            return protocol
+
+        server = (yield From(loop.create_unix_server(factory, path, **kwds)))
+        raise Return(server)
+
+
+class FlowControlMixin(protocols.Protocol):
+    """Reusable flow control logic for StreamWriter.drain().
+
+    This implements the protocol methods pause_writing(),
+    resume_writing() and connection_lost(). If the subclass overrides
+    these it must call the super methods.
+
+    StreamWriter.drain() must wait for _drain_helper() coroutine.
+    """
+
+    def __init__(self, loop=None):
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+        self._paused = False
+        self._drain_waiter = None
+        self._connection_lost = False
+
+    def pause_writing(self):
+        assert not self._paused
+        self._paused = True
+        if self._loop.get_debug():
+            logger.debug("%r pauses writing", self)
+
+    def resume_writing(self):
+        assert self._paused
+        self._paused = False
+        if self._loop.get_debug():
+            logger.debug("%r resumes writing", self)
+
+        waiter = self._drain_waiter
+        if waiter is not None:
+            self._drain_waiter = None
+            if not waiter.done():
+                waiter.set_result(None)
+
+    def connection_lost(self, exc):
+        self._connection_lost = True
+        # Wake up the writer if currently paused.
+        if not self._paused:
+            return
+        waiter = self._drain_waiter
+        if waiter is None:
+            return
+        self._drain_waiter = None
+        if waiter.done():
+            return
+        if exc is None:
+            waiter.set_result(None)
+        else:
+            waiter.set_exception(exc)
+
+    @coroutine
+    def _drain_helper(self):
+        if self._connection_lost:
+            raise ConnectionResetError('Connection lost')
+        if not self._paused:
+            return
+        waiter = self._drain_waiter
+        assert waiter is None or waiter.cancelled()
+        waiter = futures.Future(loop=self._loop)
+        self._drain_waiter = waiter
+        yield From(waiter)
+
+
+class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
+    """Helper class to adapt between Protocol and StreamReader.
+
+    (This is a helper class instead of making StreamReader itself a
+    Protocol subclass, because the StreamReader has other potential
+    uses, and to prevent the user of the StreamReader from accidentally
+    calling inappropriate methods of the protocol.)
+    """
+
+    def __init__(self, stream_reader, client_connected_cb=None, loop=None):
+        super(StreamReaderProtocol, self).__init__(loop=loop)
+        self._stream_reader = stream_reader
+        self._stream_writer = None
+        self._client_connected_cb = client_connected_cb
+
+    def connection_made(self, transport):
+        self._stream_reader.set_transport(transport)
+        if self._client_connected_cb is not None:
+            self._stream_writer = StreamWriter(transport, self,
+                                               self._stream_reader,
+                                               self._loop)
+            res = self._client_connected_cb(self._stream_reader,
+                                            self._stream_writer)
+            if coroutines.iscoroutine(res):
+                self._loop.create_task(res)
+
+    def connection_lost(self, exc):
+        if exc is None:
+            self._stream_reader.feed_eof()
+        else:
+            self._stream_reader.set_exception(exc)
+        super(StreamReaderProtocol, self).connection_lost(exc)
+
+    def data_received(self, data):
+        self._stream_reader.feed_data(data)
+
+    def eof_received(self):
+        self._stream_reader.feed_eof()
+        return True
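+
+# Minimal client sketch for the two classes below, in trollius style (host,
+# port and the one-shot request are illustrative only):
+#
+#     @coroutine
+#     def fetch():
+#         reader, writer = yield From(open_connection('example.com', 80))
+#         writer.write(b'GET / HTTP/1.0\r\n\r\n')
+#         data = yield From(reader.read())
+#         writer.close()
+#         raise Return(data)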
+
+
+class StreamWriter(object):
+    """Wraps a Transport.
+
+    This exposes write(), writelines(), [can_]write_eof(),
+    get_extra_info() and close(). It adds drain() which returns an
+    optional Future on which you can wait for flow control. It also
+    adds a transport property which references the Transport
+    directly.
+    """
+
+    def __init__(self, transport, protocol, reader, loop):
+        self._transport = transport
+        self._protocol = protocol
+        # drain() expects that the reader has an exception() method
+        assert reader is None or isinstance(reader, StreamReader)
+        self._reader = reader
+        self._loop = loop
+
+    def __repr__(self):
+        info = [self.__class__.__name__, 'transport=%r' % self._transport]
+        if self._reader is not None:
+            info.append('reader=%r' % self._reader)
+        return '<%s>' % ' '.join(info)
+
+    @property
+    def transport(self):
+        return self._transport
+
+    def write(self, data):
+        self._transport.write(data)
+
+    def writelines(self, data):
+        self._transport.writelines(data)
+
+    def write_eof(self):
+        return self._transport.write_eof()
+
+    def can_write_eof(self):
+        return self._transport.can_write_eof()
+
+    def close(self):
+        return self._transport.close()
+
+    def get_extra_info(self, name, default=None):
+        return self._transport.get_extra_info(name, default)
+
+    @coroutine
+    def drain(self):
+        """Flush the write buffer.
+
+        The intended use is to write
+
+          w.write(data)
+          yield From(w.drain())
+        """
+        if self._reader is not None:
+            exc = self._reader.exception()
+            if exc is not None:
+                raise exc
+        yield From(self._protocol._drain_helper())
+
+
+class StreamReader(object):
+
+    def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
+        # The line length limit is a security feature;
+        # it also doubles as half the buffer limit.
+        self._limit = limit
+        if loop is None:
+            self._loop = events.get_event_loop()
+        else:
+            self._loop = loop
+        self._buffer = bytearray()
+        self._eof = False    # Whether we're done.
+        self._waiter = None  # A future used by _wait_for_data()
+        self._exception = None
+        self._transport = None
+        self._paused = False
+
+    def __repr__(self):
+        info = ['StreamReader']
+        if self._buffer:
+            info.append('%d bytes' % len(self._buffer))
+        if self._eof:
+            info.append('eof')
+        if self._limit != _DEFAULT_LIMIT:
+            info.append('l=%d' % self._limit)
+        if self._waiter:
+            info.append('w=%r' % self._waiter)
+        if self._exception:
+            info.append('e=%r' % self._exception)
+        if self._transport:
+            info.append('t=%r' % self._transport)
+        if self._paused:
+            info.append('paused')
+        return '<%s>' % ' '.join(info)
+
+    def exception(self):
+        return self._exception
+
+    def set_exception(self, exc):
+        self._exception = exc
+
+        waiter = self._waiter
+        if waiter is not None:
+            self._waiter = None
+            if not waiter.cancelled():
+                waiter.set_exception(exc)
+
+    def _wakeup_waiter(self):
+        """Wakeup read() or readline() function waiting for data or EOF."""
+        waiter = self._waiter
+        if waiter is not None:
+            self._waiter = None
+            if not waiter.cancelled():
+                waiter.set_result(None)
+
+    def set_transport(self, transport):
+        assert self._transport is None, 'Transport already set'
+        self._transport = transport
+
+    def _maybe_resume_transport(self):
+        if self._paused and len(self._buffer) <= self._limit:
+            self._paused = False
+            self._transport.resume_reading()
+
+    def feed_eof(self):
+        self._eof = True
+        self._wakeup_waiter()
+
+    def at_eof(self):
+        """Return True if the buffer is empty and 'feed_eof' was called."""
+        return self._eof and not self._buffer
+
+    def feed_data(self, data):
+        assert not self._eof, 'feed_data after feed_eof'
+
+        if not data:
+            return
+
+        self._buffer.extend(data)
+        self._wakeup_waiter()
+
+        if (self._transport is not None and
+                not self._paused and
+                len(self._buffer) > 2*self._limit):
+            try:
+                self._transport.pause_reading()
+            except NotImplementedError:
+                # The transport can't be paused.
+                # We'll just have to buffer all data.
+                # Forget the transport so we don't keep trying.
+                self._transport = None
+            else:
+                self._paused = True
+
+    @coroutine
+    def _wait_for_data(self, func_name):
+        """Wait until feed_data() or feed_eof() is called."""
+        # StreamReader uses a future to link the protocol feed_data() method
+        # to a read coroutine. Running two read coroutines at the same time
+        # would have an unexpected behaviour. It would not be possible to know
+        # which coroutine would get the next data.
+        if self._waiter is not None:
+            raise RuntimeError('%s() called while another coroutine is '
+                               'already waiting for incoming data' % func_name)
+
+        # In asyncio, there is no need to recheck if we got data or EOF thanks
+        # to "yield from". In trollius, a StreamReader method can be called
+        # after the _wait_for_data() coroutine is scheduled and before it is
+        # really executed.
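+        # For example (names illustrative):
+        #
+        #     task = loop.create_task(reader.read(1024))
+        #     protocol.data_received(b'payload')   # may run before the task
+        #
+        # so the buffer and the EOF flag are re-checked below before waiting.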
+ if self._buffer or self._eof: + return + + self._waiter = futures.Future(loop=self._loop) + try: + yield From(self._waiter) + finally: + self._waiter = None + + @coroutine + def readline(self): + if self._exception is not None: + raise self._exception + + line = bytearray() + not_enough = True + + while not_enough: + while self._buffer and not_enough: + ichar = self._buffer.find(b'\n') + if ichar < 0: + line.extend(self._buffer) + del self._buffer[:] + else: + ichar += 1 + line.extend(self._buffer[:ichar]) + del self._buffer[:ichar] + not_enough = False + + if len(line) > self._limit: + self._maybe_resume_transport() + raise ValueError('Line is too long') + + if self._eof: + break + + if not_enough: + yield From(self._wait_for_data('readline')) + + self._maybe_resume_transport() + raise Return(bytes(line)) + + @coroutine + def read(self, n=-1): + if self._exception is not None: + raise self._exception + + if not n: + raise Return(b'') + + if n < 0: + # This used to just loop creating a new waiter hoping to + # collect everything in self._buffer, but that would + # deadlock if the subprocess sends more than self.limit + # bytes. So just call self.read(self._limit) until EOF. + blocks = [] + while True: + block = yield From(self.read(self._limit)) + if not block: + break + blocks.append(block) + raise Return(b''.join(blocks)) + else: + if not self._buffer and not self._eof: + yield From(self._wait_for_data('read')) + + if n < 0 or len(self._buffer) <= n: + data = bytes(self._buffer) + del self._buffer[:] + else: + # n > 0 and len(self._buffer) > n + data = bytes(self._buffer[:n]) + del self._buffer[:n] + + self._maybe_resume_transport() + raise Return(data) + + @coroutine + def readexactly(self, n): + if self._exception is not None: + raise self._exception + + # There used to be "optimized" code here. It created its own + # Future and waited until self._buffer had at least the n + # bytes, then called read(n). Unfortunately, this could pause + # the transport if the argument was larger than the pause + # limit (which is twice self._limit). So now we just read() + # into a local buffer. + + blocks = [] + while n > 0: + block = yield From(self.read(n)) + if not block: + partial = b''.join(blocks) + raise IncompleteReadError(partial, len(partial) + n) + blocks.append(block) + n -= len(block) + + raise Return(b''.join(blocks)) + + # FIXME: should we support __aiter__ and __anext__ in Trollius? + #if compat.PY35: + # @coroutine + # def __aiter__(self): + # return self + # + # @coroutine + # def __anext__(self): + # val = yield from self.readline() + # if val == b'': + # raise StopAsyncIteration + # return val diff --git a/trollius/subprocess.py b/trollius/subprocess.py new file mode 100644 index 00000000..4ed2b5c5 --- /dev/null +++ b/trollius/subprocess.py @@ -0,0 +1,225 @@ +from __future__ import absolute_import + +__all__ = ['create_subprocess_exec', 'create_subprocess_shell'] + +import subprocess + +from . import events +from . import protocols +from . import streams +from . 
import tasks
+from .coroutines import coroutine, From, Return
+from .py33_exceptions import BrokenPipeError, ConnectionResetError
+from .log import logger
+
+
+PIPE = subprocess.PIPE
+STDOUT = subprocess.STDOUT
+if hasattr(subprocess, 'DEVNULL'):
+    DEVNULL = subprocess.DEVNULL
+
+
+class SubprocessStreamProtocol(streams.FlowControlMixin,
+                               protocols.SubprocessProtocol):
+    """Like StreamReaderProtocol, but for a subprocess."""
+
+    def __init__(self, limit, loop):
+        super(SubprocessStreamProtocol, self).__init__(loop=loop)
+        self._limit = limit
+        self.stdin = self.stdout = self.stderr = None
+        self._transport = None
+
+    def __repr__(self):
+        info = [self.__class__.__name__]
+        if self.stdin is not None:
+            info.append('stdin=%r' % self.stdin)
+        if self.stdout is not None:
+            info.append('stdout=%r' % self.stdout)
+        if self.stderr is not None:
+            info.append('stderr=%r' % self.stderr)
+        return '<%s>' % ' '.join(info)
+
+    def connection_made(self, transport):
+        self._transport = transport
+
+        stdout_transport = transport.get_pipe_transport(1)
+        if stdout_transport is not None:
+            self.stdout = streams.StreamReader(limit=self._limit,
+                                               loop=self._loop)
+            self.stdout.set_transport(stdout_transport)
+
+        stderr_transport = transport.get_pipe_transport(2)
+        if stderr_transport is not None:
+            self.stderr = streams.StreamReader(limit=self._limit,
+                                               loop=self._loop)
+            self.stderr.set_transport(stderr_transport)
+
+        stdin_transport = transport.get_pipe_transport(0)
+        if stdin_transport is not None:
+            self.stdin = streams.StreamWriter(stdin_transport,
+                                              protocol=self,
+                                              reader=None,
+                                              loop=self._loop)
+
+    def pipe_data_received(self, fd, data):
+        if fd == 1:
+            reader = self.stdout
+        elif fd == 2:
+            reader = self.stderr
+        else:
+            reader = None
+        if reader is not None:
+            reader.feed_data(data)
+
+    def pipe_connection_lost(self, fd, exc):
+        if fd == 0:
+            pipe = self.stdin
+            if pipe is not None:
+                pipe.close()
+            self.connection_lost(exc)
+            return
+        if fd == 1:
+            reader = self.stdout
+        elif fd == 2:
+            reader = self.stderr
+        else:
+            reader = None
+        if reader is not None:
+            if exc is None:
+                reader.feed_eof()
+            else:
+                reader.set_exception(exc)
+
+    def process_exited(self):
+        self._transport.close()
+        self._transport = None
+
+
+# Use a new-style class on Python 2, matching StreamReader/StreamWriter.
+class Process(object):
+    def __init__(self, transport, protocol, loop):
+        self._transport = transport
+        self._protocol = protocol
+        self._loop = loop
+        self.stdin = protocol.stdin
+        self.stdout = protocol.stdout
+        self.stderr = protocol.stderr
+        self.pid = transport.get_pid()
+
+    def __repr__(self):
+        return '<%s %s>' % (self.__class__.__name__, self.pid)
+
+    @property
+    def returncode(self):
+        return self._transport.get_returncode()
+
+    @coroutine
+    def wait(self):
+        """Wait until the process exits and return the process return code.
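+
+        Typical use (``proc`` is the Process returned by
+        create_subprocess_exec() or create_subprocess_shell() below):
+
+            exitcode = yield From(proc.wait())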
+ + This method is a coroutine.""" + return_code = yield From(self._transport._wait()) + raise Return(return_code) + + def send_signal(self, signal): + self._transport.send_signal(signal) + + def terminate(self): + self._transport.terminate() + + def kill(self): + self._transport.kill() + + @coroutine + def _feed_stdin(self, input): + debug = self._loop.get_debug() + self.stdin.write(input) + if debug: + logger.debug('%r communicate: feed stdin (%s bytes)', + self, len(input)) + try: + yield From(self.stdin.drain()) + except (BrokenPipeError, ConnectionResetError) as exc: + # communicate() ignores BrokenPipeError and ConnectionResetError + if debug: + logger.debug('%r communicate: stdin got %r', self, exc) + + if debug: + logger.debug('%r communicate: close stdin', self) + self.stdin.close() + + @coroutine + def _noop(self): + return None + + @coroutine + def _read_stream(self, fd): + transport = self._transport.get_pipe_transport(fd) + if fd == 2: + stream = self.stderr + else: + assert fd == 1 + stream = self.stdout + if self._loop.get_debug(): + name = 'stdout' if fd == 1 else 'stderr' + logger.debug('%r communicate: read %s', self, name) + output = yield From(stream.read()) + if self._loop.get_debug(): + name = 'stdout' if fd == 1 else 'stderr' + logger.debug('%r communicate: close %s', self, name) + transport.close() + raise Return(output) + + @coroutine + def communicate(self, input=None): + if input: + stdin = self._feed_stdin(input) + else: + stdin = self._noop() + if self.stdout is not None: + stdout = self._read_stream(1) + else: + stdout = self._noop() + if self.stderr is not None: + stderr = self._read_stream(2) + else: + stderr = self._noop() + stdin, stdout, stderr = yield From(tasks.gather(stdin, stdout, stderr, + loop=self._loop)) + yield From(self.wait()) + raise Return(stdout, stderr) + + +@coroutine +def create_subprocess_shell(cmd, **kwds): + stdin = kwds.pop('stdin', None) + stdout = kwds.pop('stdout', None) + stderr = kwds.pop('stderr', None) + loop = kwds.pop('loop', None) + limit = kwds.pop('limit', streams._DEFAULT_LIMIT) + if loop is None: + loop = events.get_event_loop() + protocol_factory = lambda: SubprocessStreamProtocol(limit=limit, + loop=loop) + transport, protocol = yield From(loop.subprocess_shell( + protocol_factory, + cmd, stdin=stdin, stdout=stdout, + stderr=stderr, **kwds)) + raise Return(Process(transport, protocol, loop)) + +@coroutine +def create_subprocess_exec(program, *args, **kwds): + stdin = kwds.pop('stdin', None) + stdout = kwds.pop('stdout', None) + stderr = kwds.pop('stderr', None) + loop = kwds.pop('loop', None) + limit = kwds.pop('limit', streams._DEFAULT_LIMIT) + if loop is None: + loop = events.get_event_loop() + protocol_factory = lambda: SubprocessStreamProtocol(limit=limit, + loop=loop) + transport, protocol = yield From(loop.subprocess_exec( + protocol_factory, + program, *args, + stdin=stdin, stdout=stdout, + stderr=stderr, **kwds)) + raise Return(Process(transport, protocol, loop)) diff --git a/trollius/tasks.py b/trollius/tasks.py new file mode 100644 index 00000000..440a6d8b --- /dev/null +++ b/trollius/tasks.py @@ -0,0 +1,754 @@ +"""Support for tasks, coroutines and the scheduler.""" +from __future__ import print_function + +__all__ = ['Task', + 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED', + 'wait', 'wait_for', 'as_completed', 'sleep', 'async', + 'gather', 'shield', 'ensure_future', + ] + +import functools +import linecache +import traceback +import warnings +try: + from weakref import WeakSet +except ImportError: + 
# Python 2.6 + from .py27_weakrefset import WeakSet + +from . import compat +from . import coroutines +from . import events +from . import executor +from . import futures +from .locks import Lock, Condition, Semaphore, _ContextManager +from .coroutines import coroutine, From, Return, ReturnException + + + +@coroutine +def _lock_coroutine(lock): + yield From(lock.acquire()) + raise Return(_ContextManager(lock)) + + +class Task(futures.Future): + """A coroutine wrapped in a Future.""" + + # An important invariant maintained while a Task not done: + # + # - Either _fut_waiter is None, and _step() is scheduled; + # - or _fut_waiter is some Future, and _step() is *not* scheduled. + # + # The only transition from the latter to the former is through + # _wakeup(). When _fut_waiter is not None, one of its callbacks + # must be _wakeup(). + + # Weak set containing all tasks alive. + _all_tasks = WeakSet() + + # Dictionary containing tasks that are currently active in + # all running event loops. {EventLoop: Task} + _current_tasks = {} + + # If False, don't log a message if the task is destroyed whereas its + # status is still pending + _log_destroy_pending = True + + @classmethod + def current_task(cls, loop=None): + """Return the currently running task in an event loop or None. + + By default the current task for the current event loop is returned. + + None is returned when called not in the context of a Task. + """ + if loop is None: + loop = events.get_event_loop() + return cls._current_tasks.get(loop) + + @classmethod + def all_tasks(cls, loop=None): + """Return a set of all tasks for an event loop. + + By default all tasks for the current event loop are returned. + """ + if loop is None: + loop = events.get_event_loop() + return set(t for t in cls._all_tasks if t._loop is loop) + + def __init__(self, coro, loop=None): + assert coroutines.iscoroutine(coro), repr(coro) + super(Task, self).__init__(loop=loop) + if self._source_traceback: + del self._source_traceback[-1] + self._coro = coro + self._fut_waiter = None + self._must_cancel = False + self._loop.call_soon(self._step) + self.__class__._all_tasks.add(self) + + # On Python 3.3 or older, objects with a destructor that are part of a + # reference cycle are never destroyed. That's not the case any more on + # Python 3.4 thanks to the PEP 442. + if compat.PY34: + def __del__(self): + if self._state == futures._PENDING and self._log_destroy_pending: + context = { + 'task': self, + 'message': 'Task was destroyed but it is pending!', + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + futures.Future.__del__(self) + + def _repr_info(self): + info = super(Task, self)._repr_info() + + if self._must_cancel: + # replace status + info[0] = 'cancelling' + + coro = coroutines._format_coroutine(self._coro) + info.insert(1, 'coro=<%s>' % coro) + + if self._fut_waiter is not None: + info.insert(2, 'wait_for=%r' % self._fut_waiter) + return info + + def get_stack(self, limit=None): + """Return the list of stack frames for this task's coroutine. + + If the coroutine is not done, this returns the stack where it is + suspended. If the coroutine has completed successfully or was + cancelled, this returns an empty list. If the coroutine was + terminated by an exception, this returns the list of traceback + frames. + + The frames are always ordered from oldest to newest. 
+ + The optional limit gives the maximum number of frames to + return; by default all available frames are returned. Its + meaning differs depending on whether a stack or a traceback is + returned: the newest frames of a stack are returned, but the + oldest frames of a traceback are returned. (This matches the + behavior of the traceback module.) + + For reasons beyond our control, only one stack frame is + returned for a suspended coroutine. + """ + frames = [] + try: + # 'async def' coroutines + f = self._coro.cr_frame + except AttributeError: + f = self._coro.gi_frame + if f is not None: + while f is not None: + if limit is not None: + if limit <= 0: + break + limit -= 1 + frames.append(f) + f = f.f_back + frames.reverse() + elif self._exception is not None: + tb = self._exception.__traceback__ + while tb is not None: + if limit is not None: + if limit <= 0: + break + limit -= 1 + frames.append(tb.tb_frame) + tb = tb.tb_next + return frames + + def print_stack(self, limit=None, file=None): + """Print the stack or traceback for this task's coroutine. + + This produces output similar to that of the traceback module, + for the frames retrieved by get_stack(). The limit argument + is passed to get_stack(). The file argument is an I/O stream + to which the output is written; by default output is written + to sys.stderr. + """ + extracted_list = [] + checked = set() + for f in self.get_stack(limit=limit): + lineno = f.f_lineno + co = f.f_code + filename = co.co_filename + name = co.co_name + if filename not in checked: + checked.add(filename) + linecache.checkcache(filename) + line = linecache.getline(filename, lineno, f.f_globals) + extracted_list.append((filename, lineno, name, line)) + exc = self._exception + if not extracted_list: + print('No stack for %r' % self, file=file) + elif exc is not None: + print('Traceback for %r (most recent call last):' % self, + file=file) + else: + print('Stack for %r (most recent call last):' % self, + file=file) + traceback.print_list(extracted_list, file=file) + if exc is not None: + for line in traceback.format_exception_only(exc.__class__, exc): + print(line, file=file, end='') + + def cancel(self): + """Request that this task cancel itself. + + This arranges for a CancelledError to be thrown into the + wrapped coroutine on the next cycle through the event loop. + The coroutine then has a chance to clean up or even deny + the request using try/except/finally. + + Unlike Future.cancel, this does not guarantee that the + task will be cancelled: the exception might be caught and + acted upon, delaying cancellation of the task or preventing + cancellation completely. The task may also return a value or + raise a different exception. + + Immediately after this method is called, Task.cancelled() will + not return True (unless the task was already cancelled). A + task will be marked as cancelled when the wrapped coroutine + terminates with a CancelledError exception (even if cancel() + was not called). + """ + if self.done(): + return False + if self._fut_waiter is not None: + if self._fut_waiter.cancel(): + # Leave self._fut_waiter; it may be a Task that + # catches and ignores the cancellation so we may have + # to cancel it again later. + return True + # It must be the case that self._step is already scheduled. 
+ self._must_cancel = True + return True + + def _step(self, value=None, exc=None, exc_tb=None): + assert not self.done(), \ + '_step(): already done: {0!r}, {1!r}, {2!r}'.format(self, value, exc) + + if self._must_cancel: + if not isinstance(exc, futures.CancelledError): + exc = futures.CancelledError() + self._must_cancel = False + coro = self._coro + self._fut_waiter = None + + if exc_tb is not None: + init_exc = exc + else: + init_exc = None + self.__class__._current_tasks[self._loop] = self + # Call either coro.throw(exc) or coro.send(value). + try: + if exc is not None: + if exc_tb is not None: + result = coro.throw(exc, None, exc_tb) + else: + result = coro.throw(exc) + else: + result = coro.send(value) + # On Python 3.3 and Python 3.4, ReturnException is not used in + # practice. But this except is kept to have a single code base + # for all Python versions. + except coroutines.ReturnException as exc: + if isinstance(exc, ReturnException): + exc.raised = True + result = exc.value + else: + result = None + self.set_result(result) + except StopIteration as exc: + if compat.PY33: + # asyncio Task object? get the result of the coroutine + result = exc.value + else: + if isinstance(exc, ReturnException): + exc.raised = True + result = exc.value + else: + result = None + self.set_result(result) + except futures.CancelledError as exc: + super(Task, self).cancel() # I.e., Future.cancel(self). + except BaseException as exc: + if exc is init_exc: + self._set_exception_with_tb(exc, exc_tb) + exc_tb = None + else: + self.set_exception(exc) + + if not isinstance(exc, Exception): + # reraise BaseException + raise + else: + if coroutines._DEBUG: + if not coroutines._coroutine_at_yield_from(self._coro): + # trollius coroutine must "yield From(...)" + if not isinstance(result, coroutines.FromWrapper): + self._loop.call_soon( + self._step, None, + RuntimeError("yield used without From")) + return + result = result.obj + else: + # asyncio coroutine using "yield from ..." + if isinstance(result, coroutines.FromWrapper): + result = result.obj + elif isinstance(result, coroutines.FromWrapper): + result = result.obj + + if coroutines.iscoroutine(result): + # "yield coroutine" creates a task, the current task + # will wait until the new task is done + result = self._loop.create_task(result) + # FIXME: faster check. common base class? hasattr? + elif isinstance(result, (Lock, Condition, Semaphore)): + coro = _lock_coroutine(result) + result = self._loop.create_task(coro) + + if isinstance(result, futures._FUTURE_CLASSES): + # Yielded Future must come from Future.__iter__(). + result.add_done_callback(self._wakeup) + self._fut_waiter = result + if self._must_cancel: + if self._fut_waiter.cancel(): + self._must_cancel = False + elif result is None: + # Bare yield relinquishes control for one event loop iteration. + self._loop.call_soon(self._step) + else: + # Yielding something else is an error. + self._loop.call_soon( + self._step, None, + RuntimeError( + 'Task got bad yield: {0!r}'.format(result))) + finally: + self.__class__._current_tasks.pop(self._loop) + self = None # Needed to break cycles when an exception occurs. 
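+
+    # A sketch of the coroutine style that _step() drives (illustrative
+    # only):
+    #
+    #     @coroutine
+    #     def double(x):
+    #         yield From(sleep(1.0))
+    #         raise Return(x * 2)
+    #
+    #     task = loop.create_task(double(3))
+    #
+    # Every "yield From(...)" parks the task on a Future; _wakeup() below
+    # resumes _step() when that Future completes, and Return delivers the
+    # final result via StopIteration/ReturnException.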
+ + def _wakeup(self, future): + if (future._state == futures._FINISHED + and future._exception is not None): + # Get the traceback before calling exception(), because calling + # the exception() method clears the traceback + exc_tb = future._get_exception_tb() + exc = future.exception() + self._step(None, exc, exc_tb) + exc_tb = None + else: + try: + value = future.result() + except Exception as exc: + # This may also be a cancellation. + self._step(None, exc) + else: + self._step(value, None) + self = None # Needed to break cycles when an exception occurs. + + +# wait() and as_completed() similar to those in PEP 3148. + +# Export symbols in trollius.tasks for compatibility with asyncio +FIRST_COMPLETED = executor.FIRST_COMPLETED +FIRST_EXCEPTION = executor.FIRST_EXCEPTION +ALL_COMPLETED = executor.ALL_COMPLETED + + +@coroutine +def wait(fs, loop=None, timeout=None, return_when=ALL_COMPLETED): + """Wait for the Futures and coroutines given by fs to complete. + + The sequence futures must not be empty. + + Coroutines will be wrapped in Tasks. + + Returns two sets of Future: (done, pending). + + Usage: + + done, pending = yield From(asyncio.wait(fs)) + + Note: This does not raise TimeoutError! Futures that aren't done + when the timeout occurs are returned in the second set. + """ + if isinstance(fs, futures._FUTURE_CLASSES) or coroutines.iscoroutine(fs): + raise TypeError("expect a list of futures, not %s" % type(fs).__name__) + if not fs: + raise ValueError('Set of coroutines/Futures is empty.') + if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED): + raise ValueError('Invalid return_when value: {0}'.format(return_when)) + + if loop is None: + loop = events.get_event_loop() + + fs = set(ensure_future(f, loop=loop) for f in set(fs)) + + result = yield From(_wait(fs, timeout, return_when, loop)) + raise Return(result) + + +def _release_waiter(waiter, *args): + if not waiter.done(): + waiter.set_result(None) + + +@coroutine +def wait_for(fut, timeout, loop=None): + """Wait for the single Future or coroutine to complete, with timeout. + + Coroutine will be wrapped in Task. + + Returns result of the Future or coroutine. When a timeout occurs, + it cancels the task and raises TimeoutError. To avoid the task + cancellation, wrap it in shield(). + + If the wait is cancelled, the task is also cancelled. + + This function is a coroutine. + """ + if loop is None: + loop = events.get_event_loop() + + if timeout is None: + result = yield From(fut) + raise Return(result) + + waiter = futures.Future(loop=loop) + timeout_handle = loop.call_later(timeout, _release_waiter, waiter) + cb = functools.partial(_release_waiter, waiter) + + fut = ensure_future(fut, loop=loop) + fut.add_done_callback(cb) + + try: + # wait until the future completes or the timeout + try: + yield From(waiter) + except futures.CancelledError: + fut.remove_done_callback(cb) + fut.cancel() + raise + + if fut.done(): + raise Return(fut.result()) + else: + fut.remove_done_callback(cb) + fut.cancel() + raise futures.TimeoutError() + finally: + timeout_handle.cancel() + + +@coroutine +def _wait(fs, timeout, return_when, loop): + """Internal helper for wait() and _wait_for(). + + The fs argument must be a collection of Futures. + """ + assert fs, 'Set of Futures is empty.' 
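+    # Python 2 has no `nonlocal` statement, so the completion counter below
+    # is kept in a mutable dict (non_local) that the nested _on_completion()
+    # callback can update in place.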
+ waiter = futures.Future(loop=loop) + timeout_handle = None + if timeout is not None: + timeout_handle = loop.call_later(timeout, _release_waiter, waiter) + non_local = {'counter': len(fs)} + + def _on_completion(f): + non_local['counter'] -= 1 + if (non_local['counter'] <= 0 or + return_when == FIRST_COMPLETED or + return_when == FIRST_EXCEPTION and (not f.cancelled() and + f.exception() is not None)): + if timeout_handle is not None: + timeout_handle.cancel() + if not waiter.done(): + waiter.set_result(None) + + for f in fs: + f.add_done_callback(_on_completion) + + try: + yield From(waiter) + finally: + if timeout_handle is not None: + timeout_handle.cancel() + + done, pending = set(), set() + for f in fs: + f.remove_done_callback(_on_completion) + if f.done(): + done.add(f) + else: + pending.add(f) + raise Return(done, pending) + + +# This is *not* a @coroutine! It is just an iterator (yielding Futures). +def as_completed(fs, loop=None, timeout=None): + """Return an iterator whose values are coroutines. + + When waiting for the yielded coroutines you'll get the results (or + exceptions!) of the original Futures (or coroutines), in the order + in which and as soon as they complete. + + This differs from PEP 3148; the proper way to use this is: + + for f in as_completed(fs): + result = yield From(f) # The 'yield' may raise. + # Use result. + + If a timeout is specified, the 'yield' will raise + TimeoutError when the timeout occurs before all Futures are done. + + Note: The futures 'f' are not necessarily members of fs. + """ + if isinstance(fs, futures._FUTURE_CLASSES) or coroutines.iscoroutine(fs): + raise TypeError("expect a list of futures, not %s" % type(fs).__name__) + loop = loop if loop is not None else events.get_event_loop() + todo = set(ensure_future(f, loop=loop) for f in set(fs)) + from .queues import Queue # Import here to avoid circular import problem. + done = Queue(loop=loop) + timeout_handle = None + + def _on_timeout(): + for f in todo: + f.remove_done_callback(_on_completion) + done.put_nowait(None) # Queue a dummy value for _wait_for_one(). + todo.clear() # Can't do todo.remove(f) in the loop. + + def _on_completion(f): + if not todo: + return # _on_timeout() was here first. + todo.remove(f) + done.put_nowait(f) + if not todo and timeout_handle is not None: + timeout_handle.cancel() + + @coroutine + def _wait_for_one(): + f = yield From(done.get()) + if f is None: + # Dummy value from _on_timeout(). + raise futures.TimeoutError + raise Return(f.result()) # May raise f.exception(). + + for f in todo: + f.add_done_callback(_on_completion) + if todo and timeout is not None: + timeout_handle = loop.call_later(timeout, _on_timeout) + for _ in range(len(todo)): + yield _wait_for_one() + + +@coroutine +def sleep(delay, result=None, loop=None): + """Coroutine that completes after a given time (in seconds).""" + future = futures.Future(loop=loop) + h = future._loop.call_later(delay, + future._set_result_unless_cancelled, result) + try: + result = yield From(future) + raise Return(result) + finally: + h.cancel() + + +def async(coro_or_future, loop=None): + """Wrap a coroutine in a future. + + If the argument is a Future, it is returned directly. + + This function is deprecated in 3.5. Use asyncio.ensure_future() instead. + """ + + warnings.warn("asyncio.async() function is deprecated, use ensure_future()", + DeprecationWarning) + + return ensure_future(coro_or_future, loop=loop) + + +def ensure_future(coro_or_future, loop=None): + """Wrap a coroutine in a future. 
+ + If the argument is a Future, it is returned directly. + """ + # FIXME: only check if coroutines._DEBUG is True? + if isinstance(coro_or_future, coroutines.FromWrapper): + coro_or_future = coro_or_future.obj + if isinstance(coro_or_future, futures._FUTURE_CLASSES): + if loop is not None and loop is not coro_or_future._loop: + raise ValueError('loop argument must agree with Future') + return coro_or_future + elif coroutines.iscoroutine(coro_or_future): + if loop is None: + loop = events.get_event_loop() + task = loop.create_task(coro_or_future) + if task._source_traceback: + del task._source_traceback[-1] + return task + else: + raise TypeError('A Future or coroutine is required') + + +class _GatheringFuture(futures.Future): + """Helper for gather(). + + This overrides cancel() to cancel all the children and act more + like Task.cancel(), which doesn't immediately mark itself as + cancelled. + """ + + def __init__(self, children, loop=None): + super(_GatheringFuture, self).__init__(loop=loop) + self._children = children + + def cancel(self): + if self.done(): + return False + for child in self._children: + child.cancel() + return True + + +def gather(*coros_or_futures, **kw): + """Return a future aggregating results from the given coroutines + or futures. + + All futures must share the same event loop. If all the tasks are + done successfully, the returned future's result is the list of + results (in the order of the original sequence, not necessarily + the order of results arrival). If *return_exceptions* is True, + exceptions in the tasks are treated the same as successful + results, and gathered in the result list; otherwise, the first + raised exception will be immediately propagated to the returned + future. + + Cancellation: if the outer Future is cancelled, all children (that + have not completed yet) are also cancelled. If any child is + cancelled, this is treated as if it raised CancelledError -- + the outer Future is *not* cancelled in this case. (This is to + prevent the cancellation of one child to cause other children to + be cancelled.) + """ + loop = kw.pop('loop', None) + return_exceptions = kw.pop('return_exceptions', False) + if kw: + raise TypeError("unexpected keyword") + + if not coros_or_futures: + outer = futures.Future(loop=loop) + outer.set_result([]) + return outer + + arg_to_fut = {} + for arg in set(coros_or_futures): + if not isinstance(arg, futures._FUTURE_CLASSES): + fut = ensure_future(arg, loop=loop) + if loop is None: + loop = fut._loop + # The caller cannot control this future, the "destroy pending task" + # warning should not be emitted. + fut._log_destroy_pending = False + else: + fut = arg + if loop is None: + loop = fut._loop + elif fut._loop is not loop: + raise ValueError("futures are tied to different event loops") + arg_to_fut[arg] = fut + + children = [arg_to_fut[arg] for arg in coros_or_futures] + nchildren = len(children) + outer = _GatheringFuture(children, loop=loop) + non_local = {'nfinished': 0} + results = [None] * nchildren + + def _done_callback(i, fut): + if outer.done(): + if not fut.cancelled(): + # Mark exception retrieved. + fut.exception() + return + + if fut.cancelled(): + res = futures.CancelledError() + if not return_exceptions: + outer.set_exception(res) + return + elif fut._exception is not None: + res = fut.exception() # Mark exception retrieved. 
+ if not return_exceptions: + outer.set_exception(res) + return + else: + res = fut._result + results[i] = res + non_local['nfinished'] += 1 + if non_local['nfinished'] == nchildren: + outer.set_result(results) + + for i, fut in enumerate(children): + fut.add_done_callback(functools.partial(_done_callback, i)) + return outer + + +def shield(arg, loop=None): + """Wait for a future, shielding it from cancellation. + + The statement + + res = yield From(shield(something())) + + is exactly equivalent to the statement + + res = yield From(something()) + + *except* that if the coroutine containing it is cancelled, the + task running in something() is not cancelled. From the POV of + something(), the cancellation did not happen. But its caller is + still cancelled, so the yield-from expression still raises + CancelledError. Note: If something() is cancelled by other means + this will still cancel shield(). + + If you want to completely ignore cancellation (not recommended) + you can combine shield() with a try/except clause, as follows: + + try: + res = yield From(shield(something())) + except CancelledError: + res = None + """ + inner = ensure_future(arg, loop=loop) + if inner.done(): + # Shortcut. + return inner + loop = inner._loop + outer = futures.Future(loop=loop) + + def _done_callback(inner): + if outer.cancelled(): + if not inner.cancelled(): + # Mark inner's result as retrieved. + inner.exception() + return + + if inner.cancelled(): + outer.cancel() + else: + exc = inner.exception() + if exc is not None: + outer.set_exception(exc) + else: + outer.set_result(inner.result()) + + inner.add_done_callback(_done_callback) + return outer diff --git a/trollius/test_support.py b/trollius/test_support.py new file mode 100644 index 00000000..b40576a4 --- /dev/null +++ b/trollius/test_support.py @@ -0,0 +1,309 @@ +# Subset of test.support from CPython 3.5, just what we need to run asyncio +# test suite. The code is copied from CPython 3.5 to not depend on the test +# module because it is rarely installed. + +# Ignore symbol TEST_HOME_DIR: test_events works without it + +from __future__ import absolute_import +import functools +import gc +import os +import platform +import re +import socket +import subprocess +import sys +import time + +from trollius import test_utils + +# A constant likely larger than the underlying OS pipe buffer size, to +# make writes blocking. +# Windows limit seems to be around 512 B, and many Unix kernels have a +# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure. +# (see issue #17835 for a discussion of this number). +PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1 + +def strip_python_stderr(stderr): + """Strip the stderr of a Python process from potential debug output + emitted by the interpreter. + + This will typically be run on the result of the communicate() method + of a subprocess.Popen object. 
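+
+    For example:
+
+        out, err = process.communicate()   # process: a subprocess.Popen
+        err = strip_python_stderr(err)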
+    """
+    stderr = re.sub(br"\[\d+ refs, \d+ blocks\]\r?\n?", b"", stderr).strip()
+    return stderr
+
+
+# Executing the interpreter in a subprocess
+def _assert_python(expected_success, *args, **env_vars):
+    if '__isolated' in env_vars:
+        isolated = env_vars.pop('__isolated')
+    else:
+        isolated = not env_vars
+    cmd_line = [sys.executable]
+    if sys.version_info >= (3, 3):
+        cmd_line.extend(('-X', 'faulthandler'))
+    if isolated and sys.version_info >= (3, 4):
+        # isolated mode: ignore Python environment variables, ignore user
+        # site-packages, and don't add the current directory to sys.path
+        cmd_line.append('-I')
+    elif not env_vars:
+        # ignore Python environment variables
+        cmd_line.append('-E')
+    # Need to preserve the original environment, for in-place testing of
+    # shared library builds.
+    env = os.environ.copy()
+    # But a special flag that can be set to override -- in this case, the
+    # caller is responsible to pass the full environment.
+    if env_vars.pop('__cleanenv', None):
+        env = {}
+    env.update(env_vars)
+    cmd_line.extend(args)
+    p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
+                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                         env=env)
+    try:
+        out, err = p.communicate()
+    finally:
+        subprocess._cleanup()
+        p.stdout.close()
+        p.stderr.close()
+    rc = p.returncode
+    err = strip_python_stderr(err)
+    if (rc and expected_success) or (not rc and not expected_success):
+        raise AssertionError(
+            "Process return code is %d, "
+            "stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
+    return rc, out, err
+
+
+def assert_python_ok(*args, **env_vars):
+    """
+    Assert that running the interpreter with `args` and optional environment
+    variables `env_vars` succeeds (rc == 0) and return a (return code, stdout,
+    stderr) tuple.
+
+    If the __cleanenv keyword is set, env_vars is used as a fresh environment.
+
+    Python is started in isolated mode (command line option -I),
+    except if the __isolated keyword is set to False.
+    """
+    return _assert_python(True, *args, **env_vars)
+
+
+is_jython = sys.platform.startswith('java')
+
+def gc_collect():
+    """Force as many objects as possible to be collected.
+
+    In non-CPython implementations of Python, this is needed because timely
+    deallocation is not guaranteed by the garbage collector. (Even in CPython
+    this can be the case in case of reference cycles.) This means that __del__
+    methods may be called later than expected and weakrefs may remain alive for
+    longer than expected. This function tries its best to force all garbage
+    objects to disappear.
+    """
+    gc.collect()
+    if is_jython:
+        time.sleep(0.1)
+    gc.collect()
+    gc.collect()
+
+
+HOST = "127.0.0.1"
+HOSTv6 = "::1"
+
+
+def _is_ipv6_enabled():
+    """Check whether IPv6 is enabled on this host."""
+    if socket.has_ipv6:
+        sock = None
+        try:
+            sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+            sock.bind((HOSTv6, 0))
+            return True
+        except socket.error:
+            # socket.error is an alias of OSError on Python 3, but a distinct
+            # class on Python 2, so catch it rather than OSError.
+            pass
+        finally:
+            if sock:
+                sock.close()
+    return False
+
+IPV6_ENABLED = _is_ipv6_enabled()
+
+
+def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
+    """Returns an unused port that should be suitable for binding. This is
+    achieved by creating a temporary socket with the requested family and
+    type (default is AF_INET, SOCK_STREAM), and binding it to
+    the specified host address (defaults to 0.0.0.0) with the port set to 0,
+    eliciting an unused ephemeral port from the OS. The temporary socket is
+    then closed and deleted, and the ephemeral port is returned.
+ + Either this method or bind_port() should be used for any tests where a + server socket needs to be bound to a particular port for the duration of + the test. Which one to use depends on whether the calling code is creating + a python socket, or if an unused port needs to be provided in a constructor + or passed to an external program (i.e. the -accept argument to openssl's + s_server mode). Always prefer bind_port() over find_unused_port() where + possible. Hard coded ports should *NEVER* be used. As soon as a server + socket is bound to a hard coded port, the ability to run multiple instances + of the test simultaneously on the same host is compromised, which makes the + test a ticking time bomb in a buildbot environment. On Unix buildbots, this + may simply manifest as a failed test, which can be recovered from without + intervention in most cases, but on Windows, the entire python process can + completely and utterly wedge, requiring someone to log in to the buildbot + and manually kill the affected process. + + (This is easy to reproduce on Windows, unfortunately, and can be traced to + the SO_REUSEADDR socket option having different semantics on Windows versus + Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind, + listen and then accept connections on identical host/ports. An EADDRINUSE + OSError will be raised at some point (depending on the platform and + the order bind and listen were called on each socket). + + However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE + will ever be raised when attempting to bind two identical host/ports. When + accept() is called on each socket, the second caller's process will steal + the port from the first caller, leaving them both in an awkwardly wedged + state where they'll no longer respond to any signals or graceful kills, and + must be forcibly killed via OpenProcess()/TerminateProcess(). + + The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option + instead of SO_REUSEADDR, which effectively affords the same semantics as + SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open + Source world compared to Windows ones, this is a common mistake. A quick + look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when + openssl.exe is called with the 's_server' option, for example. See + http://bugs.python.org/issue2550 for more info. The following site also + has a very thorough description about the implications of both REUSEADDR + and EXCLUSIVEADDRUSE on Windows: + http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx) + + XXX: although this approach is a vast improvement on previous attempts to + elicit unused ports, it rests heavily on the assumption that the ephemeral + port returned to us by the OS won't immediately be dished back out to some + other process when we close and delete our temporary socket but before our + calling code has a chance to bind the returned port. We can deal with this + issue if/when we come across it. + """ + + tempsock = socket.socket(family, socktype) + port = bind_port(tempsock) + tempsock.close() + del tempsock + return port + +def bind_port(sock, host=HOST): + """Bind the socket to a free port and return the port number. Relies on + ephemeral ports in order to ensure we are using an unbound port. This is + important as many tests may be running simultaneously, especially in a + buildbot environment. 
This method raises an exception if the sock.family + is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR + or SO_REUSEPORT set on it. Tests should *never* set these socket options + for TCP/IP sockets. The only case for setting these options is testing + multicasting via multiple UDP sockets. + + Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e. + on Windows), it will be set on the socket. This will prevent anyone else + from bind()'ing to our host/port for the duration of the test. + """ + + if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM: + if hasattr(socket, 'SO_REUSEADDR'): + if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1: + raise TestFailed("tests should never set the SO_REUSEADDR " + "socket option on TCP/IP sockets!") + if hasattr(socket, 'SO_REUSEPORT'): + try: + reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) + if reuse == 1: + raise TestFailed("tests should never set the SO_REUSEPORT " + "socket option on TCP/IP sockets!") + except OSError: + # Python's socket module was compiled using modern headers + # thus defining SO_REUSEPORT but this process is running + # under an older kernel that does not support SO_REUSEPORT. + pass + if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'): + sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1) + + sock.bind((host, 0)) + port = sock.getsockname()[1] + return port + +def requires_mac_ver(*min_version): + """Decorator raising SkipTest if the OS is Mac OS X and the OS X + version if less than min_version. + + For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version + is lesser than 10.5. + """ + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kw): + if sys.platform == 'darwin': + version_txt = platform.mac_ver()[0] + try: + version = tuple(map(int, version_txt.split('.'))) + except ValueError: + pass + else: + if version < min_version: + min_version_txt = '.'.join(map(str, min_version)) + raise test_utils.SkipTest( + "Mac OS X %s or higher required, not %s" + % (min_version_txt, version_txt)) + return func(*args, **kw) + wrapper.min_version = min_version + return wrapper + return decorator + +def _requires_unix_version(sysname, min_version): + """Decorator raising SkipTest if the OS is `sysname` and the version is + less than `min_version`. + + For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if + the FreeBSD version is less than 7.2. + """ + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kw): + if platform.system() == sysname: + version_txt = platform.release().split('-', 1)[0] + try: + version = tuple(map(int, version_txt.split('.'))) + except ValueError: + pass + else: + if version < min_version: + min_version_txt = '.'.join(map(str, min_version)) + raise test_utils.SkipTest( + "%s version %s or higher required, not %s" + % (sysname, min_version_txt, version_txt)) + return func(*args, **kw) + wrapper.min_version = min_version + return wrapper + return decorator + +def requires_freebsd_version(*min_version): + """Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD version + is less than `min_version`. + + For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD + version is less than 7.2. 
+ """ + return _requires_unix_version('FreeBSD', min_version) + +# Use test.support if available +try: + from test.support import * +except ImportError: + pass + +# Use test.script_helper if available +try: + from test.script_helper import assert_python_ok +except ImportError: + pass diff --git a/trollius/test_utils.py b/trollius/test_utils.py new file mode 100644 index 00000000..ebebb25c --- /dev/null +++ b/trollius/test_utils.py @@ -0,0 +1,563 @@ +"""Utilities shared by tests.""" + +import collections +import contextlib +import io +import logging +import os +import re +import socket +import sys +import tempfile +import threading +import time + +from wsgiref.simple_server import WSGIRequestHandler, WSGIServer + +import six + +try: + import socketserver + from http.server import HTTPServer +except ImportError: + # Python 2 + import SocketServer as socketserver + from BaseHTTPServer import HTTPServer + +try: + from unittest import mock +except ImportError: + # Python < 3.3 + import mock + +try: + import ssl + from .py3_ssl import SSLContext, wrap_socket +except ImportError: # pragma: no cover + # SSL support disabled in Python + ssl = None + +from . import base_events +from . import compat +from . import events +from . import futures +from . import selectors +from . import tasks +from .coroutines import coroutine +from .log import logger + + +if sys.platform == 'win32': # pragma: no cover + from .windows_utils import socketpair +else: + from socket import socketpair # pragma: no cover + +try: + # Prefer unittest2 if available (on Python 2) + import unittest2 as unittest +except ImportError: + import unittest + +skipIf = unittest.skipIf +skipUnless = unittest.skipUnless +SkipTest = unittest.SkipTest + + +if not hasattr(unittest.TestCase, 'assertRaisesRegex'): + class _BaseTestCaseContext: + + def __init__(self, test_case): + self.test_case = test_case + + def _raiseFailure(self, standardMsg): + msg = self.test_case._formatMessage(self.msg, standardMsg) + raise self.test_case.failureException(msg) + + + class _AssertRaisesBaseContext(_BaseTestCaseContext): + + def __init__(self, expected, test_case, callable_obj=None, + expected_regex=None): + _BaseTestCaseContext.__init__(self, test_case) + self.expected = expected + self.test_case = test_case + if callable_obj is not None: + try: + self.obj_name = callable_obj.__name__ + except AttributeError: + self.obj_name = str(callable_obj) + else: + self.obj_name = None + if isinstance(expected_regex, (bytes, str)): + expected_regex = re.compile(expected_regex) + self.expected_regex = expected_regex + self.msg = None + + def handle(self, name, callable_obj, args, kwargs): + """ + If callable_obj is None, assertRaises/Warns is being used as a + context manager, so check for a 'msg' kwarg and return self. + If callable_obj is not None, call it passing args and kwargs. 
+ """ + if callable_obj is None: + self.msg = kwargs.pop('msg', None) + return self + with self: + callable_obj(*args, **kwargs) + + + class _AssertRaisesContext(_AssertRaisesBaseContext): + """A context manager used to implement TestCase.assertRaises* methods.""" + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + if exc_type is None: + try: + exc_name = self.expected.__name__ + except AttributeError: + exc_name = str(self.expected) + if self.obj_name: + self._raiseFailure("{0} not raised by {1}".format(exc_name, + self.obj_name)) + else: + self._raiseFailure("{0} not raised".format(exc_name)) + if not issubclass(exc_type, self.expected): + # let unexpected exceptions pass through + return False + self.exception = exc_value + if self.expected_regex is None: + return True + + expected_regex = self.expected_regex + if not expected_regex.search(str(exc_value)): + self._raiseFailure('"{0}" does not match "{1}"'.format( + expected_regex.pattern, str(exc_value))) + return True + + +def dummy_ssl_context(): + if ssl is None: + return None + else: + return SSLContext(ssl.PROTOCOL_SSLv23) + + +def run_briefly(loop, steps=1): + @coroutine + def once(): + pass + for step in range(steps): + gen = once() + t = loop.create_task(gen) + # Don't log a warning if the task is not done after run_until_complete(). + # It occurs if the loop is stopped or if a task raises a BaseException. + t._log_destroy_pending = False + try: + loop.run_until_complete(t) + finally: + gen.close() + + +def run_until(loop, pred, timeout=30): + deadline = time.time() + timeout + while not pred(): + if timeout is not None: + timeout = deadline - time.time() + if timeout <= 0: + raise futures.TimeoutError() + loop.run_until_complete(tasks.sleep(0.001, loop=loop)) + + +def run_once(loop): + """loop.stop() schedules _raise_stop_error() + and run_forever() runs until _raise_stop_error() callback. + this wont work if test waits for some IO events, because + _raise_stop_error() runs before any of io events callbacks. + """ + loop.stop() + loop.run_forever() + + +class SilentWSGIRequestHandler(WSGIRequestHandler): + + def get_stderr(self): + return io.StringIO() + + def log_message(self, format, *args): + pass + + +class SilentWSGIServer(WSGIServer, object): + + request_timeout = 2 + + def get_request(self): + request, client_addr = super(SilentWSGIServer, self).get_request() + request.settimeout(self.request_timeout) + return request, client_addr + + def handle_error(self, request, client_address): + pass + + +class SSLWSGIServerMixin: + + def finish_request(self, request, client_address): + # The relative location of our test directory (which + # contains the ssl key and certificate files) differs + # between the stdlib and stand-alone asyncio. + # Prefer our own if we can find it. 
+ here = os.path.join(os.path.dirname(__file__), '..', 'tests') + if not os.path.isdir(here): + here = os.path.join(os.path.dirname(os.__file__), + 'test', 'test_asyncio') + keyfile = os.path.join(here, 'ssl_key.pem') + certfile = os.path.join(here, 'ssl_cert.pem') + ssock = wrap_socket(request, + keyfile=keyfile, + certfile=certfile, + server_side=True) + try: + self.RequestHandlerClass(ssock, client_address, self) + ssock.close() + except OSError: + # maybe socket has been closed by peer + pass + + +class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer): + pass + + +def _run_test_server(address, use_ssl, server_cls, server_ssl_cls): + + def app(environ, start_response): + status = '200 OK' + headers = [('Content-type', 'text/plain')] + start_response(status, headers) + return [b'Test message'] + + # Run the test WSGI server in a separate thread in order not to + # interfere with event handling in the main thread + server_class = server_ssl_cls if use_ssl else server_cls + httpd = server_class(address, SilentWSGIRequestHandler) + httpd.set_app(app) + httpd.address = httpd.server_address + server_thread = threading.Thread( + target=lambda: httpd.serve_forever(poll_interval=0.05)) + server_thread.start() + try: + yield httpd + finally: + httpd.shutdown() + httpd.server_close() + server_thread.join() + + +if hasattr(socket, 'AF_UNIX'): + + class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer, object): + + def server_bind(self): + socketserver.UnixStreamServer.server_bind(self) + self.server_name = '127.0.0.1' + self.server_port = 80 + + + class UnixWSGIServer(UnixHTTPServer, WSGIServer, object): + + request_timeout = 2 + + def server_bind(self): + UnixHTTPServer.server_bind(self) + self.setup_environ() + + def get_request(self): + request, client_addr = super(UnixWSGIServer, self).get_request() + request.settimeout(self.request_timeout) + # Code in the stdlib expects that get_request + # will return a socket and a tuple (host, port). 
+ # However, this isn't true for UNIX sockets, + # as the second return value will be a path; + # hence we return some fake data sufficient + # to get the tests going + return request, ('127.0.0.1', '') + + + class SilentUnixWSGIServer(UnixWSGIServer): + + def handle_error(self, request, client_address): + pass + + + class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer): + pass + + + def gen_unix_socket_path(): + with tempfile.NamedTemporaryFile() as file: + return file.name + + + @contextlib.contextmanager + def unix_socket_path(): + path = gen_unix_socket_path() + try: + yield path + finally: + try: + os.unlink(path) + except OSError: + pass + + + @contextlib.contextmanager + def run_test_unix_server(use_ssl=False): + with unix_socket_path() as path: + for item in _run_test_server(address=path, use_ssl=use_ssl, + server_cls=SilentUnixWSGIServer, + server_ssl_cls=UnixSSLWSGIServer): + yield item + + +@contextlib.contextmanager +def run_test_server(host='127.0.0.1', port=0, use_ssl=False): + for item in _run_test_server(address=(host, port), use_ssl=use_ssl, + server_cls=SilentWSGIServer, + server_ssl_cls=SSLWSGIServer): + yield item + + +def make_test_protocol(base): + dct = {} + for name in dir(base): + if name.startswith('__') and name.endswith('__'): + # skip magic names + continue + dct[name] = MockCallback(return_value=None) + return type('TestProtocol', (base,) + base.__bases__, dct)() + + +class TestSelector(selectors.BaseSelector): + + def __init__(self): + self.keys = {} + + def register(self, fileobj, events, data=None): + key = selectors.SelectorKey(fileobj, 0, events, data) + self.keys[fileobj] = key + return key + + def unregister(self, fileobj): + return self.keys.pop(fileobj) + + def select(self, timeout): + return [] + + def get_map(self): + return self.keys + + +class TestLoop(base_events.BaseEventLoop): + """Loop for unittests. + + It manages self time directly. + If something scheduled to be executed later then + on next loop iteration after all ready handlers done + generator passed to __init__ is calling. + + Generator should be like this: + + def gen(): + ... + when = yield ... + ... = yield time_advance + + Value returned by yield is absolute time of next scheduled handler. + Value passed to yield is time advance to move loop's time forward. 
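+
+    A sketch of a test driving the clock (times illustrative):
+
+        def gen():
+            when = yield        # receives 10, the deadline of call_at()
+            yield when          # advance the test clock by 10 seconds
+
+        loop = TestLoop(gen)
+        loop.call_at(10, callback)
+        # a later loop iteration sends 10 into gen() and advances the time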
+ """ + + def __init__(self, gen=None): + super(TestLoop, self).__init__() + + if gen is None: + def gen(): + yield + self._check_on_close = False + else: + self._check_on_close = True + + self._gen = gen() + next(self._gen) + self._time = 0 + self._clock_resolution = 1e-9 + self._timers = [] + self._selector = TestSelector() + + self.readers = {} + self.writers = {} + self.reset_counters() + + def time(self): + return self._time + + def advance_time(self, advance): + """Move test time forward.""" + if advance: + self._time += advance + + def close(self): + super(TestLoop, self).close() + if self._check_on_close: + try: + self._gen.send(0) + except StopIteration: + pass + else: # pragma: no cover + raise AssertionError("Time generator is not finished") + + def add_reader(self, fd, callback, *args): + self.readers[fd] = events.Handle(callback, args, self) + + def remove_reader(self, fd): + self.remove_reader_count[fd] += 1 + if fd in self.readers: + del self.readers[fd] + return True + else: + return False + + def assert_reader(self, fd, callback, *args): + assert fd in self.readers, 'fd {0} is not registered'.format(fd) + handle = self.readers[fd] + assert handle._callback == callback, '{0!r} != {1!r}'.format( + handle._callback, callback) + assert handle._args == args, '{0!r} != {1!r}'.format( + handle._args, args) + + def add_writer(self, fd, callback, *args): + self.writers[fd] = events.Handle(callback, args, self) + + def remove_writer(self, fd): + self.remove_writer_count[fd] += 1 + if fd in self.writers: + del self.writers[fd] + return True + else: + return False + + def assert_writer(self, fd, callback, *args): + assert fd in self.writers, 'fd {0} is not registered'.format(fd) + handle = self.writers[fd] + assert handle._callback == callback, '{0!r} != {1!r}'.format( + handle._callback, callback) + assert handle._args == args, '{0!r} != {1!r}'.format( + handle._args, args) + + def reset_counters(self): + self.remove_reader_count = collections.defaultdict(int) + self.remove_writer_count = collections.defaultdict(int) + + def _run_once(self): + super(TestLoop, self)._run_once() + for when in self._timers: + advance = self._gen.send(when) + self.advance_time(advance) + self._timers = [] + + def call_at(self, when, callback, *args): + self._timers.append(when) + return super(TestLoop, self).call_at(when, callback, *args) + + def _process_events(self, event_list): + return + + def _write_to_self(self): + pass + + +def MockCallback(**kwargs): + return mock.Mock(spec=['__call__'], **kwargs) + + +class MockPattern(str): + """A regex based str with a fuzzy __eq__. + + Use this helper with 'mock.assert_called_with', or anywhere + where a regex comparison between strings is needed. 
+ + For instance: + mock_call.assert_called_with(MockPattern('spam.*ham')) + """ + def __eq__(self, other): + return bool(re.search(str(self), other, re.S)) + + +def get_function_source(func): + source = events._get_function_source(func) + if source is None: + raise ValueError("unable to get the source of %r" % (func,)) + return source + + +class TestCase(unittest.TestCase): + def set_event_loop(self, loop, cleanup=True): + assert loop is not None + # ensure that the event loop is passed explicitly in asyncio + events.set_event_loop(None) + if cleanup: + self.addCleanup(loop.close) + + def new_test_loop(self, gen=None): + loop = TestLoop(gen) + self.set_event_loop(loop) + return loop + + def tearDown(self): + events.set_event_loop(None) + + # Detect CPython bug #23353: ensure that yield/yield-from is not used + # in an except block of a generator + if sys.exc_info()[0] == SkipTest: + if six.PY2: + sys.exc_clear() + else: + pass #self.assertEqual(sys.exc_info(), (None, None, None)) + + def check_soure_traceback(self, source_traceback, lineno_delta): + frame = sys._getframe(1) + filename = frame.f_code.co_filename + lineno = frame.f_lineno + lineno_delta + name = frame.f_code.co_name + self.assertIsInstance(source_traceback, list) + self.assertEqual(source_traceback[-1][:3], + (filename, + lineno, + name)) + + +@contextlib.contextmanager +def disable_logger(): + """Context manager to disable asyncio logger. + + For example, it can be used to ignore warnings in debug mode. + """ + old_level = logger.level + try: + logger.setLevel(logging.CRITICAL+1) + yield + finally: + logger.setLevel(old_level) + +def mock_nonblocking_socket(): + """Create a mock of a non-blocking socket.""" + sock = mock.Mock(socket.socket) + sock.gettimeout.return_value = 0.0 + return sock + + +def force_legacy_ssl_support(): + return mock.patch('trollius.sslproto._is_sslproto_available', + return_value=False) diff --git a/trollius/time_monotonic.py b/trollius/time_monotonic.py new file mode 100644 index 00000000..e99364cc --- /dev/null +++ b/trollius/time_monotonic.py @@ -0,0 +1,192 @@ +""" +Backport of time.monotonic() of Python 3.3 (PEP 418) for Python 2.7. + +- time_monotonic(). This clock may or may not be monotonic depending on the + operating system. +- time_monotonic_resolution: Resolution of time_monotonic() clock in second + +Support Windows, Mac OS X, Linux, FreeBSD, OpenBSD and Solaris, but requires +the ctypes module. +""" +import os +import sys +from .log import logger +from .py33_exceptions import get_error_class + +__all__ = ('time_monotonic',) + +# default implementation: system clock (non monotonic!) 
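+# (a system clock update can make two successive samples go backwards)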
+from time import time as time_monotonic +# the worst resolution is 15.6 ms on Windows +time_monotonic_resolution = 0.050 + +if os.name == "nt": + # Windows: use GetTickCount64() or GetTickCount() + try: + import ctypes + from ctypes import windll + from ctypes.wintypes import DWORD + except ImportError: + logger.error("time_monotonic import error", exc_info=True) + else: + # GetTickCount64() requires Windows Vista, Server 2008 or later + if hasattr(windll.kernel32, 'GetTickCount64'): + ULONGLONG = ctypes.c_uint64 + + GetTickCount64 = windll.kernel32.GetTickCount64 + GetTickCount64.restype = ULONGLONG + GetTickCount64.argtypes = () + + def time_monotonic(): + return GetTickCount64() * 1e-3 + time_monotonic_resolution = 1e-3 + else: + GetTickCount = windll.kernel32.GetTickCount + GetTickCount.restype = DWORD + GetTickCount.argtypes = () + + # Detect GetTickCount() integer overflow (32 bits, roll-over after 49.7 + # days). It increases an internal epoch (reference time) by 2^32 each + # time that an overflow is detected. The epoch is stored in the + # process-local state and so the value of time_monotonic() may be + # different in two Python processes running for more than 49 days. + def time_monotonic(): + ticks = GetTickCount() + if ticks < time_monotonic.last: + # Integer overflow detected + time_monotonic.delta += 2**32 + time_monotonic.last = ticks + return (ticks + time_monotonic.delta) * 1e-3 + time_monotonic.last = 0 + time_monotonic.delta = 0 + time_monotonic_resolution = 1e-3 + +elif sys.platform == 'darwin': + # Mac OS X: use mach_absolute_time() and mach_timebase_info() + try: + import ctypes + import ctypes.util + libc_name = ctypes.util.find_library('c') + except ImportError: + logger.error("time_monotonic import error", exc_info=True) + libc_name = None + if libc_name: + libc = ctypes.CDLL(libc_name, use_errno=True) + + mach_absolute_time = libc.mach_absolute_time + mach_absolute_time.argtypes = () + mach_absolute_time.restype = ctypes.c_uint64 + + class mach_timebase_info_data_t(ctypes.Structure): + _fields_ = ( + ('numer', ctypes.c_uint32), + ('denom', ctypes.c_uint32), + ) + mach_timebase_info_data_p = ctypes.POINTER(mach_timebase_info_data_t) + + mach_timebase_info = libc.mach_timebase_info + mach_timebase_info.argtypes = (mach_timebase_info_data_p,) + mach_timebase_info.restype = ctypes.c_int + + def time_monotonic(): + return mach_absolute_time() * time_monotonic.factor + + timebase = mach_timebase_info_data_t() + mach_timebase_info(ctypes.byref(timebase)) + time_monotonic.factor = float(timebase.numer) / timebase.denom * 1e-9 + time_monotonic_resolution = time_monotonic.factor + del timebase + +elif sys.platform.startswith(("linux", "freebsd", "openbsd", "sunos")): + # Linux, FreeBSD, OpenBSD: use clock_gettime(CLOCK_MONOTONIC) + # Solaris: use clock_gettime(CLOCK_HIGHRES) + + library = None + try: + import ctypes + import ctypes.util + except ImportError: + logger.error("time_monotonic import error", exc_info=True) + libraries = () + else: + if sys.platform.startswith(("freebsd", "openbsd")): + libraries = ('c',) + elif sys.platform.startswith("linux"): + # Linux: in glibc 2.17+, clock_gettime() is provided by the libc, + # on older versions, it is provided by librt + libraries = ('c', 'rt') + else: + # Solaris + libraries = ('rt',) + + for name in libraries: + filename = ctypes.util.find_library(name) + if not filename: + continue + library = ctypes.CDLL(filename, use_errno=True) + if not hasattr(library, 'clock_gettime'): + library = None + + if library is not None: 
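+        # ctypes found a library exposing clock_gettime(); wrap it below
+        # with the platform's monotonic (or high-resolution) clock id.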
+ if sys.platform.startswith("openbsd"): + import platform + release = platform.release() + release = tuple(map(int, release.split('.'))) + if release >= (5, 5): + time_t = ctypes.c_int64 + else: + time_t = ctypes.c_int32 + else: + time_t = ctypes.c_long + clockid_t = ctypes.c_int + + class timespec(ctypes.Structure): + _fields_ = ( + ('tv_sec', time_t), + ('tv_nsec', ctypes.c_long), + ) + timespec_p = ctypes.POINTER(timespec) + + clock_gettime = library.clock_gettime + clock_gettime.argtypes = (clockid_t, timespec_p) + clock_gettime.restype = ctypes.c_int + + def ctypes_oserror(): + errno = ctypes.get_errno() + message = os.strerror(errno) + error_class = get_error_class(errno, OSError) + return error_class(errno, message) + + def time_monotonic(): + ts = timespec() + err = clock_gettime(time_monotonic.clk_id, ctypes.byref(ts)) + if err: + raise ctypes_oserror() + return ts.tv_sec + ts.tv_nsec * 1e-9 + + if sys.platform.startswith("linux"): + time_monotonic.clk_id = 1 # CLOCK_MONOTONIC + elif sys.platform.startswith("freebsd"): + time_monotonic.clk_id = 4 # CLOCK_MONOTONIC + elif sys.platform.startswith("openbsd"): + time_monotonic.clk_id = 3 # CLOCK_MONOTONIC + else: + assert sys.platform.startswith("sunos") + time_monotonic.clk_id = 4 # CLOCK_HIGHRES + + def get_resolution(): + _clock_getres = library.clock_getres + _clock_getres.argtypes = (clockid_t, timespec_p) + _clock_getres.restype = ctypes.c_int + + ts = timespec() + err = _clock_getres(time_monotonic.clk_id, ctypes.byref(ts)) + if err: + raise ctypes_oserror() + return ts.tv_sec + ts.tv_nsec * 1e-9 + time_monotonic_resolution = get_resolution() + del get_resolution + +else: + logger.error("time_monotonic: unspported platform %r", sys.platform) + diff --git a/trollius/transports.py b/trollius/transports.py new file mode 100644 index 00000000..1f086c1a --- /dev/null +++ b/trollius/transports.py @@ -0,0 +1,294 @@ +"""Abstract Transport class.""" + +from trollius import compat + +__all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport', + 'Transport', 'DatagramTransport', 'SubprocessTransport', + ] + + +class BaseTransport(object): + """Base class for transports.""" + + def __init__(self, extra=None): + if extra is None: + extra = {} + self._extra = extra + + def get_extra_info(self, name, default=None): + """Get optional transport information.""" + return self._extra.get(name, default) + + def close(self): + """Close the transport. + + Buffered data will be flushed asynchronously. No more data + will be received. After all buffered data is flushed, the + protocol's connection_lost() method will (eventually) called + with None as its argument. + """ + raise NotImplementedError + + +class ReadTransport(BaseTransport): + """Interface for read-only transports.""" + + def pause_reading(self): + """Pause the receiving end. + + No data will be passed to the protocol's data_received() + method until resume_reading() is called. + """ + raise NotImplementedError + + def resume_reading(self): + """Resume the receiving end. + + Data received will once again be passed to the protocol's + data_received() method. + """ + raise NotImplementedError + + +class WriteTransport(BaseTransport): + """Interface for write-only transports.""" + + def set_write_buffer_limits(self, high=None, low=None): + """Set the high- and low-water limits for write flow control. + + These two values control when to call the protocol's + pause_writing() and resume_writing() methods. 
If specified, + the low-water limit must be less than or equal to the + high-water limit. Neither value can be negative. + + The defaults are implementation-specific. If only the + high-water limit is given, the low-water limit defaults to a + implementation-specific value less than or equal to the + high-water limit. Setting high to zero forces low to zero as + well, and causes pause_writing() to be called whenever the + buffer becomes non-empty. Setting low to zero causes + resume_writing() to be called only once the buffer is empty. + Use of zero for either limit is generally sub-optimal as it + reduces opportunities for doing I/O and computation + concurrently. + """ + raise NotImplementedError + + def get_write_buffer_size(self): + """Return the current size of the write buffer.""" + raise NotImplementedError + + def write(self, data): + """Write some data bytes to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + """ + raise NotImplementedError + + def writelines(self, list_of_data): + """Write a list (or any iterable) of data bytes to the transport. + + The default implementation concatenates the arguments and + calls write() on the result. + """ + data = compat.flatten_list_bytes(list_of_data) + self.write(data) + + def write_eof(self): + """Close the write end after flushing buffered data. + + (This is like typing ^D into a UNIX program reading from stdin.) + + Data may still be received. + """ + raise NotImplementedError + + def can_write_eof(self): + """Return True if this transport supports write_eof(), False if not.""" + raise NotImplementedError + + def abort(self): + """Close the transport immediately. + + Buffered data will be lost. No more data will be received. + The protocol's connection_lost() method will (eventually) be + called with None as its argument. + """ + raise NotImplementedError + + +class Transport(ReadTransport, WriteTransport): + """Interface representing a bidirectional transport. + + There may be several implementations, but typically, the user does + not implement new transports; rather, the platform provides some + useful transports that are implemented using the platform's best + practices. + + The user never instantiates a transport directly; they call a + utility function, passing it a protocol factory and other + information necessary to create the transport and protocol. (E.g. + EventLoop.create_connection() or EventLoop.create_server().) + + The utility function will asynchronously create a transport and a + protocol and hook them up by calling the protocol's + connection_made() method, passing it the transport. + + The implementation here raises NotImplemented for every method + except writelines(), which calls write() in a loop. + """ + + +class DatagramTransport(BaseTransport): + """Interface for datagram (UDP) transports.""" + + def sendto(self, data, addr=None): + """Send data to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + addr is target socket address. + If addr is None use target address pointed on transport creation. + """ + raise NotImplementedError + + def abort(self): + """Close the transport immediately. + + Buffered data will be lost. No more data will be received. + The protocol's connection_lost() method will (eventually) be + called with None as its argument. 
+ """ + raise NotImplementedError + + +class SubprocessTransport(BaseTransport): + + def get_pid(self): + """Get subprocess id.""" + raise NotImplementedError + + def get_returncode(self): + """Get subprocess returncode. + + See also + http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode + """ + raise NotImplementedError + + def get_pipe_transport(self, fd): + """Get transport for pipe with number fd.""" + raise NotImplementedError + + def send_signal(self, signal): + """Send signal to subprocess. + + See also: + docs.python.org/3/library/subprocess#subprocess.Popen.send_signal + """ + raise NotImplementedError + + def terminate(self): + """Stop the subprocess. + + Alias for close() method. + + On Posix OSs the method sends SIGTERM to the subprocess. + On Windows the Win32 API function TerminateProcess() + is called to stop the subprocess. + + See also: + http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate + """ + raise NotImplementedError + + def kill(self): + """Kill the subprocess. + + On Posix OSs the function sends SIGKILL to the subprocess. + On Windows kill() is an alias for terminate(). + + See also: + http://docs.python.org/3/library/subprocess#subprocess.Popen.kill + """ + raise NotImplementedError + + +class _FlowControlMixin(Transport): + """All the logic for (write) flow control in a mix-in base class. + + The subclass must implement get_write_buffer_size(). It must call + _maybe_pause_protocol() whenever the write buffer size increases, + and _maybe_resume_protocol() whenever it decreases. It may also + override set_write_buffer_limits() (e.g. to specify different + defaults). + + The subclass constructor must call super(Class, self).__init__(extra). This + will call set_write_buffer_limits(). + + The user may call set_write_buffer_limits() and + get_write_buffer_size(), and their protocol's pause_writing() and + resume_writing() may be called. 
+ """ + + def __init__(self, extra=None, loop=None): + super(_FlowControlMixin, self).__init__(extra) + assert loop is not None + self._loop = loop + self._protocol_paused = False + self._set_write_buffer_limits() + + def _maybe_pause_protocol(self): + size = self.get_write_buffer_size() + if size <= self._high_water: + return + if not self._protocol_paused: + self._protocol_paused = True + try: + self._protocol.pause_writing() + except Exception as exc: + self._loop.call_exception_handler({ + 'message': 'protocol.pause_writing() failed', + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + + def _maybe_resume_protocol(self): + if (self._protocol_paused and + self.get_write_buffer_size() <= self._low_water): + self._protocol_paused = False + try: + self._protocol.resume_writing() + except Exception as exc: + self._loop.call_exception_handler({ + 'message': 'protocol.resume_writing() failed', + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + + def get_write_buffer_limits(self): + return (self._low_water, self._high_water) + + def _set_write_buffer_limits(self, high=None, low=None): + if high is None: + if low is None: + high = 64*1024 + else: + high = 4*low + if low is None: + low = high // 4 + if not high >= low >= 0: + raise ValueError('high (%r) must be >= low (%r) must be >= 0' % + (high, low)) + self._high_water = high + self._low_water = low + + def set_write_buffer_limits(self, high=None, low=None): + self._set_write_buffer_limits(high=high, low=low) + self._maybe_pause_protocol() + + def get_write_buffer_size(self): + raise NotImplementedError diff --git a/trollius/unix_events.py b/trollius/unix_events.py new file mode 100644 index 00000000..cdefacad --- /dev/null +++ b/trollius/unix_events.py @@ -0,0 +1,1034 @@ +"""Selector event loop for Unix with signal handling.""" +from __future__ import absolute_import + +import errno +import os +import signal +import socket +import stat +import subprocess +import sys +import threading +import warnings + + +from . import base_events +from . import base_subprocess +from . import compat +from . import constants +from . import coroutines +from . import events +from . import futures +from . import selector_events +from . import selectors +from . import transports +from .compat import flatten_bytes +from .coroutines import coroutine, From, Return +from .log import logger +from .py33_exceptions import ( + reraise, wrap_error, + BlockingIOError, BrokenPipeError, ConnectionResetError, + InterruptedError, ChildProcessError) + + +__all__ = ['SelectorEventLoop', + 'AbstractChildWatcher', 'SafeChildWatcher', + 'FastChildWatcher', 'DefaultEventLoopPolicy', + ] + +if sys.platform == 'win32': # pragma: no cover + raise ImportError('Signals are not really supported on Windows') + + +if compat.PY33: + def _sighandler_noop(signum, frame): + """Dummy signal handler.""" + pass + + +class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop): + """Unix event loop. + + Adds signal handling and UNIX Domain Socket support to SelectorEventLoop. + """ + + def __init__(self, selector=None): + super(_UnixSelectorEventLoop, self).__init__(selector) + self._signal_handlers = {} + + def _socketpair(self): + return socket.socketpair() + + def close(self): + super(_UnixSelectorEventLoop, self).close() + for sig in list(self._signal_handlers): + self.remove_signal_handler(sig) + + # On Python <= 3.2, the C signal handler of Python writes a null byte into + # the wakeup file descriptor. 
We cannot retrieve the signal numbers from + # the file descriptor. + if compat.PY33: + def _process_self_data(self, data): + for signum in data: + if not signum: + # ignore null bytes written by _write_to_self() + continue + self._handle_signal(signum) + + def add_signal_handler(self, sig, callback, *args): + """Add a handler for a signal. UNIX only. + + Raise ValueError if the signal number is invalid or uncatchable. + Raise RuntimeError if there is a problem setting up the handler. + """ + if (coroutines.iscoroutine(callback) + or coroutines.iscoroutinefunction(callback)): + raise TypeError("coroutines cannot be used " + "with add_signal_handler()") + self._check_signal(sig) + self._check_closed() + try: + # set_wakeup_fd() raises ValueError if this is not the + # main thread. By calling it early we ensure that an + # event loop running in another thread cannot add a signal + # handler. + signal.set_wakeup_fd(self._csock.fileno()) + except (ValueError, OSError) as exc: + raise RuntimeError(str(exc)) + + handle = events.Handle(callback, args, self) + self._signal_handlers[sig] = handle + + try: + if compat.PY33: + # On Python 3.3 and newer, the C signal handler writes the + # signal number into the wakeup file descriptor and then calls + # Py_AddPendingCall() to schedule the Python signal handler. + # + # Register a dummy signal handler to ask Python to write the + # signal number into the wakup file descriptor. + # _process_self_data() will read signal numbers from this file + # descriptor to handle signals. + signal.signal(sig, _sighandler_noop) + else: + # On Python 3.2 and older, the C signal handler first calls + # Py_AddPendingCall() to schedule the Python signal handler, + # and then write a null byte into the wakeup file descriptor. + signal.signal(sig, self._handle_signal) + + # Set SA_RESTART to limit EINTR occurrences. + signal.siginterrupt(sig, False) + except (RuntimeError, OSError) as exc: + # On Python 2, signal.signal(signal.SIGKILL, signal.SIG_IGN) raises + # RuntimeError(22, 'Invalid argument'). On Python 3, + # OSError(22, 'Invalid argument') is raised instead. + exc_type, exc_value, tb = sys.exc_info() + + del self._signal_handlers[sig] + if not self._signal_handlers: + try: + signal.set_wakeup_fd(-1) + except (ValueError, OSError) as nexc: + logger.info('set_wakeup_fd(-1) failed: %s', nexc) + + if isinstance(exc, RuntimeError) or exc.errno == errno.EINVAL: + raise RuntimeError('sig {0} cannot be caught'.format(sig)) + else: + reraise(exc_type, exc_value, tb) + + def _handle_signal(self, sig, frame=None): + """Internal helper that is the actual signal handler.""" + handle = self._signal_handlers.get(sig) + if handle is None: + return # Assume it's some race condition. + if handle._cancelled: + self.remove_signal_handler(sig) # Remove it properly. + else: + self._add_callback_signalsafe(handle) + + def remove_signal_handler(self, sig): + """Remove a handler for a signal. UNIX only. + + Return True if a signal handler was removed, False if not. 
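+
+        A typical pairing (SIGTERM chosen for illustration):
+
+            loop.add_signal_handler(signal.SIGTERM, loop.stop)
+            # ... later, during shutdown ...
+            loop.remove_signal_handler(signal.SIGTERM)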
+ """ + self._check_signal(sig) + try: + del self._signal_handlers[sig] + except KeyError: + return False + + if sig == signal.SIGINT: + handler = signal.default_int_handler + else: + handler = signal.SIG_DFL + + try: + signal.signal(sig, handler) + except OSError as exc: + if exc.errno == errno.EINVAL: + raise RuntimeError('sig {0} cannot be caught'.format(sig)) + else: + raise + + if not self._signal_handlers: + try: + signal.set_wakeup_fd(-1) + except (ValueError, OSError) as exc: + logger.info('set_wakeup_fd(-1) failed: %s', exc) + + return True + + def _check_signal(self, sig): + """Internal helper to validate a signal. + + Raise ValueError if the signal number is invalid or uncatchable. + Raise RuntimeError if there is a problem setting up the handler. + """ + if not isinstance(sig, int): + raise TypeError('sig must be an int, not {0!r}'.format(sig)) + + if not (1 <= sig < signal.NSIG): + raise ValueError( + 'sig {0} out of range(1, {1})'.format(sig, signal.NSIG)) + + def _make_read_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra) + + def _make_write_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra) + + @coroutine + def _make_subprocess_transport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + extra=None, **kwargs): + with events.get_child_watcher() as watcher: + waiter = futures.Future(loop=self) + transp = _UnixSubprocessTransport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + waiter=waiter, extra=extra, + **kwargs) + + watcher.add_child_handler(transp.get_pid(), + self._child_watcher_callback, transp) + try: + yield From(waiter) + except Exception as exc: + # Workaround CPython bug #23353: using yield/yield-from in an + # except block of a generator doesn't clear properly + # sys.exc_info() + err = exc + else: + err = None + + if err is not None: + transp.close() + yield From(transp._wait()) + raise err + + raise Return(transp) + + def _child_watcher_callback(self, pid, returncode, transp): + self.call_soon_threadsafe(transp._process_exited, returncode) + + @coroutine + def create_unix_connection(self, protocol_factory, path, + ssl=None, sock=None, + server_hostname=None): + assert server_hostname is None or isinstance(server_hostname, str) + if ssl: + if server_hostname is None: + raise ValueError( + 'you have to pass server_hostname when using ssl') + else: + if server_hostname is not None: + raise ValueError('server_hostname is only meaningful with ssl') + + if path is not None: + if sock is not None: + raise ValueError( + 'path and sock can not be specified at the same time') + + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) + try: + sock.setblocking(False) + yield From(self.sock_connect(sock, path)) + except: + sock.close() + raise + + else: + if sock is None: + raise ValueError('no path and sock were specified') + sock.setblocking(False) + + transport, protocol = yield From(self._create_connection_transport( + sock, protocol_factory, ssl, server_hostname)) + raise Return(transport, protocol) + + @coroutine + def create_unix_server(self, protocol_factory, path=None, + sock=None, backlog=100, ssl=None): + if isinstance(ssl, bool): + raise TypeError('ssl argument must be an SSLContext or None') + + if path is not None: + if sock is not None: + raise ValueError( + 'path and sock can not be specified at the same time') + + sock = socket.socket(socket.AF_UNIX, 
socket.SOCK_STREAM) + + try: + sock.bind(path) + except socket.error as exc: + sock.close() + if exc.errno == errno.EADDRINUSE: + # Let's improve the error message by adding + # with what exact address it occurs. + msg = 'Address {0!r} is already in use'.format(path) + raise OSError(errno.EADDRINUSE, msg) + else: + raise + except: + sock.close() + raise + else: + if sock is None: + raise ValueError( + 'path was not specified, and no sock specified') + + if sock.family != socket.AF_UNIX: + raise ValueError( + 'A UNIX Domain Socket was expected, got {0!r}'.format(sock)) + + server = base_events.Server(self, [sock]) + sock.listen(backlog) + sock.setblocking(False) + self._start_serving(protocol_factory, sock, ssl, server) + return server + + +if hasattr(os, 'set_blocking'): + # Python 3.5 and newer + def _set_nonblocking(fd): + os.set_blocking(fd, False) +else: + import fcntl + + def _set_nonblocking(fd): + flags = fcntl.fcntl(fd, fcntl.F_GETFL) + flags = flags | os.O_NONBLOCK + fcntl.fcntl(fd, fcntl.F_SETFL, flags) + + +class _UnixReadPipeTransport(transports.ReadTransport): + + max_size = 256 * 1024 # max bytes we read in one event loop iteration + + def __init__(self, loop, pipe, protocol, waiter=None, extra=None): + super(_UnixReadPipeTransport, self).__init__(extra) + self._extra['pipe'] = pipe + self._loop = loop + self._pipe = pipe + self._fileno = pipe.fileno() + mode = os.fstat(self._fileno).st_mode + if not (stat.S_ISFIFO(mode) or + stat.S_ISSOCK(mode) or + stat.S_ISCHR(mode)): + raise ValueError("Pipe transport is for pipes/sockets only.") + _set_nonblocking(self._fileno) + self._protocol = protocol + self._closing = False + self._loop.call_soon(self._protocol.connection_made, self) + # only start reading when connection_made() has been called + self._loop.call_soon(self._loop.add_reader, + self._fileno, self._read_ready) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(waiter._set_result_unless_cancelled, None) + + def __repr__(self): + info = [self.__class__.__name__] + if self._pipe is None: + info.append('closed') + elif self._closing: + info.append('closing') + info.append('fd=%s' % self._fileno) + if self._pipe is not None: + polling = selector_events._test_selector_event( + self._loop._selector, + self._fileno, selectors.EVENT_READ) + if polling: + info.append('polling') + else: + info.append('idle') + else: + info.append('closed') + return '<%s>' % ' '.join(info) + + def _read_ready(self): + try: + data = wrap_error(os.read, self._fileno, self.max_size) + except (BlockingIOError, InterruptedError): + pass + except OSError as exc: + self._fatal_error(exc, 'Fatal read error on pipe transport') + else: + if data: + self._protocol.data_received(data) + else: + if self._loop.get_debug(): + logger.info("%r was closed by peer", self) + self._closing = True + self._loop.remove_reader(self._fileno) + self._loop.call_soon(self._protocol.eof_received) + self._loop.call_soon(self._call_connection_lost, None) + + def pause_reading(self): + self._loop.remove_reader(self._fileno) + + def resume_reading(self): + self._loop.add_reader(self._fileno, self._read_ready) + + def close(self): + if not self._closing: + self._close(None) + + # On Python 3.3 and older, objects with a destructor part of a reference + # cycle are never destroyed. It's not more the case on Python 3.4 thanks + # to the PEP 442. 
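+    # Hence the ResourceWarning destructor below is only defined on Python
+    # 3.4+, where __del__ runs even if the transport is part of a cycle.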
+ if compat.PY34: + def __del__(self): + if self._pipe is not None: + warnings.warn("unclosed transport %r" % self, ResourceWarning) + self._pipe.close() + + def _fatal_error(self, exc, message='Fatal error on pipe transport'): + # should be called by exception handler only + if (isinstance(exc, OSError) and exc.errno == errno.EIO): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + self._close(exc) + + def _close(self, exc): + self._closing = True + self._loop.remove_reader(self._fileno) + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + try: + self._protocol.connection_lost(exc) + finally: + self._pipe.close() + self._pipe = None + self._protocol = None + self._loop = None + + +class _UnixWritePipeTransport(transports._FlowControlMixin, + transports.WriteTransport): + + def __init__(self, loop, pipe, protocol, waiter=None, extra=None): + super(_UnixWritePipeTransport, self).__init__(extra, loop) + self._extra['pipe'] = pipe + self._pipe = pipe + self._fileno = pipe.fileno() + mode = os.fstat(self._fileno).st_mode + is_socket = stat.S_ISSOCK(mode) + if not (is_socket or + stat.S_ISFIFO(mode) or + stat.S_ISCHR(mode)): + raise ValueError("Pipe transport is only for " + "pipes, sockets and character devices") + _set_nonblocking(self._fileno) + self._protocol = protocol + self._buffer = [] + self._conn_lost = 0 + self._closing = False # Set when close() or write_eof() called. + + self._loop.call_soon(self._protocol.connection_made, self) + + # On AIX, the reader trick (to be notified when the read end of the + # socket is closed) only works for sockets. On other platforms it + # works for pipes and sockets. (Exception: OS X 10.4? Issue #19294.) + if is_socket or not sys.platform.startswith("aix"): + # only start reading when connection_made() has been called + self._loop.call_soon(self._loop.add_reader, + self._fileno, self._read_ready) + + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(waiter._set_result_unless_cancelled, None) + + def __repr__(self): + info = [self.__class__.__name__] + if self._pipe is None: + info.append('closed') + elif self._closing: + info.append('closing') + info.append('fd=%s' % self._fileno) + if self._pipe is not None: + polling = selector_events._test_selector_event( + self._loop._selector, + self._fileno, selectors.EVENT_WRITE) + if polling: + info.append('polling') + else: + info.append('idle') + + bufsize = self.get_write_buffer_size() + info.append('bufsize=%s' % bufsize) + else: + info.append('closed') + return '<%s>' % ' '.join(info) + + def get_write_buffer_size(self): + return sum(len(data) for data in self._buffer) + + def _read_ready(self): + # Pipe was closed by peer. + if self._loop.get_debug(): + logger.info("%r was closed by peer", self) + if self._buffer: + self._close(BrokenPipeError()) + else: + self._close() + + def write(self, data): + data = flatten_bytes(data) + if not data: + return + + if self._conn_lost or self._closing: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('pipe closed by peer or ' + 'os.write(pipe, data) raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + # Attempt to send it right away first. 
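+            # (Buffer is empty: try a direct os.write(); any unwritten
+            # tail falls through to the buffering path below.)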
+ try: + n = wrap_error(os.write, self._fileno, data) + except (BlockingIOError, InterruptedError): + n = 0 + except Exception as exc: + self._conn_lost += 1 + self._fatal_error(exc, 'Fatal write error on pipe transport') + return + if n == len(data): + return + elif n > 0: + data = data[n:] + self._loop.add_writer(self._fileno, self._write_ready) + + self._buffer.append(data) + self._maybe_pause_protocol() + + def _write_ready(self): + data = b''.join(self._buffer) + assert data, 'Data should not be empty' + + del self._buffer[:] + try: + n = wrap_error(os.write, self._fileno, data) + except (BlockingIOError, InterruptedError): + self._buffer.append(data) + except Exception as exc: + self._conn_lost += 1 + # Remove writer here, _fatal_error() doesn't it + # because _buffer is empty. + self._loop.remove_writer(self._fileno) + self._fatal_error(exc, 'Fatal write error on pipe transport') + else: + if n == len(data): + self._loop.remove_writer(self._fileno) + self._maybe_resume_protocol() # May append to buffer. + if not self._buffer and self._closing: + self._loop.remove_reader(self._fileno) + self._call_connection_lost(None) + return + elif n > 0: + data = data[n:] + + self._buffer.append(data) # Try again later. + + def can_write_eof(self): + return True + + def write_eof(self): + if self._closing: + return + assert self._pipe + self._closing = True + if not self._buffer: + self._loop.remove_reader(self._fileno) + self._loop.call_soon(self._call_connection_lost, None) + + def close(self): + if self._pipe is not None and not self._closing: + # write_eof is all what we needed to close the write pipe + self.write_eof() + + # On Python 3.3 and older, objects with a destructor part of a reference + # cycle are never destroyed. It's not more the case on Python 3.4 thanks + # to the PEP 442. 
+ if compat.PY34: + def __del__(self): + if self._pipe is not None: + warnings.warn("unclosed transport %r" % self, ResourceWarning) + self._pipe.close() + + def abort(self): + self._close(None) + + def _fatal_error(self, exc, message='Fatal error on pipe transport'): + # should be called by exception handler only + if isinstance(exc, (BrokenPipeError, ConnectionResetError)): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + self._close(exc) + + def _close(self, exc=None): + self._closing = True + if self._buffer: + self._loop.remove_writer(self._fileno) + del self._buffer[:] + self._loop.remove_reader(self._fileno) + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + try: + self._protocol.connection_lost(exc) + finally: + self._pipe.close() + self._pipe = None + self._protocol = None + self._loop = None + + +if hasattr(os, 'set_inheritable'): + # Python 3.4 and newer + _set_inheritable = os.set_inheritable +else: + import fcntl + + def _set_inheritable(fd, inheritable): + cloexec_flag = getattr(fcntl, 'FD_CLOEXEC', 1) + + old = fcntl.fcntl(fd, fcntl.F_GETFD) + if not inheritable: + fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag) + else: + fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag) + + +class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport): + + def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): + stdin_w = None + if stdin == subprocess.PIPE: + # Use a socket pair for stdin, since not all platforms + # support selecting read events on the write end of a + # socket (which we use in order to detect closing of the + # other end). Notably this is needed on AIX, and works + # just fine on other platforms. + stdin, stdin_w = self._loop._socketpair() + + # Mark the write end of the stdin pipe as non-inheritable, + # needed by close_fds=False on Python 3.3 and older + # (Python 3.4 implements the PEP 446, socketpair returns + # non-inheritable sockets) + _set_inheritable(stdin_w.fileno(), False) + self._proc = subprocess.Popen( + args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr, + universal_newlines=False, bufsize=bufsize, **kwargs) + if stdin_w is not None: + # Retrieve the file descriptor from stdin_w, stdin_w should not + # "own" the file descriptor anymore: closing stdin_fd file + # descriptor must close immediatly the file + stdin.close() + if hasattr(stdin_w, 'detach'): + stdin_fd = stdin_w.detach() + self._proc.stdin = os.fdopen(stdin_fd, 'wb', bufsize) + else: + stdin_dup = os.dup(stdin_w.fileno()) + stdin_w.close() + self._proc.stdin = os.fdopen(stdin_dup, 'wb', bufsize) + + +class AbstractChildWatcher(object): + """Abstract base class for monitoring child processes. + + Objects derived from this class monitor a collection of subprocesses and + report their termination or interruption by a signal. + + New callbacks are registered with .add_child_handler(). Starting a new + process must be done within a 'with' block to allow the watcher to suspend + its activity until the new process if fully registered (this is needed to + prevent a race condition in some implementations). + + Example: + with watcher: + proc = subprocess.Popen("sleep 1") + watcher.add_child_handler(proc.pid, callback) + + Notes: + Implementations of this class must be thread-safe. 
+ + Since child watcher objects may catch the SIGCHLD signal and call + waitpid(-1), there should be only one active object per process. + """ + + def add_child_handler(self, pid, callback, *args): + """Register a new child handler. + + Arrange for callback(pid, returncode, *args) to be called when + process 'pid' terminates. Specifying another callback for the same + process replaces the previous handler. + + Note: callback() must be thread-safe. + """ + raise NotImplementedError() + + def remove_child_handler(self, pid): + """Removes the handler for process 'pid'. + + The function returns True if the handler was successfully removed, + False if there was nothing to remove.""" + + raise NotImplementedError() + + def attach_loop(self, loop): + """Attach the watcher to an event loop. + + If the watcher was previously attached to an event loop, then it is + first detached before attaching to the new loop. + + Note: loop may be None. + """ + raise NotImplementedError() + + def close(self): + """Close the watcher. + + This must be called to make sure that any underlying resource is freed. + """ + raise NotImplementedError() + + def __enter__(self): + """Enter the watcher's context and allow starting new processes + + This function must return self""" + raise NotImplementedError() + + def __exit__(self, a, b, c): + """Exit the watcher's context""" + raise NotImplementedError() + + +class BaseChildWatcher(AbstractChildWatcher): + + def __init__(self): + self._loop = None + + def close(self): + self.attach_loop(None) + + def _do_waitpid(self, expected_pid): + raise NotImplementedError() + + def _do_waitpid_all(self): + raise NotImplementedError() + + def attach_loop(self, loop): + assert loop is None or isinstance(loop, events.AbstractEventLoop) + + if self._loop is not None: + self._loop.remove_signal_handler(signal.SIGCHLD) + + self._loop = loop + if loop is not None: + loop.add_signal_handler(signal.SIGCHLD, self._sig_chld) + + # Prevent a race condition in case a child terminated + # during the switch. + self._do_waitpid_all() + + def _sig_chld(self): + try: + self._do_waitpid_all() + except Exception as exc: + # self._loop should always be available here + # as '_sig_chld' is added as a signal handler + # in 'attach_loop' + self._loop.call_exception_handler({ + 'message': 'Unknown exception in SIGCHLD handler', + 'exception': exc, + }) + + def _compute_returncode(self, status): + if os.WIFSIGNALED(status): + # The child process died because of a signal. + return -os.WTERMSIG(status) + elif os.WIFEXITED(status): + # The child process exited (e.g sys.exit()). + return os.WEXITSTATUS(status) + else: + # The child exited, but we don't understand its status. + # This shouldn't happen, but if it does, let's just + # return that status; perhaps that helps debug it. + return status + + +class SafeChildWatcher(BaseChildWatcher): + """'Safe' child watcher implementation. + + This implementation avoids disrupting other code spawning processes by + polling explicitly each process in the SIGCHLD handler instead of calling + os.waitpid(-1). 
+
+    This is a safe solution but it has a significant overhead when handling
+    a large number of children (O(n) each time SIGCHLD is raised).
+    """
+
+    def __init__(self):
+        super(SafeChildWatcher, self).__init__()
+        self._callbacks = {}
+
+    def close(self):
+        self._callbacks.clear()
+        super(SafeChildWatcher, self).close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, a, b, c):
+        pass
+
+    def add_child_handler(self, pid, callback, *args):
+        self._callbacks[pid] = (callback, args)
+
+        # Prevent a race condition in case the child is already terminated.
+        self._do_waitpid(pid)
+
+    def remove_child_handler(self, pid):
+        try:
+            del self._callbacks[pid]
+            return True
+        except KeyError:
+            return False
+
+    def _do_waitpid_all(self):
+        for pid in list(self._callbacks):
+            self._do_waitpid(pid)
+
+    def _do_waitpid(self, expected_pid):
+        assert expected_pid > 0
+
+        try:
+            pid, status = os.waitpid(expected_pid, os.WNOHANG)
+        except ChildProcessError:
+            # The child process is already reaped
+            # (may happen if waitpid() is called elsewhere).
+            pid = expected_pid
+            returncode = 255
+            logger.warning(
+                "Unknown child process pid %d, will report returncode 255",
+                pid)
+        else:
+            if pid == 0:
+                # The child process is still alive.
+                return
+
+            returncode = self._compute_returncode(status)
+            if self._loop.get_debug():
+                logger.debug('process %s exited with returncode %s',
+                             expected_pid, returncode)
+
+        try:
+            callback, args = self._callbacks.pop(pid)
+        except KeyError:  # pragma: no cover
+            # May happen if .remove_child_handler() is called
+            # after os.waitpid() returns.
+            if self._loop.get_debug():
+                logger.warning("Child watcher got an unexpected pid: %r",
+                               pid, exc_info=True)
+        else:
+            callback(pid, returncode, *args)
+
+
+class FastChildWatcher(BaseChildWatcher):
+    """'Fast' child watcher implementation.
+
+    This implementation reaps every terminated process by calling
+    os.waitpid(-1) directly, possibly breaking other code spawning processes
+    and waiting for their termination.
+
+    There is no noticeable overhead when handling a large number of children
+    (O(1) each time a child terminates).
+    """
+    def __init__(self):
+        super(FastChildWatcher, self).__init__()
+        self._callbacks = {}
+        self._lock = threading.Lock()
+        self._zombies = {}
+        self._forks = 0
+
+    def close(self):
+        self._callbacks.clear()
+        self._zombies.clear()
+        super(FastChildWatcher, self).close()
+
+    def __enter__(self):
+        with self._lock:
+            self._forks += 1
+
+            return self
+
+    def __exit__(self, a, b, c):
+        with self._lock:
+            self._forks -= 1
+
+            if self._forks or not self._zombies:
+                return
+
+            collateral_victims = str(self._zombies)
+            self._zombies.clear()
+
+        logger.warning(
+            "Caught subprocesses termination from unknown pids: %s",
+            collateral_victims)
+
+    def add_child_handler(self, pid, callback, *args):
+        assert self._forks, "Must use the context manager"
+        with self._lock:
+            try:
+                returncode = self._zombies.pop(pid)
+            except KeyError:
+                # The child is running.
+                self._callbacks[pid] = callback, args
+                return
+
+        # The child is dead already. We can fire the callback.
+        callback(pid, returncode, *args)
+
+    def remove_child_handler(self, pid):
+        try:
+            del self._callbacks[pid]
+            return True
+        except KeyError:
+            return False
+
+    def _do_waitpid_all(self):
+        # Because of signal coalescing, we must keep calling waitpid() as
+        # long as we're able to reap a child.
+        while True:
+            try:
+                pid, status = wrap_error(os.waitpid, -1, os.WNOHANG)
+            except ChildProcessError:
+                # No more child processes exist.
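+                # (os.waitpid(-1, os.WNOHANG) fails with ECHILD, which
+                # wrap_error() maps to ChildProcessError, once this process
+                # has no children left to reap.)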
+ return + else: + if pid == 0: + # A child process is still alive. + return + + returncode = self._compute_returncode(status) + + with self._lock: + try: + callback, args = self._callbacks.pop(pid) + except KeyError: + # unknown child + if self._forks: + # It may not be registered yet. + self._zombies[pid] = returncode + if self._loop.get_debug(): + logger.debug('unknown process %s exited ' + 'with returncode %s', + pid, returncode) + continue + callback = None + else: + if self._loop.get_debug(): + logger.debug('process %s exited with returncode %s', + pid, returncode) + + if callback is None: + logger.warning( + "Caught subprocess termination from unknown pid: " + "%d -> %d", pid, returncode) + else: + callback(pid, returncode, *args) + + +class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy): + """UNIX event loop policy with a watcher for child processes.""" + _loop_factory = _UnixSelectorEventLoop + + def __init__(self): + super(_UnixDefaultEventLoopPolicy, self).__init__() + self._watcher = None + + def _init_watcher(self): + with events._lock: + if self._watcher is None: # pragma: no branch + self._watcher = SafeChildWatcher() + if isinstance(threading.current_thread(), + threading._MainThread): + self._watcher.attach_loop(self._local._loop) + + def set_event_loop(self, loop): + """Set the event loop. + + As a side effect, if a child watcher was set before, then calling + .set_event_loop() from the main thread will call .attach_loop(loop) on + the child watcher. + """ + + super(_UnixDefaultEventLoopPolicy, self).set_event_loop(loop) + + if self._watcher is not None and \ + isinstance(threading.current_thread(), threading._MainThread): + self._watcher.attach_loop(loop) + + def get_child_watcher(self): + """Get the watcher for child processes. + + If not yet set, a SafeChildWatcher object is automatically created. + """ + if self._watcher is None: + self._init_watcher() + + return self._watcher + + def set_child_watcher(self, watcher): + """Set the watcher for child processes.""" + + assert watcher is None or isinstance(watcher, AbstractChildWatcher) + + if self._watcher is not None: + self._watcher.close() + + self._watcher = watcher + +SelectorEventLoop = _UnixSelectorEventLoop +DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy diff --git a/trollius/windows_events.py b/trollius/windows_events.py new file mode 100644 index 00000000..3102d230 --- /dev/null +++ b/trollius/windows_events.py @@ -0,0 +1,777 @@ +"""Selector and proactor event loops for Windows.""" + +import errno +import math +import socket +import struct +import weakref + +from . import events +from . import base_subprocess +from . import futures +from . import proactor_events +from . import py33_winapi as _winapi +from . import selector_events +from . import tasks +from . import windows_utils +from . import _overlapped +from .coroutines import coroutine, From, Return +from .log import logger +from .py33_exceptions import wrap_error, BrokenPipeError, ConnectionResetError + + +__all__ = ['SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor', + 'DefaultEventLoopPolicy', + ] + + +NULL = 0 +INFINITE = 0xffffffff +ERROR_CONNECTION_REFUSED = 1225 +ERROR_CONNECTION_ABORTED = 1236 + +# Initial delay in seconds for connect_pipe() before retrying to connect +CONNECT_PIPE_INIT_DELAY = 0.001 + +# Maximum delay in seconds for connect_pipe() before retrying to connect +CONNECT_PIPE_MAX_DELAY = 0.100 + + +class _OverlappedFuture(futures.Future): + """Subclass of Future which represents an overlapped operation. 
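+
+    The future keeps a reference to the underlying Overlapped object: the
+    OVERLAPPED memory must stay alive while Windows can still write to it.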
+
+    Cancelling it will immediately cancel the overlapped operation.
+    """
+
+    def __init__(self, ov, loop=None):
+        super(_OverlappedFuture, self).__init__(loop=loop)
+        if self._source_traceback:
+            del self._source_traceback[-1]
+        self._ov = ov
+
+    def _repr_info(self):
+        info = super(_OverlappedFuture, self)._repr_info()
+        if self._ov is not None:
+            state = 'pending' if self._ov.pending else 'completed'
+            info.insert(1, 'overlapped=<%s, %#x>' % (state, self._ov.address))
+        return info
+
+    def _cancel_overlapped(self):
+        if self._ov is None:
+            return
+        try:
+            self._ov.cancel()
+        except OSError as exc:
+            context = {
+                'message': 'Cancelling an overlapped future failed',
+                'exception': exc,
+                'future': self,
+            }
+            if self._source_traceback:
+                context['source_traceback'] = self._source_traceback
+            self._loop.call_exception_handler(context)
+        self._ov = None
+
+    def cancel(self):
+        self._cancel_overlapped()
+        return super(_OverlappedFuture, self).cancel()
+
+    def set_exception(self, exception):
+        super(_OverlappedFuture, self).set_exception(exception)
+        self._cancel_overlapped()
+
+    def set_result(self, result):
+        super(_OverlappedFuture, self).set_result(result)
+        self._ov = None
+
+
+class _BaseWaitHandleFuture(futures.Future):
+    """Subclass of Future which represents a wait handle."""
+
+    def __init__(self, ov, handle, wait_handle, loop=None):
+        super(_BaseWaitHandleFuture, self).__init__(loop=loop)
+        if self._source_traceback:
+            del self._source_traceback[-1]
+        # Keep a reference to the Overlapped object to keep it alive until
+        # the wait is unregistered
+        self._ov = ov
+        self._handle = handle
+        self._wait_handle = wait_handle
+
+        # Should we call UnregisterWaitEx() if the wait completes
+        # or is cancelled?
+        self._registered = True
+
+    def _poll(self):
+        # non-blocking wait: use a timeout of 0 milliseconds
+        return (_winapi.WaitForSingleObject(self._handle, 0) ==
+                _winapi.WAIT_OBJECT_0)
+
+    def _repr_info(self):
+        info = super(_BaseWaitHandleFuture, self)._repr_info()
+        if self._handle is not None:
+            info.append('handle=%#x' % self._handle)
+            state = 'signaled' if self._poll() else 'waiting'
+            info.append(state)
+        if self._wait_handle is not None:
+            info.append('wait_handle=%#x' % self._wait_handle)
+        return info
+
+    def _unregister_wait_cb(self, fut):
+        # The wait was unregistered: it's now safe to destroy the Overlapped
+        # object
+        self._ov = None
+
+    def _unregister_wait(self):
+        if not self._registered:
+            return
+        self._registered = False
+
+        wait_handle = self._wait_handle
+        self._wait_handle = None
+        try:
+            _overlapped.UnregisterWait(wait_handle)
+        except OSError as exc:
+            if exc.winerror != _overlapped.ERROR_IO_PENDING:
+                context = {
+                    'message': 'Failed to unregister the wait handle',
+                    'exception': exc,
+                    'future': self,
+                }
+                if self._source_traceback:
+                    context['source_traceback'] = self._source_traceback
+                self._loop.call_exception_handler(context)
+                return
+            # ERROR_IO_PENDING means that the unregister is pending
+
+        self._unregister_wait_cb(None)
+
+    def cancel(self):
+        self._unregister_wait()
+        return super(_BaseWaitHandleFuture, self).cancel()
+
+    def set_exception(self, exception):
+        self._unregister_wait()
+        super(_BaseWaitHandleFuture, self).set_exception(exception)
+
+    def set_result(self, result):
+        self._unregister_wait()
+        super(_BaseWaitHandleFuture, self).set_result(result)
+
+
+class _WaitCancelFuture(_BaseWaitHandleFuture):
+    """Subclass of Future which represents a wait for the cancellation of a
+    _WaitHandleFuture using an event.
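+
+    Used by IocpProactor._wait_cancel() to wait, through an event registered
+    on the completion port, until UnregisterWaitEx() has actually finished.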
+    """
+
+    def __init__(self, ov, event, wait_handle, loop=None):
+        super(_WaitCancelFuture, self).__init__(ov, event, wait_handle,
+                                                loop=loop)
+
+        self._done_callback = None
+
+    def cancel(self):
+        raise RuntimeError("_WaitCancelFuture must not be cancelled")
+
+    def _schedule_callbacks(self):
+        super(_WaitCancelFuture, self)._schedule_callbacks()
+        if self._done_callback is not None:
+            self._done_callback(self)
+
+
+class _WaitHandleFuture(_BaseWaitHandleFuture):
+    def __init__(self, ov, handle, wait_handle, proactor, loop=None):
+        super(_WaitHandleFuture, self).__init__(ov, handle, wait_handle,
+                                                loop=loop)
+        self._proactor = proactor
+        self._unregister_proactor = True
+        self._event = _overlapped.CreateEvent(None, True, False, None)
+        self._event_fut = None
+
+    def _unregister_wait_cb(self, fut):
+        if self._event is not None:
+            _winapi.CloseHandle(self._event)
+            self._event = None
+            self._event_fut = None
+
+        # If the wait was cancelled, the wait may never be signalled, so
+        # it's required to unregister it. Otherwise, IocpProactor.close()
+        # will wait forever for an event which will never come.
+        #
+        # If the IocpProactor already received the event, it's safe to call
+        # _unregister() because we kept a reference to the Overlapped object
+        # which is used as a unique key.
+        self._proactor._unregister(self._ov)
+        self._proactor = None
+
+        super(_WaitHandleFuture, self)._unregister_wait_cb(fut)
+
+    def _unregister_wait(self):
+        if not self._registered:
+            return
+        self._registered = False
+
+        wait_handle = self._wait_handle
+        self._wait_handle = None
+        try:
+            _overlapped.UnregisterWaitEx(wait_handle, self._event)
+        except OSError as exc:
+            if exc.winerror != _overlapped.ERROR_IO_PENDING:
+                context = {
+                    'message': 'Failed to unregister the wait handle',
+                    'exception': exc,
+                    'future': self,
+                }
+                if self._source_traceback:
+                    context['source_traceback'] = self._source_traceback
+                self._loop.call_exception_handler(context)
+                return
+            # ERROR_IO_PENDING is not an error, the wait was unregistered
+
+        self._event_fut = self._proactor._wait_cancel(
+            self._event, self._unregister_wait_cb)
+
+
+class PipeServer(object):
+    """Class representing a pipe server.
+
+    This is much like a bound, listening socket.
+    """
+    def __init__(self, address):
+        self._address = address
+        self._free_instances = weakref.WeakSet()
+        # initialize the pipe attribute before calling
+        # _server_pipe_handle() because this function can raise an
+        # exception and the destructor calls the close() method
+        self._pipe = None
+        self._accept_pipe_future = None
+        self._pipe = self._server_pipe_handle(True)
+
+    def _get_unconnected_pipe(self):
+        # Create a new instance and return the previous one. This ensures
+        # that (until the server is closed) there is always at least
+        # one pipe handle for the address. Therefore if a client attempts
+        # to connect it will not fail with FileNotFoundError.
+        tmp, self._pipe = self._pipe, self._server_pipe_handle(False)
+        return tmp
+
+    def _server_pipe_handle(self, first):
+        # Return a wrapper for a new pipe handle.
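+        # (The first instance is created with FILE_FLAG_FIRST_PIPE_INSTANCE,
+        # so CreateNamedPipe() fails if the pipe name is already in use.)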
+ if self.closed(): + return None + flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED + if first: + flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE + h = wrap_error(_winapi.CreateNamedPipe, + self._address, flags, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + _winapi.PIPE_UNLIMITED_INSTANCES, + windows_utils.BUFSIZE, windows_utils.BUFSIZE, + _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL) + pipe = windows_utils.PipeHandle(h) + self._free_instances.add(pipe) + return pipe + + def closed(self): + return (self._address is None) + + def close(self): + if self._accept_pipe_future is not None: + self._accept_pipe_future.cancel() + self._accept_pipe_future = None + # Close all instances which have not been connected to by a client. + if self._address is not None: + for pipe in self._free_instances: + pipe.close() + self._pipe = None + self._address = None + self._free_instances.clear() + + __del__ = close + + +class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop): + """Windows version of selector event loop.""" + + def _socketpair(self): + return windows_utils.socketpair() + + +class ProactorEventLoop(proactor_events.BaseProactorEventLoop): + """Windows version of proactor event loop using IOCP.""" + + def __init__(self, proactor=None): + if proactor is None: + proactor = IocpProactor() + super(ProactorEventLoop, self).__init__(proactor) + + def _socketpair(self): + return windows_utils.socketpair() + + @coroutine + def create_pipe_connection(self, protocol_factory, address): + f = self._proactor.connect_pipe(address) + pipe = yield From(f) + protocol = protocol_factory() + trans = self._make_duplex_pipe_transport(pipe, protocol, + extra={'addr': address}) + raise Return(trans, protocol) + + @coroutine + def start_serving_pipe(self, protocol_factory, address): + server = PipeServer(address) + + def loop_accept_pipe(f=None): + pipe = None + try: + if f: + pipe = f.result() + server._free_instances.discard(pipe) + + if server.closed(): + # A client connected before the server was closed: + # drop the client (close the pipe) and exit + pipe.close() + return + + protocol = protocol_factory() + self._make_duplex_pipe_transport( + pipe, protocol, extra={'addr': address}) + + pipe = server._get_unconnected_pipe() + if pipe is None: + return + + f = self._proactor.accept_pipe(pipe) + except OSError as exc: + if pipe and pipe.fileno() != -1: + self.call_exception_handler({ + 'message': 'Pipe accept failed', + 'exception': exc, + 'pipe': pipe, + }) + pipe.close() + elif self._debug: + logger.warning("Accept pipe failed on pipe %r", + pipe, exc_info=True) + except futures.CancelledError: + if pipe: + pipe.close() + else: + server._accept_pipe_future = f + f.add_done_callback(loop_accept_pipe) + + self.call_soon(loop_accept_pipe) + return [server] + + @coroutine + def _make_subprocess_transport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + extra=None, **kwargs): + waiter = futures.Future(loop=self) + transp = _WindowsSubprocessTransport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + waiter=waiter, extra=extra, + **kwargs) + try: + yield From(waiter) + except Exception as exc: + # Workaround CPython bug #23353: using yield/yield-from in an + # except block of a generator doesn't clear properly sys.exc_info() + err = exc + else: + err = None + + if err is not None: + transp.close() + yield From(transp._wait()) + raise err + + raise Return(transp) + + +class IocpProactor(object): + """Proactor implementation 
using IOCP.""" + + def __init__(self, concurrency=0xffffffff): + self._loop = None + self._results = [] + self._iocp = _overlapped.CreateIoCompletionPort( + _overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency) + self._cache = {} + self._registered = weakref.WeakSet() + self._unregistered = [] + self._stopped_serving = weakref.WeakSet() + + def __repr__(self): + return ('<%s overlapped#=%s result#=%s>' + % (self.__class__.__name__, len(self._cache), + len(self._results))) + + def set_loop(self, loop): + self._loop = loop + + def select(self, timeout=None): + if not self._results: + self._poll(timeout) + tmp = self._results + self._results = [] + return tmp + + def _result(self, value): + fut = futures.Future(loop=self._loop) + fut.set_result(value) + return fut + + def recv(self, conn, nbytes, flags=0): + self._register_with_iocp(conn) + ov = _overlapped.Overlapped(NULL) + try: + if isinstance(conn, socket.socket): + wrap_error(ov.WSARecv, conn.fileno(), nbytes, flags) + else: + wrap_error(ov.ReadFile, conn.fileno(), nbytes) + except BrokenPipeError: + return self._result(b'') + + def finish_recv(trans, key, ov): + try: + return wrap_error(ov.getresult) + except WindowsError as exc: + if exc.winerror == _overlapped.ERROR_NETNAME_DELETED: + raise ConnectionResetError(*exc.args) + else: + raise + + return self._register(ov, conn, finish_recv) + + def send(self, conn, buf, flags=0): + self._register_with_iocp(conn) + ov = _overlapped.Overlapped(NULL) + if isinstance(conn, socket.socket): + ov.WSASend(conn.fileno(), buf, flags) + else: + ov.WriteFile(conn.fileno(), buf) + + def finish_send(trans, key, ov): + try: + return wrap_error(ov.getresult) + except WindowsError as exc: + if exc.winerror == _overlapped.ERROR_NETNAME_DELETED: + raise ConnectionResetError(*exc.args) + else: + raise + + return self._register(ov, conn, finish_send) + + def accept(self, listener): + self._register_with_iocp(listener) + conn = self._get_accept_socket(listener.family) + ov = _overlapped.Overlapped(NULL) + ov.AcceptEx(listener.fileno(), conn.fileno()) + + def finish_accept(trans, key, ov): + wrap_error(ov.getresult) + # Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work. + buf = struct.pack('@P', listener.fileno()) + conn.setsockopt(socket.SOL_SOCKET, + _overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf) + conn.settimeout(listener.gettimeout()) + return conn, conn.getpeername() + + @coroutine + def accept_coro(future, conn): + # Coroutine closing the accept socket if the future is cancelled + try: + yield From(future) + except futures.CancelledError: + conn.close() + raise + + future = self._register(ov, listener, finish_accept) + coro = accept_coro(future, conn) + tasks.ensure_future(coro, loop=self._loop) + return future + + def connect(self, conn, address): + self._register_with_iocp(conn) + # The socket needs to be locally bound before we call ConnectEx(). + try: + _overlapped.BindLocal(conn.fileno(), conn.family) + except WindowsError as e: + if e.winerror != errno.WSAEINVAL: + raise + # Probably already locally bound; check using getsockname(). + if conn.getsockname()[1] == 0: + raise + ov = _overlapped.Overlapped(NULL) + ov.ConnectEx(conn.fileno(), address) + + def finish_connect(trans, key, ov): + wrap_error(ov.getresult) + # Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work. 
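+            # (Until this option is set, a socket connected with ConnectEx()
+            # is not updated: getpeername() would fail with WSAENOTCONN.)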
+            conn.setsockopt(socket.SOL_SOCKET,
+                            _overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)
+            return conn
+
+        return self._register(ov, conn, finish_connect)
+
+    def accept_pipe(self, pipe):
+        self._register_with_iocp(pipe)
+        ov = _overlapped.Overlapped(NULL)
+        connected = ov.ConnectNamedPipe(pipe.fileno())
+
+        if connected:
+            # ConnectNamedPipe() failed with ERROR_PIPE_CONNECTED which means
+            # that the pipe is connected. There is no need to wait for the
+            # completion of the connection.
+            return self._result(pipe)
+
+        def finish_accept_pipe(trans, key, ov):
+            wrap_error(ov.getresult)
+            return pipe
+
+        return self._register(ov, pipe, finish_accept_pipe)
+
+    @coroutine
+    def connect_pipe(self, address):
+        delay = CONNECT_PIPE_INIT_DELAY
+        while True:
+            # Unfortunately there is no way to do an overlapped connect to
+            # a pipe. Call CreateFile() in a loop until it doesn't fail with
+            # ERROR_PIPE_BUSY.
+            try:
+                handle = wrap_error(_overlapped.ConnectPipe, address)
+                break
+            except WindowsError as exc:
+                if exc.winerror != _overlapped.ERROR_PIPE_BUSY:
+                    raise
+
+            # ConnectPipe() failed with ERROR_PIPE_BUSY: retry later
+            delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
+            yield From(tasks.sleep(delay, loop=self._loop))
+
+        raise Return(windows_utils.PipeHandle(handle))
+
+    def wait_for_handle(self, handle, timeout=None):
+        """Wait for a handle.
+
+        Return a Future object. The result of the future is True if the wait
+        completed, or False if the wait did not complete (on timeout).
+        """
+        return self._wait_for_handle(handle, timeout, False)
+
+    def _wait_cancel(self, event, done_callback):
+        fut = self._wait_for_handle(event, None, True)
+        # add_done_callback() cannot be used because the wait may only
+        # complete in IocpProactor.close(), while the event loop is not
+        # running.
+        fut._done_callback = done_callback
+        return fut
+
+    def _wait_for_handle(self, handle, timeout, _is_cancel):
+        if timeout is None:
+            ms = _winapi.INFINITE
+        else:
+            # RegisterWaitForSingleObject() has a resolution of 1
+            # millisecond; round away from zero to wait *at least* timeout
+            # seconds.
+            ms = int(math.ceil(timeout * 1e3))
+
+        # We only create ov so we can use ov.address as a key for the cache.
+        ov = _overlapped.Overlapped(NULL)
+        wait_handle = _overlapped.RegisterWaitWithQueue(
+            handle, self._iocp, ov.address, ms)
+        if _is_cancel:
+            f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop)
+        else:
+            f = _WaitHandleFuture(ov, handle, wait_handle, self,
+                                  loop=self._loop)
+        if f._source_traceback:
+            del f._source_traceback[-1]
+
+        def finish_wait_for_handle(trans, key, ov):
+            # Note that this second wait means that we should only use
+            # this with handle types where a successful wait has no
+            # effect. So events or processes are all right, but locks
+            # or semaphores are not. Also note if the handle is
+            # signalled and then quickly reset, then we may return
+            # False even though we have not timed out.
+            return f._poll()
+
+        self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle)
+        return f
+
+    def _register_with_iocp(self, obj):
+        # To get notifications of finished ops on this object sent to the
+        # completion port, we must register the handle.
+        if obj not in self._registered:
+            self._registered.add(obj)
+            _overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0)
+            # XXX We could also use SetFileCompletionNotificationModes()
+            # to avoid sending notifications to the completion port for ops
+            # that succeed immediately.
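+
+    # Flow summary (descriptive comment, not part of the API): each handle
+    # is associated with the completion port once by _register_with_iocp();
+    # each operation is then wrapped in an _OverlappedFuture by _register();
+    # _poll() later matches completion packets back to their futures through
+    # the ov.address key stored in self._cache.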
+
+    def _register(self, ov, obj, callback):
+        # Return a future which will be set with the result of the
+        # operation when it completes. The future's value is actually
+        # the value returned by callback().
+        f = _OverlappedFuture(ov, loop=self._loop)
+        if f._source_traceback:
+            del f._source_traceback[-1]
+        if not ov.pending:
+            # The operation has completed, so no need to postpone the
+            # work. We cannot take this shortcut if we need the
+            # NumberOfBytes, CompletionKey values returned by
+            # PostQueuedCompletionStatus().
+            try:
+                value = callback(None, None, ov)
+            except OSError as e:
+                f.set_exception(e)
+            else:
+                f.set_result(value)
+            # Even if GetOverlappedResult() was called, we have to wait for
+            # the notification of the completion in
+            # GetQueuedCompletionStatus(). Register the overlapped operation
+            # to keep a reference to the OVERLAPPED object, otherwise the
+            # memory is freed and Windows may read uninitialized memory.
+
+        # Register the overlapped operation for later. Note that
+        # we only store obj to prevent it from being garbage
+        # collected too early.
+        self._cache[ov.address] = (f, ov, obj, callback)
+        return f
+
+    def _unregister(self, ov):
+        """Unregister an overlapped object.
+
+        Call this method when its future has been cancelled. The event can
+        already be signalled (pending in the proactor event queue). It is
+        also safe if the event is never signalled (because it was cancelled).
+        """
+        self._unregistered.append(ov)
+
+    def _get_accept_socket(self, family):
+        s = socket.socket(family)
+        s.settimeout(0)
+        return s
+
+    def _poll(self, timeout=None):
+        if timeout is None:
+            ms = INFINITE
+        elif timeout < 0:
+            raise ValueError("negative timeout")
+        else:
+            # GetQueuedCompletionStatus() has a resolution of 1 millisecond;
+            # round away from zero to wait *at least* timeout seconds.
+            ms = int(math.ceil(timeout * 1e3))
+            if ms >= INFINITE:
+                raise ValueError("timeout too big")
+
+        while True:
+            status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)
+            if status is None:
+                break
+            ms = 0
+
+            err, transferred, key, address = status
+            try:
+                f, ov, obj, callback = self._cache.pop(address)
+            except KeyError:
+                if self._loop.get_debug():
+                    self._loop.call_exception_handler({
+                        'message': ('GetQueuedCompletionStatus() returned an '
+                                    'unexpected event'),
+                        'status': ('err=%s transferred=%s key=%#x address=%#x'
+                                   % (err, transferred, key, address)),
+                    })
+
+                # key is either zero, or it is used to return a pipe
+                # handle which should be closed to avoid a leak.
+                if key not in (0, _overlapped.INVALID_HANDLE_VALUE):
+                    _winapi.CloseHandle(key)
+                continue
+
+            if obj in self._stopped_serving:
+                f.cancel()
+            # Don't call the callback if _register() already read the result
+            # or if the overlapped has been cancelled
+            elif not f.done():
+                try:
+                    value = callback(transferred, key, ov)
+                except OSError as e:
+                    f.set_exception(e)
+                    self._results.append(f)
+                else:
+                    f.set_result(value)
+                    self._results.append(f)
+
+        # Remove unregistered futures
+        for ov in self._unregistered:
+            self._cache.pop(ov.address, None)
+        del self._unregistered[:]
+
+    def _stop_serving(self, obj):
+        # obj is a socket or pipe handle. It will be closed in
+        # BaseProactorEventLoop._stop_serving() which will make any
+        # pending operations fail quickly.
+        self._stopped_serving.add(obj)
+
+    def close(self):
+        # Cancel remaining registered operations.
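+        # (Each cache entry is a (future, overlapped, obj, callback) tuple;
+        # cancelling the future also cancels its overlapped operation, see
+        # _OverlappedFuture.cancel().)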
+ for address, (fut, ov, obj, callback) in list(self._cache.items()): + if fut.cancelled(): + # Nothing to do with cancelled futures + pass + elif isinstance(fut, _WaitCancelFuture): + # _WaitCancelFuture must not be cancelled + pass + else: + try: + fut.cancel() + except OSError as exc: + if self._loop is not None: + context = { + 'message': 'Cancelling a future failed', + 'exception': exc, + 'future': fut, + } + if fut._source_traceback: + context['source_traceback'] = fut._source_traceback + self._loop.call_exception_handler(context) + + while self._cache: + if not self._poll(1): + logger.debug('taking long time to close proactor') + + self._results = [] + if self._iocp is not None: + _winapi.CloseHandle(self._iocp) + self._iocp = None + + def __del__(self): + self.close() + + +class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport): + + def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): + self._proc = windows_utils.Popen( + args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr, + bufsize=bufsize, **kwargs) + + def callback(f): + returncode = self._proc.poll() + self._process_exited(returncode) + + f = self._loop._proactor.wait_for_handle(int(self._proc._handle)) + f.add_done_callback(callback) + + +SelectorEventLoop = _WindowsSelectorEventLoop + + +class _WindowsDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy): + _loop_factory = SelectorEventLoop + + +DefaultEventLoopPolicy = _WindowsDefaultEventLoopPolicy diff --git a/trollius/windows_utils.py b/trollius/windows_utils.py new file mode 100644 index 00000000..288d5478 --- /dev/null +++ b/trollius/windows_utils.py @@ -0,0 +1,238 @@ +""" +Various Windows specific bits and pieces +""" +from __future__ import absolute_import + +import sys + +if sys.platform != 'win32': # pragma: no cover + raise ImportError('win32 only') + +import itertools +import msvcrt +import os +import socket +import subprocess +import tempfile +import warnings + +import six + +from . import py33_winapi as _winapi +from . import compat +from .py33_exceptions import wrap_error, BlockingIOError, InterruptedError + + +__all__ = ['socketpair', 'pipe', 'Popen', 'PIPE', 'PipeHandle'] + + +# Constants/globals + + +BUFSIZE = 8192 +PIPE = subprocess.PIPE +STDOUT = subprocess.STDOUT +_mmap_counter = itertools.count() + + +if hasattr(socket, 'socketpair'): + # Since Python 3.5, socket.socketpair() is now also available on Windows + socketpair = socket.socketpair +else: + # Replacement for socket.socketpair() + def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): + """A socket pair usable as a self-pipe, for Windows. + + Origin: https://gist.github.com/4325783, by Geert Jansen. + Public domain. + """ + if family == socket.AF_INET: + host = '127.0.0.1' + elif family == socket.AF_INET6: + host = '::1' + else: + raise ValueError("Only AF_INET and AF_INET6 socket address " + "families are supported") + if type != socket.SOCK_STREAM: + raise ValueError("Only SOCK_STREAM socket type is supported") + if proto != 0: + raise ValueError("Only protocol zero is supported") + + # We create a connected TCP socket. Note the trick with setblocking(0) + # that prevents us from having to create a thread. 
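+        # (connect() on the non-blocking client socket raises BlockingIOError
+        # immediately; the listener's accept() then completes the pair in the
+        # same thread, so no helper thread is needed.)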
+        lsock = socket.socket(family, type, proto)
+        try:
+            lsock.bind((host, 0))
+            lsock.listen(1)
+            # On IPv6, ignore flow_info and scope_id
+            addr, port = lsock.getsockname()[:2]
+            csock = socket.socket(family, type, proto)
+            try:
+                csock.setblocking(False)
+                try:
+                    wrap_error(csock.connect, (addr, port))
+                except (BlockingIOError, InterruptedError):
+                    pass
+                csock.setblocking(True)
+                ssock, _ = lsock.accept()
+            except:
+                csock.close()
+                raise
+        finally:
+            lsock.close()
+        return (ssock, csock)
+
+
+# Replacement for os.pipe() using handles instead of fds
+
+
+def pipe(duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
+    """Like os.pipe() but with overlapped support and using handles not fds."""
+    address = tempfile.mktemp(prefix=r'\\.\pipe\python-pipe-%d-%d-' %
+                              (os.getpid(), next(_mmap_counter)))
+
+    if duplex:
+        openmode = _winapi.PIPE_ACCESS_DUPLEX
+        access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
+        obsize, ibsize = bufsize, bufsize
+    else:
+        openmode = _winapi.PIPE_ACCESS_INBOUND
+        access = _winapi.GENERIC_WRITE
+        obsize, ibsize = 0, bufsize
+
+    openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
+
+    if overlapped[0]:
+        openmode |= _winapi.FILE_FLAG_OVERLAPPED
+
+    if overlapped[1]:
+        flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
+    else:
+        flags_and_attribs = 0
+
+    h1 = h2 = None
+    try:
+        h1 = _winapi.CreateNamedPipe(
+            address, openmode, _winapi.PIPE_WAIT,
+            1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
+
+        h2 = _winapi.CreateFile(
+            address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
+            flags_and_attribs, _winapi.NULL)
+
+        ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
+        if hasattr(ov, 'GetOverlappedResult'):
+            # _winapi module of Python 3.3
+            ov.GetOverlappedResult(True)
+        else:
+            # _overlapped module
+            wrap_error(ov.getresult, True)
+        return h1, h2
+    except:
+        if h1 is not None:
+            _winapi.CloseHandle(h1)
+        if h2 is not None:
+            _winapi.CloseHandle(h2)
+        raise
+
+
+# Wrapper for a pipe handle
+
+
+class PipeHandle(object):
+    """Wrapper for an overlapped pipe handle which is vaguely file-object like.
+
+    The IOCP event loop can use these instead of socket objects.
+    """
+    def __init__(self, handle):
+        self._handle = handle
+
+    def __repr__(self):
+        if self._handle is not None:
+            handle = 'handle=%r' % self._handle
+        else:
+            handle = 'closed'
+        return '<%s %s>' % (self.__class__.__name__, handle)
+
+    @property
+    def handle(self):
+        return self._handle
+
+    def fileno(self):
+        if self._handle is None:
+            raise ValueError("I/O operation on closed pipe")
+        return self._handle
+
+    def close(self, CloseHandle=_winapi.CloseHandle):
+        if self._handle is not None:
+            CloseHandle(self._handle)
+            self._handle = None
+
+    def __del__(self):
+        if self._handle is not None:
+            if six.PY3:
+                warnings.warn("unclosed %r" % self, ResourceWarning)
+            self.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, t, v, tb):
+        self.close()
+
+
+# Replacement for subprocess.Popen using overlapped pipe handles
+
+
+class Popen(subprocess.Popen):
+    """Replacement for subprocess.Popen using overlapped pipe handles.
+
+    The stdin, stdout, stderr are None or instances of PipeHandle.
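+
+    Illustrative use (hypothetical command, shown for documentation only):
+
+        p = Popen(['cmd', '/c', 'echo', 'hi'], stdout=PIPE)
+        # p.stdout is a PipeHandle: the IOCP proactor can read from it with
+        # overlapped I/O instead of blocking file-descriptor reads.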
+    """
+    def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds):
+        assert not kwds.get('universal_newlines')
+        assert kwds.get('bufsize', 0) == 0
+        stdin_rfd = stdout_wfd = stderr_wfd = None
+        stdin_wh = stdout_rh = stderr_rh = None
+        if stdin == PIPE:
+            stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True)
+            stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY)
+        else:
+            stdin_rfd = stdin
+        if stdout == PIPE:
+            stdout_rh, stdout_wh = pipe(overlapped=(True, False))
+            stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0)
+        else:
+            stdout_wfd = stdout
+        if stderr == PIPE:
+            stderr_rh, stderr_wh = pipe(overlapped=(True, False))
+            stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0)
+        elif stderr == STDOUT:
+            stderr_wfd = stdout_wfd
+        else:
+            stderr_wfd = stderr
+        try:
+            super(Popen, self).__init__(args,
+                                        stdin=stdin_rfd,
+                                        stdout=stdout_wfd,
+                                        stderr=stderr_wfd,
+                                        **kwds)
+        except:
+            for h in (stdin_wh, stdout_rh, stderr_rh):
+                if h is not None:
+                    _winapi.CloseHandle(h)
+            raise
+        else:
+            if stdin_wh is not None:
+                self.stdin = PipeHandle(stdin_wh)
+            if stdout_rh is not None:
+                self.stdout = PipeHandle(stdout_rh)
+            if stderr_rh is not None:
+                self.stderr = PipeHandle(stderr_rh)
+        finally:
+            if stdin == PIPE:
+                os.close(stdin_rfd)
+            if stdout == PIPE:
+                os.close(stdout_wfd)
+            if stderr == PIPE:
+                os.close(stderr_wfd)
diff --git a/update-asyncio-step1.sh b/update-asyncio-step1.sh
new file mode 100755
index 00000000..e2ac4f6f
--- /dev/null
+++ b/update-asyncio-step1.sh
@@ -0,0 +1,12 @@
+set -e -x
+git checkout trollius
+git pull -u
+git checkout master
+git pull https://github.com/python/asyncio.git
+
+git checkout trollius
+# rename-threshold=25: a similarity of 25% is enough to consider two files
+# rename candidates
+git merge -X rename-threshold=25 master
+
+echo "Now run ./update-asyncio-step2.sh"
diff --git a/update-asyncio-step2.sh b/update-asyncio-step2.sh
new file mode 100755
index 00000000..f813b6d3
--- /dev/null
+++ b/update-asyncio-step2.sh
@@ -0,0 +1,36 @@
+set -e
+
+# Check for merge conflicts
+if $(git status --porcelain|grep -q '^.U '); then
+    echo "Fix the following conflicts:"
+    git status
+    exit 1
+fi
+
+# Ensure that yield from is not used
+if $(git diff|grep -q 'yield from'); then
+    echo "yield from present in changed code!"
+    git diff | grep 'yield from' -B5 -A3
+    exit 1
+fi
+
+# Ensure that mock patches the trollius module, not asyncio
+if $(grep -q 'patch.*asyncio' tests/*.py); then
+    echo "Fix the following patch lines in tests/"
+    grep 'patch.*asyncio' tests/*.py
+    exit 1
+fi
+
+# Python 2.6 compatibility
+if $(grep -q -E '\{[^0-9].*format' */*.py); then
+    echo "Issues with Python 2.6 compatibility:"
+    grep -E '\{[^0-9].*format' */*.py
+    exit 1
+fi
+if $(grep -q -F 'super()' */*.py); then
+    echo "Issues with Python 2.6 compatibility:"
+    grep -F 'super()' */*.py
+    exit 1
+fi
+
+echo "Now run ./update-asyncio-step3.sh"
diff --git a/update-asyncio-step3.sh b/update-asyncio-step3.sh
new file mode 100755
index 00000000..cc13503e
--- /dev/null
+++ b/update-asyncio-step3.sh
@@ -0,0 +1,10 @@
+set -e -x
+./update-asyncio-step2.sh
+tox -e py27,py34
+
+git status
+echo
+echo "Now type:"
+echo "git commit -m 'Merge asyncio into trollius'"
+echo
+echo "You may have to add unstaged files"