diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 2ae9facd..f957e652 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -113,6 +113,8 @@ Programming style '''Powerline super module''' + from __future__ import (unicode_literals, division, absolute_import, print_function) + import sys from argparse import ArgumentParser diff --git a/MANIFEST.in b/MANIFEST.in index 1e03cb74..e3e42f79 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,3 +2,5 @@ recursive-include powerline *.json *.vim recursive-include powerline/bindings *.* recursive-exclude powerline/bindings *.pyc *.pyo recursive-include client *.* +recursive-include docs/source *.rst *.py +include docs/Makefile diff --git a/README.rst b/README.rst index efc6d4a0..65dc614d 100644 --- a/README.rst +++ b/README.rst @@ -34,24 +34,24 @@ Features * **Support for prompts and statuslines in many applications.** Originally created exclusively for vim statuslines, the project has evolved to provide statuslines in tmux and several WMs, and prompts for shells like - bash/zsh and other applications. It's simple to write renderers for any - other applications that Powerline doesn't yet support. + bash/zsh and other applications. It’s simple to write renderers for any + other applications that Powerline doesn’t yet support. * **Configuration and colorschemes written in JSON.** JSON is a standardized, simple and easy to use file format that allows for easy - user configuration across all of Powerline's supported applications. + user configuration across all of Powerline’s supported applications. * **Fast and lightweight, with daemon support for even better performance.** Although the code base spans a couple of thousand lines of code with no - goal of "less than X lines of code", the main focus is on good performance + goal of “less than X lines of code”, the main focus is on good performance and as little code as possible while still providing a rich set of features. The new daemon also ensures that only one Python instance is launched for prompts and statuslines, which provides excellent performance. -*But I hate Python / I don't need shell prompts / this is just too much +*But I hate Python / I don’t need shell prompts / this is just too much hassle for me / what happened to the original vim-powerline project / …* You should check out some of the Powerline derivatives. The most lightweight -and feature-rich alternative is currently Bailey Ling's `vim-airline +and feature-rich alternative is currently Bailey Ling’s `vim-airline `_ project. ------ diff --git a/client/powerline.py b/client/powerline.py index 07a01cb6..78403887 100755 --- a/client/powerline.py +++ b/client/powerline.py @@ -7,13 +7,20 @@ import socket import errno import os -from locale import getpreferredencoding - try: from posix import environ except ImportError: from os import environ +# XXX Hack for importing powerline modules to work. 
+sys.path.pop(0) + +try: + from powerline.lib.encoding import get_preferred_output_encoding +except ImportError: + sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__))))) + from powerline.lib.encoding import get_preferred_output_encoding + if len(sys.argv) < 2: print('Must provide at least one argument.', file=sys.stderr) @@ -51,7 +58,7 @@ except Exception: args = ['powerline-render'] + sys.argv[1:] os.execvp('powerline-render', args) -fenc = getpreferredencoding() or 'utf-8' +fenc = get_preferred_output_encoding() def tobytes(s): diff --git a/docs/source/conf.py b/docs/source/conf.py index f451f462..ea3b7f54 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -11,7 +11,7 @@ sys.path.insert(0, os.path.abspath(os.getcwd())) extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode'] source_suffix = '.rst' master_doc = 'index' -project = u'Powerline' +project = 'Powerline' version = 'beta' release = 'beta' exclude_patterns = ['_build'] @@ -22,7 +22,7 @@ html_show_copyright = False on_rtd = os.environ.get('READTHEDOCS', None) == 'True' -if not on_rtd: # only import and set the theme if we're building docs locally +if not on_rtd: # only import and set the theme if we’re building docs locally try: import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' diff --git a/docs/source/configuration.rst b/docs/source/configuration.rst index fa7c420d..ed31faba 100644 --- a/docs/source/configuration.rst +++ b/docs/source/configuration.rst @@ -87,29 +87,29 @@ You can move the segment dictionaries around to change the segment positions, or remove the entire dictionary to remove the segment from the prompt or statusline. -.. note:: It's essential that the contents of all your configuration files - is valid JSON! It's strongly recommended that you run your configuration +.. note:: It’s essential that the contents of all your configuration files + is valid JSON! It’s strongly recommended that you run your configuration files through ``jsonlint`` after changing them. -Some segments need a user configuration to work properly. Here's a couple of +Some segments need a user configuration to work properly. Here’s a couple of segments that you may want to customize right away: **E-mail alert segment** You have to set your username and password (and possibly server/port) - for the e-mail alert segment. If you're using GMail it's recommended + for the e-mail alert segment. If you’re using GMail it’s recommended that you `generate an application-specific password `_ for this purpose. Open a theme file, scroll down to the ``email_imap_alert`` segment and - set your ``username`` and ``password``. The server defaults to GMail's + set your ``username`` and ``password``. The server defaults to GMail’s IMAP server, but you can set the server/port by adding a ``server`` and a ``port`` argument. **Weather segment** The weather segment will try to find your location using a GeoIP lookup, - so unless you're on a VPN you probably won't have to change the location + so unless you’re on a VPN you probably won’t have to change the location query. - If you want to change the location query or the temperature unit you'll + If you want to change the location query or the temperature unit you’ll have to update the segment arguments. 
Open a theme file, scroll down to the weather segment and update it to include unit/location query arguments: diff --git a/docs/source/configuration/reference.rst b/docs/source/configuration/reference.rst index 76931690..4e3ec2b7 100644 --- a/docs/source/configuration/reference.rst +++ b/docs/source/configuration/reference.rst @@ -177,7 +177,7 @@ Color definitions * A cterm color index. * A list with a cterm color index and a hex color string (e.g. ``[123, "aabbcc"]``). This is useful for colorschemes that use colors that - aren't available in color terminals. + aren’t available in color terminals. ``gradients`` Gradient definitions, consisting of a dict where the key is the name of the @@ -236,7 +236,7 @@ override those from each previous file. It is required that either ``mode_translations`` Mode-specific highlighting for extensions that support it (e.g. the vim - extension). It's an easy way of changing a color in a specific mode. + extension). It’s an easy way of changing a color in a specific mode. Consists of a dict where the key is the mode and the value is a dict with the following options: @@ -308,7 +308,7 @@ ascii Theme without any unicode characters at all ``dividers`` Defines the dividers used in all Powerline extensions. This option - should usually only be changed if you don't have a patched font, or if + should usually only be changed if you don’t have a patched font, or if you use a font patched with the legacy font patcher. The ``hard`` dividers are used to divide segments with different diff --git a/docs/source/configuration/segments.rst b/docs/source/configuration/segments.rst index 7b6edaaf..63b4975e 100644 --- a/docs/source/configuration/segments.rst +++ b/docs/source/configuration/segments.rst @@ -15,7 +15,7 @@ always absolute. Segments are regular Python functions, and they may accept arguments. All arguments should have a default value which will be used for themes that -don't provide an ``args`` dict. +don’t provide an ``args`` dict. More information is available in :ref:`Writing segments ` section. diff --git a/docs/source/configuration/segments/common.rst b/docs/source/configuration/segments/common.rst index 49dc2519..5d52d69a 100644 --- a/docs/source/configuration/segments/common.rst +++ b/docs/source/configuration/segments/common.rst @@ -2,5 +2,56 @@ Common segments *************** -.. automodule:: powerline.segments.common +VCS submodule +============= + +.. automodule:: powerline.segments.common.vcs + :members: + +System properties +================= + +.. automodule:: powerline.segments.common.sys + :members: + +Network +======= + +.. automodule:: powerline.segments.common.net + :members: + +Current environment +=================== + +.. automodule:: powerline.segments.common.env + :members: + +Battery +======= + +.. automodule:: powerline.segments.common.bat + :members: + +Weather +======= + +.. automodule:: powerline.segments.common.wthr + :members: + +Date and time +============= + +.. automodule:: powerline.segments.common.time + :members: + +Mail +==== + +.. automodule:: powerline.segments.common.mail + :members: + +Media players +============= + +.. automodule:: powerline.segments.common.players :members: diff --git a/docs/source/develop/segments.rst b/docs/source/develop/segments.rst index 6454caa8..f69a600c 100644 --- a/docs/source/develop/segments.rst +++ b/docs/source/develop/segments.rst @@ -371,6 +371,10 @@ Vim ``segment_info`` argument is a dictionary with the following keys: ``mode`` Current mode. 
+``encoding`` + Value of ``&encoding`` from the time when powerline was initialized. It + should be used to convert return values. + .. note:: Your segment generally should not assume that it is run for the current window, current buffer or current tabpage. “Current window” and “current diff --git a/docs/source/installation.rst b/docs/source/installation.rst index 9fa1b87f..089cd190 100644 --- a/docs/source/installation.rst +++ b/docs/source/installation.rst @@ -90,8 +90,8 @@ Patched fonts This method is the fallback method and works for every terminal, with the exception of :ref:`rxvt-unicode `. -Download the font of your choice from `powerline-fonts`_. If you can't find -your preferred font in the `powerline-fonts`_ repo, you'll have to patch your +Download the font of your choice from `powerline-fonts`_. If you can’t find +your preferred font in the `powerline-fonts`_ repo, you’ll have to patch your own font instead. .. _powerline-fonts: https://github.com/Lokaltog/powerline-fonts diff --git a/docs/source/installation/linux.rst b/docs/source/installation/linux.rst index 5f8cd6d7..7b7f17c8 100644 --- a/docs/source/installation/linux.rst +++ b/docs/source/installation/linux.rst @@ -10,7 +10,7 @@ automatically do most of the configuration for you. * `Arch Linux (AUR), Python 3 version `_ * Gentoo Live ebuild in `raiagent `_ overlay -If you're running a distribution without an official package you'll have to +If you’re running a distribution without an official package you’ll have to follow the installation guide below: 1. Install Python 3.2+ or Python 2.6+ with ``pip``. This step is @@ -34,8 +34,8 @@ Fonts installation Fontconfig ---------- -This method only works on Linux. It's the recommended method if your -terminal emulator supports it as you don't have to patch any fonts, and it +This method only works on Linux. It’s the recommended method if your +terminal emulator supports it as you don’t have to patch any fonts, and it generally works well with any coding font. #. Download the latest version of the symbol font and fontconfig file:: @@ -54,16 +54,16 @@ generally works well with any coding font. fc-cache -vf ~/.fonts/ #. Install the fontconfig file. For newer versions of fontconfig the config - path is ``~/.config/fontconfig/conf.d/``, for older versions it's + path is ``~/.config/fontconfig/conf.d/``, for older versions it’s ``~/.fonts.conf.d/``:: mv 10-powerline-symbols.conf ~/.config/fontconfig/conf.d/ -If you can't see the custom symbols, please close all instances of your +If you can’t see the custom symbols, please close all instances of your terminal emulator. You may need to restart X for the changes to take effect. -If you *still* can't see the custom symbols, double-check that you have +If you *still* can’t see the custom symbols, double-check that you have installed the font to a valid X font path, and that you have installed the fontconfig file to a valid fontconfig path. Alternatively try to install a :ref:`patched font `. @@ -87,9 +87,9 @@ After installing the patched font you need to update Gvim or your terminal emulator to use the patched font. The correct font usually ends with *for Powerline*. -If you can't see the custom symbols, please close all instances of your +If you can’t see the custom symbols, please close all instances of your terminal emulator. You may need to restart X for the changes to take effect. 
-If you *still* can't see the custom symbols, double-check that you have +If you *still* can’t see the custom symbols, double-check that you have installed the font to a valid X font path. diff --git a/docs/source/installation/osx.rst b/docs/source/installation/osx.rst index fc2f6f8f..325e8f7d 100644 --- a/docs/source/installation/osx.rst +++ b/docs/source/installation/osx.rst @@ -45,7 +45,7 @@ Vim installation ================ Any terminal vim version with Python 3.2+ or Python 2.6+ support should work, -but if you're using MacVim you need to install it using the following command:: +but if you’re using MacVim you need to install it using the following command:: brew install macvim --env-std --override-system-vim diff --git a/docs/source/overview.rst b/docs/source/overview.rst index b599b349..73804ea5 100644 --- a/docs/source/overview.rst +++ b/docs/source/overview.rst @@ -20,24 +20,24 @@ Features * **Support for prompts and statuslines in many applications.** Originally created exclusively for vim statuslines, the project has evolved to provide statuslines in tmux and several WMs, and prompts for shells like - bash/zsh and other applications. It's simple to write renderers for any - other applications that Powerline doesn't yet support. + bash/zsh and other applications. It’s simple to write renderers for any + other applications that Powerline doesn’t yet support. * **Configuration and colorschemes written in JSON.** JSON is a standardized, simple and easy to use file format that allows for easy - user configuration across all of Powerline's supported applications. + user configuration across all of Powerline’s supported applications. * **Fast and lightweight, with daemon support for even better performance.** Although the code base spans a couple of thousand lines of code with no - goal of "less than X lines of code", the main focus is on good performance + goal of “less than X lines of code”, the main focus is on good performance and as little code as possible while still providing a rich set of features. The new daemon also ensures that only one Python instance is launched for prompts and statuslines, which provides excellent performance. -*But I hate Python / I don't need shell prompts / this is just too much +*But I hate Python / I don’t need shell prompts / this is just too much hassle for me / what happened to the original vim-powerline project / …* You should check out some of the Powerline derivatives. The most lightweight -and feature-rich alternative is currently Bailey Ling's `vim-airline +and feature-rich alternative is currently Bailey Ling’s `vim-airline `_ project. Screenshots diff --git a/docs/source/tips-and-tricks.rst b/docs/source/tips-and-tricks.rst index 4be6c764..541b593c 100644 --- a/docs/source/tips-and-tricks.rst +++ b/docs/source/tips-and-tricks.rst @@ -47,7 +47,7 @@ In the ``~/.Xdefaults`` add the following:: I noticed that Source Code Pro has the glyphs there already, but the pixel size of the fonts play a role in whether or not the > or the < separators showing up or not. Using font size 12, glyphs on the right hand side of the powerline are -present, but the ones on the left don't. Pixel size 14, brings the reverse +present, but the ones on the left don’t. Pixel size 14, brings the reverse problem. Font size 13 seems to work just fine. 
Reloading powerline after update diff --git a/docs/source/troubleshooting.rst b/docs/source/troubleshooting.rst index 120d7107..9583c713 100644 --- a/docs/source/troubleshooting.rst +++ b/docs/source/troubleshooting.rst @@ -13,14 +13,14 @@ System-specific issues Common issues ============= -I'm using tmux and Powerline looks like crap, what's wrong? +I’m using tmux and Powerline looks like crap, what’s wrong? ----------------------------------------------------------- * You need to tell tmux that it has 256-color capabilities. Add this to your :file:`.tmux.conf` to solve this issue:: set -g default-terminal "screen-256color" -* If you're using iTerm2, make sure that you have enabled the setting +* If you’re using iTerm2, make sure that you have enabled the setting :guilabel:`Set locale variables automatically` in :menuselection:`Profiles --> Terminal --> Environment`. * Make sure tmux knows that terminal it is running in support 256 colors. You diff --git a/docs/source/troubleshooting/linux.rst b/docs/source/troubleshooting/linux.rst index ed069d94..9b3a5113 100644 --- a/docs/source/troubleshooting/linux.rst +++ b/docs/source/troubleshooting/linux.rst @@ -2,19 +2,27 @@ Troubleshooting on Linux ************************ -I can't see any fancy symbols, what's wrong? +I can’t see any fancy symbols, what’s wrong? -------------------------------------------- -* Make sure that you've configured gvim or your terminal emulator to use +* Make sure that you’ve configured gvim or your terminal emulator to use a patched font. * You need to set your ``LANG`` and ``LC_*`` environment variables to - a UTF-8 locale (e.g. ``LANG=en_US.utf8``). Consult your Linux distro's + a UTF-8 locale (e.g. ``LANG=en_US.utf8``). Consult your Linux distro’s documentation for information about setting these variables correctly. * Make sure that vim is compiled with the ``--with-features=big`` flag. -* If you're using rxvt-unicode, make sure that it's compiled with the +* If you’re using rxvt-unicode make sure that it’s compiled with the ``--enable-unicode3`` flag. +* If you’re using xterm make sure you have told it to work with unicode. You may + need ``-u8`` command-line argument, ``uxterm`` shell wrapper that is usually + shipped with xterm for this or ``xterm*utf8`` property set to ``1`` or ``2`` + in ``~/.Xresources`` (applied with ``xrdb``). Note that in case ``uxterm`` is + used configuration is done via ``uxterm*…`` properties and not ``xterm*…``. -The fancy symbols look a bit blurry or "off"! + In any case the only absolute requirement is launching xterm with UTF-8 + locale. + +The fancy symbols look a bit blurry or “off”! --------------------------------------------- * Make sure that you have patched all variants of your font (i.e. both the diff --git a/docs/source/troubleshooting/osx.rst b/docs/source/troubleshooting/osx.rst index 653a06be..4be81490 100644 --- a/docs/source/troubleshooting/osx.rst +++ b/docs/source/troubleshooting/osx.rst @@ -2,21 +2,21 @@ Troubleshooting on OS X *********************** -I can't see any fancy symbols, what's wrong? +I can’t see any fancy symbols, what’s wrong? -------------------------------------------- -* If you're using iTerm2, please update to `this revision +* If you’re using iTerm2, please update to `this revision `_ or newer. * You need to set your ``LANG`` and ``LC_*`` environment variables to - a UTF-8 locale (e.g. ``LANG=en_US.utf8``). Consult your Linux distro's + a UTF-8 locale (e.g. ``LANG=en_US.utf8``). 
Consult your Linux distro’s documentation for information about setting these variables correctly. The colors look weird in the default OS X Terminal app! ------------------------------------------------------- -* The arrows may have the wrong colors if you have changed the "minimum - contrast" slider in the color tab of your OS X settings. +* The arrows may have the wrong colors if you have changed the “minimum + contrast” slider in the color tab of your OS X settings. * The default OS X Terminal app is known to have some issues with the Powerline colors. Please use another terminal emulator. iTerm2 should work fine. @@ -24,8 +24,8 @@ The colors look weird in the default OS X Terminal app! The colors look weird in iTerm2! -------------------------------- -* The arrows may have the wrong colors if you have changed the "minimum - contrast" slider in the color tab of your OS X settings. +* The arrows may have the wrong colors if you have changed the “minimum + contrast” slider in the color tab of your OS X settings. * Please disable background transparency to resolve this issue. Statusline is getting wrapped to the next line in iTerm2 diff --git a/docs/source/usage.rst b/docs/source/usage.rst index c5d829fb..a5301c92 100644 --- a/docs/source/usage.rst +++ b/docs/source/usage.rst @@ -12,11 +12,11 @@ The vim plugin requires a vim version with Python support compiled in. You can check if your vim supports Python by running ``vim --version | grep +python``. -If your vim version doesn't have support for Python, you'll have to compile +If your vim version doesn’t have support for Python, you’ll have to compile it with the ``--enable-pythoninterp`` flag (``--enable-python3interp`` if you want Python 3 support instead). Note that this also requires the related Python headers to be installed on your system. Please consult your -distribution's documentation for details on how to compile and install +distribution’s documentation for details on how to compile and install packages. Vim version 7.4 or newer is recommended for performance reasons, but Powerline diff --git a/docs/source/usage/other.rst b/docs/source/usage/other.rst index 06b39472..befd9a06 100644 --- a/docs/source/usage/other.rst +++ b/docs/source/usage/other.rst @@ -25,7 +25,7 @@ directory: set rtp+={repository_root}/powerline/bindings/vim -If you're using pathogen and don't want Powerline functionality in any other +If you’re using pathogen and don’t want Powerline functionality in any other applications, simply add Powerline as a bundle and point the path above to the Powerline bundle directory, e.g. ``~/.vim/bundle/powerline/powerline/bindings/vim``. 
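(Editor's illustration, not part of the patch.) The ``docs/source/develop/segments.rst`` hunk earlier in this diff documents a new ``encoding`` key in the Vim ``segment_info`` dictionary, holding the value of ``&encoding`` at the time Powerline was initialized, to be used for converting return values. A minimal sketch of a segment that respects that key; the segment name and fallback are illustrative only and do not appear in this patch::

    # Hypothetical Vim segment using the documented segment_info['encoding'] key.
    # It decodes a bytes buffer name with the encoding Vim was started with,
    # falling back to UTF-8 when the key is absent.
    def buffer_name_label(pl, segment_info):
        '''Return the current buffer name as text.

        :param pl: Powerline logger-like object (unused here).
        :param dict segment_info: dictionary described in the segments docs.
        '''
        name = segment_info['buffer'].name  # may be bytes on Python 2
        if not name:
            return None
        encoding = segment_info.get('encoding', 'utf-8')
        if isinstance(name, bytes):
            name = name.decode(encoding, 'replace')
        return name
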
diff --git a/powerline/__init__.py b/powerline/__init__.py index 1d01d5d5..026231b9 100644 --- a/powerline/__init__.py +++ b/powerline/__init__.py @@ -5,7 +5,6 @@ import os import sys import logging -from locale import getpreferredencoding from threading import Lock, Event from powerline.colorscheme import Colorscheme @@ -13,6 +12,11 @@ from powerline.lib.config import ConfigLoader from powerline.lib.unicode import safe_unicode, FailedUnicode from powerline.config import DEFAULT_SYSTEM_CONFIG_DIR from powerline.lib import mergedicts +from powerline.lib.encoding import get_preferred_output_encoding + + +class NotInterceptedError(BaseException): + pass def _config_loader_condition(path): @@ -413,7 +417,7 @@ class Powerline(object): self.setup_kwargs = {} self.imported_modules = set() - get_encoding = staticmethod(getpreferredencoding) + get_encoding = staticmethod(get_preferred_output_encoding) '''Get encoding used by the current application Usually returns encoding of the current locale. @@ -744,16 +748,12 @@ class Powerline(object): self.update_renderer() return self.renderer.render(*args, **kwargs) except Exception as e: + exc = e try: self.exception('Failed to render: {0}', str(e)) except Exception as e: - # Updates e variable to new value, masking previous one. - # Normally it is the same exception (due to raise in case pl is - # unset), but it may also show error in logger. Note that latter - # is not logged by logger for obvious reasons, thus this also - # prevents us from seeing logger traceback. - pass - return FailedUnicode(safe_unicode(e)) + exc = e + return FailedUnicode(safe_unicode(exc)) def render_above_lines(self, *args, **kwargs): '''Like .render(), but for ``self.renderer.render_above_lines()`` @@ -763,16 +763,12 @@ class Powerline(object): for line in self.renderer.render_above_lines(*args, **kwargs): yield line except Exception as e: + exc = e try: self.exception('Failed to render: {0}', str(e)) except Exception as e: - # Updates e variable to new value, masking previous one. - # Normally it is the same exception (due to raise in case pl is - # unset), but it may also show error in logger. Note that latter - # is not logged by logger for obvious reasons, thus this also - # prevents us from seeing logger traceback. - pass - yield FailedUnicode(safe_unicode(e)) + exc = e + yield FailedUnicode(safe_unicode(exc)) def setup(self, *args, **kwargs): '''Setup the environment to use powerline. 
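(Editor's illustration, not part of the patch.) The ``powerline/__init__.py`` hunks above store the exception to report in a separate ``exc`` variable before logging is attempted: on Python 3 the old code referenced ``e`` after the inner ``except`` block, where that name is no longer bound, so the fallback ``FailedUnicode`` value could not be produced. A standalone sketch of the adopted pattern, with illustrative function names only::

    # Keep the original error in a separate variable so a failure inside the
    # logging call does not lose track of what went wrong during rendering.
    def render_safely(render, log_exception):
        try:
            return render()
        except Exception as e:
            exc = e            # remember the rendering error
            try:
                log_exception('Failed to render: {0}'.format(e))
            except Exception as e:
                exc = e        # logging itself failed; report that instead
            return 'FAILED: {0}'.format(exc)


    if __name__ == '__main__':
        def broken_render():
            raise ValueError('no colorscheme')

        def broken_logger(msg):
            raise RuntimeError('logger not configured')

        # Prints "FAILED: logger not configured" instead of raising NameError.
        print(render_safely(broken_render, broken_logger))
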
diff --git a/powerline/bindings/awesome/powerline-awesome.py b/powerline/bindings/awesome/powerline-awesome.py index 11d27f57..914a9a18 100755 --- a/powerline/bindings/awesome/powerline-awesome.py +++ b/powerline/bindings/awesome/powerline-awesome.py @@ -33,7 +33,7 @@ def read_to_log(pl, client): while True: start_time = monotonic() s = powerline.render(side='right') - request = "powerline_widget:set_markup('" + s.replace('\\', '\\\\').replace("'", "\\'") + "')\n" + request = 'powerline_widget:set_markup(\'' + s.replace('\\', '\\\\').replace('\'', '\\\'') + '\')\n' client = Popen(['awesome-client'], shell=False, stdout=PIPE, stderr=PIPE, stdin=PIPE) client.stdin.write(request.encode('utf-8')) client.stdin.close() diff --git a/powerline/bindings/bash/powerline.sh b/powerline/bindings/bash/powerline.sh index 751e65f8..cb125a00 100644 --- a/powerline/bindings/bash/powerline.sh +++ b/powerline/bindings/bash/powerline.sh @@ -31,7 +31,7 @@ _powerline_init_tmux_support() { # TMUX variable may be unset to create new tmux session inside this one _POWERLINE_TMUX="$TMUX" - trap "_powerline_tmux_set_columns" WINCH + trap '_powerline_tmux_set_columns' WINCH _powerline_tmux_set_columns test "x$PROMPT_COMMAND" != "x${PROMPT_COMMAND/_powerline_tmux_set_pwd}" || diff --git a/powerline/bindings/config.py b/powerline/bindings/config.py index 71afca84..bbd4974b 100644 --- a/powerline/bindings/config.py +++ b/powerline/bindings/config.py @@ -5,13 +5,12 @@ import os import re import sys -from locale import getpreferredencoding - from powerline.config import POWERLINE_ROOT, TMUX_CONFIG_DIRECTORY from powerline.lib.config import ConfigLoader from powerline import generate_config_finder, load_config, create_logger, PowerlineLogger, finish_common_config from powerline.lib.shell import which from powerline.bindings.tmux import TmuxVersionInfo, run_tmux_command, get_tmux_version +from powerline.lib.encoding import get_preferred_output_encoding CONFIG_FILE_NAME = re.compile(r'powerline_tmux_(?P\d+)\.(?P\d+)(?P[a-z]+)?(?:_(?Pplus|minus))?\.conf') @@ -84,7 +83,7 @@ def get_main_config(args): def create_powerline_logger(args): config = get_main_config(args) - common_config = finish_common_config(getpreferredencoding(), config['common']) + common_config = finish_common_config(get_preferred_output_encoding(), config['common']) logger = create_logger(common_config) return PowerlineLogger(use_daemon_threads=True, logger=logger, ext='config') diff --git a/powerline/bindings/qtile/widget.py b/powerline/bindings/qtile/widget.py index 83acd9fa..897658c9 100644 --- a/powerline/bindings/qtile/widget.py +++ b/powerline/bindings/qtile/widget.py @@ -8,7 +8,7 @@ from powerline import Powerline as PowerlineCore class Powerline(base._TextBox): - def __init__(self, timeout=2, text=" ", width=bar.CALCULATED, **config): + def __init__(self, timeout=2, text=' ', width=bar.CALCULATED, **config): base._TextBox.__init__(self, text, width, **config) self.timeout_add(timeout, self.update) self.powerline = PowerlineCore(ext='wm', renderer_module='pango_markup') diff --git a/powerline/bindings/tmux/powerline.conf b/powerline/bindings/tmux/powerline.conf index 3fb316a3..b8a07517 100644 --- a/powerline/bindings/tmux/powerline.conf +++ b/powerline/bindings/tmux/powerline.conf @@ -1,7 +1,7 @@ if-shell 'test -z "$POWERLINE_CONFIG_COMMAND"' 'set-environment -g POWERLINE_CONFIG_COMMAND powerline-config' -# Don't version-check for this core functionality -- anything too old to -# support these options likely won't work well with powerline +# Don’t 
version-check for this core functionality -- anything too old to +# support these options likely won’t work well with powerline set -g status on set -g status-utf8 on set -g status-interval 2 diff --git a/powerline/bindings/vim/__init__.py b/powerline/bindings/vim/__init__.py index 4b38897a..15543f04 100644 --- a/powerline/bindings/vim/__init__.py +++ b/powerline/bindings/vim/__init__.py @@ -7,43 +7,127 @@ import codecs try: import vim except ImportError: - vim = {} + vim = object() -if not hasattr(vim, 'bindeval'): - import json +from powerline.lib.unicode import unicode + + +try: + vim_encoding = vim.eval('&encoding') +except AttributeError: + vim_encoding = 'utf-8' + + +python_to_vim_types = { + unicode: ( + lambda o: b'\'' + (o.translate({ + ord('\''): '\'\'', + }).encode(vim_encoding)) + b'\'' + ), + bytes: (lambda o: b'\'' + o.replace(b'\'', b'\'\'') + b'\''), + int: (str if str is bytes else (lambda o: unicode(o).encode('ascii'))), +} +python_to_vim_types[float] = python_to_vim_types[int] + + +def python_to_vim(o): + return python_to_vim_types[type(o)](o) + + +if sys.version_info < (3,): + def str_to_bytes(s): + return s + + def unicode_eval(expr): + ret = vim.eval(expr) + return ret.decode(vim_encoding, 'powerline_vim_strtrans_error') +else: + def str_to_bytes(s): + return s.encode(vim_encoding) + + def unicode_eval(expr): + return vim.eval(expr) + + +def safe_bytes_eval(expr): + return bytes(bytearray(( + int(chunk) for chunk in ( + vim.eval( + b'substitute(' + expr + b', ' + + b'\'^.*$\', \'\\=join(map(range(len(submatch(0))), ' + + b'"char2nr(submatch(0)[v:val])"))\', "")' + ).split() + ) + ))) + + +def eval_bytes(expr): + try: + return str_to_bytes(vim.eval(expr)) + except UnicodeDecodeError: + return safe_bytes_eval(expr) + + +def eval_unicode(expr): + try: + return unicode_eval(expr) + except UnicodeDecodeError: + return safe_bytes_eval(expr).decode(vim_encoding, 'powerline_vim_strtrans_error') if hasattr(vim, 'bindeval'): + rettype_func = { + None: lambda f: f, + 'unicode': ( + lambda f: ( + lambda *args, **kwargs: ( + f(*args, **kwargs).decode( + vim_encoding, 'powerline_vim_strtrans_error' + )))) + } + rettype_func['int'] = rettype_func['bytes'] = rettype_func[None] + rettype_func['str'] = rettype_func['bytes'] if str is bytes else rettype_func['unicode'] + def vim_get_func(f, rettype=None): '''Return a vim function binding.''' try: func = vim.bindeval('function("' + f + '")') - if sys.version_info >= (3,) and rettype is str: - return (lambda *args, **kwargs: func(*args, **kwargs).decode('utf-8', errors='replace')) - return func except vim.error: return None + else: + return rettype_func[rettype](func) else: + rettype_eval = { + None: getattr(vim, 'eval', None), + 'int': lambda expr: int(vim.eval(expr)), + 'bytes': eval_bytes, + 'unicode': eval_unicode, + } + rettype_eval['str'] = rettype_eval[None] + class VimFunc(object): '''Evaluate a vim function using vim.eval(). This is a fallback class for older vim versions. 
''' - __slots__ = ('f', 'rettype') + __slots__ = ('f', 'eval') def __init__(self, f, rettype=None): - self.f = f - self.rettype = rettype + self.f = f.encode('utf-8') + self.eval = rettype_eval[rettype] def __call__(self, *args): - r = vim.eval(self.f + '(' + json.dumps(args)[1:-1] + ')') - if self.rettype: - return self.rettype(r) - return r + return self.eval(self.f + b'(' + (b','.join(( + python_to_vim(o) for o in args + ))) + b')') vim_get_func = VimFunc +if type(vim) is object: + vim_get_func = lambda *args, **kwargs: None + + _getbufvar = vim_get_func('getbufvar') @@ -52,7 +136,10 @@ _getbufvar = vim_get_func('getbufvar') if hasattr(vim, 'vvars') and vim.vvars['version'] > 703: _vim_to_python_types = { getattr(vim, 'Dictionary', None) or type(vim.bindeval('{}')): - lambda value: dict(((key, _vim_to_python(value[key])) for key in value.keys())), + lambda value: dict(( + (_vim_to_python(k), _vim_to_python(v)) + for k, v in value.items() + )), getattr(vim, 'List', None) or type(vim.bindeval('[]')): lambda value: [_vim_to_python(item) for item in value], getattr(vim, 'Function', None) or type(vim.bindeval('function("mode")')): @@ -74,7 +161,7 @@ else: list: (lambda value: [_vim_to_python(i) for i in value]), } - _vim_exists = vim_get_func('exists', rettype=int) + _vim_exists = vim_get_func('exists', rettype='int') def vim_getvar(varname): varname = 'g:' + varname @@ -102,7 +189,7 @@ else: if sys.version_info < (3,): getbufvar = _getbufvar else: - _vim_to_python_types[bytes] = lambda value: value.decode('utf-8') + _vim_to_python_types[bytes] = lambda value: value.decode(vim_encoding) def getbufvar(*args): return _vim_to_python(_getbufvar(*args)) @@ -133,7 +220,7 @@ else: def vim_setoption(option, value): vim.command('let &g:{option} = {value}'.format( - option=option, value=json.encode(value))) + option=option, value=python_to_vim(value))) if hasattr(vim, 'tabpages'): @@ -256,29 +343,27 @@ class VimEnviron(object): if sys.version_info < (3,): - def buffer_name(buf): - return buf.name + def buffer_name(segment_info): + return segment_info['buffer'].name else: - vim_bufname = vim_get_func('bufname') + vim_bufname = vim_get_func('bufname', rettype='bytes') - def buffer_name(buf): + def buffer_name(segment_info): try: - name = buf.name + name = segment_info['buffer'].name except UnicodeDecodeError: - return vim_bufname(buf.number) + return vim_bufname(segment_info['bufnr']) else: - return name.encode('utf-8') if name else None + return name.encode(segment_info['encoding']) if name else None -vim_strtrans = vim_get_func('strtrans') +vim_strtrans = vim_get_func('strtrans', rettype='unicode') def powerline_vim_strtrans_error(e): if not isinstance(e, UnicodeDecodeError): raise NotImplementedError - # Assuming &encoding is utf-8 strtrans should not return anything but ASCII - # under current circumstances - text = vim_strtrans(e.object[e.start:e.end]).decode() + text = vim_strtrans(e.object[e.start:e.end]) return (text, e.end) diff --git a/powerline/bindings/vim/plugin/powerline.vim b/powerline/bindings/vim/plugin/powerline.vim index 5ec007c0..11ec05e9 100644 --- a/powerline/bindings/vim/plugin/powerline.vim +++ b/powerline/bindings/vim/plugin/powerline.vim @@ -61,22 +61,24 @@ function s:rcmd(s) endfunction try let s:can_replace_pyeval = !exists('g:powerline_pyeval') - call s:rcmd("try:") - call s:rcmd(" powerline_appended_path = None") - call s:rcmd(" try:") - call s:rcmd(" ".s:import_cmd."") - call s:rcmd(" except ImportError:") - call s:rcmd(" import sys, vim") - call s:rcmd(" 
powerline_appended_path = vim.eval('expand(\":h:h:h:h:h\")')") - call s:rcmd(" sys.path.append(powerline_appended_path)") - call s:rcmd(" ".s:import_cmd."") - call s:rcmd(" import vim") - call s:rcmd(" VimPowerline().setup(pyeval=vim.eval('s:pyeval'), pycmd=vim.eval('s:pycmd'), can_replace_pyeval=int(vim.eval('s:can_replace_pyeval')))") - call s:rcmd(" del VimPowerline") - call s:rcmd("except Exception:") - call s:rcmd(" import traceback, sys") - call s:rcmd(" traceback.print_exc(file=sys.stdout)") - call s:rcmd(" raise") + call s:rcmd('try:') + call s:rcmd(' powerline_appended_path = None') + call s:rcmd(' try:') + call s:rcmd(' '.s:import_cmd.'') + call s:rcmd(' except ImportError:') + call s:rcmd(' import sys, vim') + call s:rcmd(' powerline_appended_path = vim.eval("expand(\":h:h:h:h:h\")")') + call s:rcmd(' sys.path.append(powerline_appended_path)') + call s:rcmd(' '.s:import_cmd.'') + call s:rcmd(' import vim') + call s:rcmd(' powerline_instance = VimPowerline()') + call s:rcmd(' powerline_instance.setup(pyeval=vim.eval("s:pyeval"), pycmd=vim.eval("s:pycmd"), can_replace_pyeval=int(vim.eval("s:can_replace_pyeval")))') + call s:rcmd(' del VimPowerline') + call s:rcmd(' del powerline_instance') + call s:rcmd('except Exception:') + call s:rcmd(' import traceback, sys') + call s:rcmd(' traceback.print_exc(file=sys.stdout)') + call s:rcmd(' raise') execute s:pycmd s:pystr unlet s:pystr let s:launched = 1 @@ -86,7 +88,7 @@ finally if !exists('s:launched') unlet s:pystr echohl ErrorMsg - echomsg 'An error occurred while importing powerline package.' + echomsg 'An error occurred while importing powerline module.' echomsg 'This could be caused by invalid sys.path setting,' echomsg 'or by an incompatible Python version (powerline requires' echomsg 'Python 2.6, 2.7 or 3.2 and later to work). Please consult' @@ -97,48 +99,61 @@ finally echomsg 'should set g:powerline_pycmd to "py3" to make it load correctly.' 
endif echohl None - call s:rcmd("def powerline_troubleshoot():") - call s:rcmd(" import sys") - call s:rcmd(" import vim") - call s:rcmd(" if sys.version_info < (2, 6):") - call s:rcmd(" print('Too old python version: ' + sys.version + ' (first supported is 2.6)')") - call s:rcmd(" elif sys.version_info[0] == 3 and sys.version_info[1] < 2:") - call s:rcmd(" print('Too old python 3 version: ' + sys.version + ' (first supported is 3.2)')") - call s:rcmd(" try:") - call s:rcmd(" import powerline") - call s:rcmd(" except ImportError:") - call s:rcmd(" print('Unable to import powerline, is it installed?')") - call s:rcmd(" else:") - call s:rcmd(" if not vim.eval('expand(\"\")').startswith('/usr/'):") - call s:rcmd(" import os") - call s:rcmd(" powerline_dir = os.path.realpath(os.path.normpath(powerline.__file__))") - call s:rcmd(" powerline_dir = os.path.dirname(powerline.__file__)") - call s:rcmd(" this_dir = os.path.realpath(os.path.normpath(vim.eval('expand(\":p\")')))") - call s:rcmd(" this_dir = os.path.dirname(this_dir)") " powerline/bindings/vim/plugin - call s:rcmd(" this_dir = os.path.dirname(this_dir)") " powerline/bindings/vim - call s:rcmd(" this_dir = os.path.dirname(this_dir)") " powerline/bindings - call s:rcmd(" this_dir = os.path.dirname(this_dir)") " powerline - call s:rcmd(" if os.path.basename(this_dir) != 'powerline':") - call s:rcmd(" print('Check your installation:')") - call s:rcmd(" print('this script is not in powerline[/bindings/vim/plugin] directory,')") - call s:rcmd(" print('neither it is installed system-wide')") - call s:rcmd(" real_powerline_dir = os.path.realpath(powerline_dir)") - call s:rcmd(" real_this_dir = os.path.realpath(this_dir)") - call s:rcmd(" this_dir_par = os.path.dirname(real_this_dir)") - call s:rcmd(" powerline_appended_path = globals().get('powerline_appended_path')") - call s:rcmd(" if powerline_appended_path is not None and this_dir_par != powerline_appended_path:") - call s:rcmd(" print('Check your installation: this script is symlinked somewhere')") - call s:rcmd(" print('where powerline is not present: {0!r} != {1!r}.'.format(") - call s:rcmd(" real_this_dir, powerline_appended_path))") - call s:rcmd(" elif real_powerline_dir != real_this_dir:") - call s:rcmd(" print('It appears that you have two powerline versions installed:')") - call s:rcmd(" print('one in ' + real_powerline_dir + ', other in ' + real_this_dir + '.')") - call s:rcmd(" print('You should remove one of this. 
Check out troubleshooting section,')") - call s:rcmd(" print('it contains some information about the alternatives.')") - call s:rcmd("try:") - call s:rcmd(" powerline_troubleshoot()") - call s:rcmd("finally:") - call s:rcmd(" del powerline_troubleshoot") + call s:rcmd('def powerline_troubleshoot():') + call s:rcmd(' import sys') + call s:rcmd(' import vim') + call s:rcmd(' if sys.version_info < (2, 6):') + call s:rcmd(' print("Too old python version: " + sys.version + " (first supported is 2.6)")') + call s:rcmd(' elif sys.version_info[0] == 3 and sys.version_info[1] < 2:') + call s:rcmd(' print("Too old python 3 version: " + sys.version + " (first supported is 3.2)")') + call s:rcmd(' try:') + call s:rcmd(' import powerline') + call s:rcmd(' except ImportError:') + call s:rcmd(' print("Unable to import powerline, is it installed?")') + call s:rcmd(' else:') + call s:rcmd(' if not vim.eval(''expand("")'').startswith("/usr/"):') + call s:rcmd(' import os') + call s:rcmd(' powerline_dir = os.path.realpath(os.path.normpath(powerline.__file__))') + call s:rcmd(' powerline_dir = os.path.dirname(powerline.__file__)') + call s:rcmd(' this_dir = os.path.realpath(os.path.normpath(vim.eval(''expand(":p")'')))') + call s:rcmd(' this_dir = os.path.dirname(this_dir)') " powerline/bindings/vim/plugin + call s:rcmd(' this_dir = os.path.dirname(this_dir)') " powerline/bindings/vim + call s:rcmd(' this_dir = os.path.dirname(this_dir)') " powerline/bindings + call s:rcmd(' this_dir = os.path.dirname(this_dir)') " powerline + call s:rcmd(' if os.path.basename(this_dir) != "powerline":') + call s:rcmd(' print("Check your installation:")') + call s:rcmd(' print("this script is not in powerline[/bindings/vim/plugin] directory,")') + call s:rcmd(' print("neither it is installed system-wide")') + call s:rcmd(' real_powerline_dir = os.path.realpath(powerline_dir)') + call s:rcmd(' real_this_dir = os.path.realpath(this_dir)') + call s:rcmd(' this_dir_par = os.path.dirname(real_this_dir)') + call s:rcmd(' powerline_appended_path = globals().get("powerline_appended_path")') + call s:rcmd(' if powerline_appended_path is not None and this_dir_par != powerline_appended_path:') + call s:rcmd(' print("Check your installation: this script is symlinked somewhere")') + call s:rcmd(' print("where powerline is not present: {0!r} != {1!r}.".format(') + call s:rcmd(' real_this_dir, powerline_appended_path))') + call s:rcmd(' elif real_powerline_dir != real_this_dir:') + call s:rcmd(' print("It appears that you have two powerline versions installed:")') + call s:rcmd(' print("one in " + real_powerline_dir + ", other in " + real_this_dir + ".")') + call s:rcmd(' print("You should remove one of this. 
Check out troubleshooting section,")') + call s:rcmd(' print("it contains some information about the alternatives.")') + call s:rcmd(' try:') + call s:rcmd(' from powerline.lint import check') + call s:rcmd(' except ImportError:') + call s:rcmd(' print("Failed to import powerline.lint.check, cannot run powerline-lint")') + call s:rcmd(' else:') + call s:rcmd(' try:') + call s:rcmd(' paths = powerline_instance.get_config_paths()') + call s:rcmd(' except NameError:') + call s:rcmd(' pass') + call s:rcmd(' else:') + call s:rcmd(' from powerline.lint.markedjson.error import echoerr') + call s:rcmd(' ee = lambda *args, **kwargs: echoerr(*args, stream=sys.stdout, **kwargs)') + call s:rcmd(' check(paths=paths, echoerr=ee, require_ext="vim")') + call s:rcmd('try:') + call s:rcmd(' powerline_troubleshoot()') + call s:rcmd('finally:') + call s:rcmd(' del powerline_troubleshoot') execute s:pycmd s:pystr unlet s:pystr unlet s:pycmd diff --git a/powerline/bindings/zsh/__init__.py b/powerline/bindings/zsh/__init__.py index 46940c80..693f5186 100644 --- a/powerline/bindings/zsh/__init__.py +++ b/powerline/bindings/zsh/__init__.py @@ -10,6 +10,8 @@ import zsh from powerline.shell import ShellPowerline from powerline.lib import parsedotval from powerline.lib.unicode import unicode +from powerline.lib.encoding import (get_preferred_output_encoding, + get_preferred_environment_encoding) used_powerlines = WeakValueDictionary() @@ -65,7 +67,7 @@ class Args(object): def string(s): if type(s) is bytes: - return s.decode('utf-8', 'replace') + return s.decode(get_preferred_environment_encoding(), 'replace') else: return str(s) @@ -129,6 +131,7 @@ class Prompt(object): def __str__(self): zsh.eval('_POWERLINE_PARSER_STATE="${(%):-%_}"') + zsh.eval('_POWERLINE_SHORTENED_PATH="${(%):-%~}"') segment_info = { 'args': self.args, 'environ': environ, @@ -137,6 +140,8 @@ class Prompt(object): 'parser_state': zsh.getvalue('_POWERLINE_PARSER_STATE'), 'shortened_path': zsh.getvalue('_POWERLINE_SHORTENED_PATH'), } + zsh.setvalue('_POWERLINE_PARSER_STATE', None) + zsh.setvalue('_POWERLINE_SHORTENED_PATH', None) r = '' if self.above: for line in self.powerline.render_above_lines( @@ -151,9 +156,9 @@ class Prompt(object): ) if type(r) is not str: if type(r) is bytes: - return r.decode('utf-8') + return r.decode(get_preferred_output_encoding(), 'replace') else: - return r.encode('utf-8') + return r.encode(get_preferred_output_encoding(), 'replace') return r def __del__(self): diff --git a/powerline/bindings/zsh/powerline.zsh b/powerline/bindings/zsh/powerline.zsh index 0c097b66..be8422bf 100644 --- a/powerline/bindings/zsh/powerline.zsh +++ b/powerline/bindings/zsh/powerline.zsh @@ -36,7 +36,7 @@ _powerline_init_tmux_support() { } chpwd_functions+=( _powerline_tmux_set_pwd ) - trap "_powerline_tmux_set_columns" SIGWINCH + trap '_powerline_tmux_set_columns' SIGWINCH _powerline_tmux_set_columns _powerline_tmux_set_pwd fi @@ -103,10 +103,6 @@ _powerline_set_jobnum() { _POWERLINE_JOBNUM=${(%):-%j} } -_powerline_set_shortened_path() { - _POWERLINE_SHORTENED_PATH="${(%):-%~}" -} - _powerline_update_counter() { zpython '_powerline.precmd()' } @@ -115,13 +111,11 @@ _powerline_setup_prompt() { emulate -L zsh for f in "${precmd_functions[@]}"; do - if [[ "$f" = "_powerline_set_jobnum" ]]; then + if [[ "$f" = '_powerline_set_jobnum' ]]; then return fi done precmd_functions+=( _powerline_set_jobnum ) - chpwd_functions+=( _powerline_set_shortened_path ) - _powerline_set_shortened_path VIRTUAL_ENV_DISABLE_PROMPT=1 @@ -144,7 +138,7 @@ 
_powerline_setup_prompt() { add_args+=' --last_exit_code=$?' add_args+=' --last_pipe_status="$pipestatus"' add_args+=' --renderer_arg="client_id=$$"' - add_args+=' --renderer_arg="shortened_path=$_POWERLINE_SHORTENED_PATH"' + add_args+=' --renderer_arg="shortened_path=${(%):-%~}"' add_args+=' --jobnum=$_POWERLINE_JOBNUM' local new_args_2=' --renderer_arg="parser_state=${(%%):-%_}"' new_args_2+=' --renderer_arg="local_theme=continuation"' diff --git a/powerline/config_files/themes/ascii.json b/powerline/config_files/themes/ascii.json index 7875f219..7b49da21 100644 --- a/powerline/config_files/themes/ascii.json +++ b/powerline/config_files/themes/ascii.json @@ -29,13 +29,16 @@ "before": "" }, - "powerline.segments.common.network_load": { + "powerline.segments.common.net.network_load": { "args": { "recv_format": "DL {value:>8}", "sent_format": "UL {value:>8}" } }, - "powerline.segments.common.now_playing": { + "powerline.segments.common.net.hostname": { + "before": "H " + }, + "powerline.segments.common.players.now_playing": { "args": { "state_symbols": { "fallback": "", @@ -45,25 +48,22 @@ } } }, - "powerline.segments.common.battery": { + "powerline.segments.common.bat.battery": { "args": { "full_heart": "O", "empty_heart": "O" } }, - "powerline.segments.common.uptime": { + "powerline.segments.common.sys.uptime": { "before": "UP " }, - "powerline.segments.common.email_imap_alert": { + "powerline.segments.common.mail.email_imap_alert": { "before": "MAIL " }, - "powerline.segments.common.virtualenv": { + "powerline.segments.common.env.virtualenv": { "before": "(e) " }, - "powerline.segments.common.hostname": { - "before": "H " - }, - "powerline.segments.common.weather": { + "powerline.segments.common.wthr.weather": { "args": { "icons": { "day": "DAY", @@ -82,7 +82,7 @@ "temp_format": "{temp:.0f} C" } }, - "powerline.segments.common.fuzzy_time": { + "powerline.segments.common.time.fuzzy_time": { "args": { "unicode_text": false } diff --git a/powerline/config_files/themes/ipython/in.json b/powerline/config_files/themes/ipython/in.json index 6218b3ab..b8957753 100644 --- a/powerline/config_files/themes/ipython/in.json +++ b/powerline/config_files/themes/ipython/in.json @@ -1,9 +1,8 @@ { - "default_module": "powerline.segments.common", "segments": { "left": [ { - "function": "virtualenv", + "function": "powerline.segments.common.env.virtualenv", "priority": 10 }, { diff --git a/powerline/config_files/themes/ipython/in2.json b/powerline/config_files/themes/ipython/in2.json index 601fc9e5..ce54e1bb 100644 --- a/powerline/config_files/themes/ipython/in2.json +++ b/powerline/config_files/themes/ipython/in2.json @@ -1,5 +1,4 @@ { - "default_module": "powerline.segments.common", "segments": { "left": [ { diff --git a/powerline/config_files/themes/powerline.json b/powerline/config_files/themes/powerline.json index 859d8341..bba3b541 100644 --- a/powerline/config_files/themes/powerline.json +++ b/powerline/config_files/themes/powerline.json @@ -28,13 +28,16 @@ "before": "⌚ " }, - "powerline.segments.common.network_load": { + "powerline.segments.common.net.network_load": { "args": { "recv_format": "⬇ {value:>8}", "sent_format": "⬆ {value:>8}" } }, - "powerline.segments.common.now_playing": { + "powerline.segments.common.net.hostname": { + "before": " " + }, + "powerline.segments.common.players.now_playing": { "args": { "state_symbols": { "fallback": "♫", @@ -44,25 +47,22 @@ } } }, - "powerline.segments.common.battery": { + "powerline.segments.common.bat.battery": { "args": { "full_heart": "♥", 
"empty_heart": "♥" } }, - "powerline.segments.common.uptime": { + "powerline.segments.common.sys.uptime": { "before": "⇑ " }, - "powerline.segments.common.email_imap_alert": { + "powerline.segments.common.mail.email_imap_alert": { "before": "✉ " }, - "powerline.segments.common.virtualenv": { + "powerline.segments.common.env.virtualenv": { "before": "ⓔ " }, - "powerline.segments.common.hostname": { - "before": " " - }, - "powerline.segments.common.weather": { + "powerline.segments.common.wthr.weather": { "args": { "icons": { "day": "〇", @@ -80,7 +80,7 @@ } } }, - "powerline.segments.common.fuzzy_time": { + "powerline.segments.common.time.fuzzy_time": { "args": { "unicode_text": true } diff --git a/powerline/config_files/themes/shell/default.json b/powerline/config_files/themes/shell/default.json index 6ba1ba6d..480da5c9 100644 --- a/powerline/config_files/themes/shell/default.json +++ b/powerline/config_files/themes/shell/default.json @@ -1,20 +1,19 @@ { - "default_module": "powerline.segments.common", "segments": { "left": [ { "function": "powerline.segments.shell.mode" }, { - "function": "hostname", + "function": "powerline.segments.common.net.hostname", "priority": 10 }, { - "function": "user", + "function": "powerline.segments.common.env.user", "priority": 30 }, { - "function": "virtualenv", + "function": "powerline.segments.common.env.virtualenv", "priority": 50 }, { @@ -32,7 +31,7 @@ "priority": 10 }, { - "function": "branch", + "function": "powerline.segments.common.vcs.branch", "priority": 40 } ] diff --git a/powerline/config_files/themes/shell/default_leftonly.json b/powerline/config_files/themes/shell/default_leftonly.json index 018847ba..61e59f81 100644 --- a/powerline/config_files/themes/shell/default_leftonly.json +++ b/powerline/config_files/themes/shell/default_leftonly.json @@ -1,21 +1,20 @@ { - "default_module": "powerline.segments.common", "segments": { "left": [ { - "function": "hostname", + "function": "powerline.segments.common.net.hostname", "priority": 10 }, { - "function": "user", + "function": "powerline.segments.common.env.user", "priority": 30 }, { - "function": "virtualenv", + "function": "powerline.segments.common.env.virtualenv", "priority": 50 }, { - "function": "branch", + "function": "powerline.segments.common.vcs.branch", "priority": 40 }, { diff --git a/powerline/config_files/themes/tmux/default.json b/powerline/config_files/themes/tmux/default.json index 780f34f2..4532cedc 100644 --- a/powerline/config_files/themes/tmux/default.json +++ b/powerline/config_files/themes/tmux/default.json @@ -1,20 +1,19 @@ { - "default_module": "powerline.segments.common", "segments": { "right": [ { - "function": "uptime", + "function": "powerline.segments.common.sys.uptime", "priority": 50 }, { - "function": "system_load", + "function": "powerline.segments.common.sys.system_load", "priority": 50 }, { - "function": "date" + "function": "powerline.segments.common.time.date" }, { - "function": "date", + "function": "powerline.segments.common.time.date", "name": "time", "args": { "format": "%H:%M", @@ -22,7 +21,7 @@ } }, { - "function": "hostname" + "function": "powerline.segments.common.net.hostname" } ] } diff --git a/powerline/config_files/themes/unicode.json b/powerline/config_files/themes/unicode.json index a3b5a365..de74c23b 100644 --- a/powerline/config_files/themes/unicode.json +++ b/powerline/config_files/themes/unicode.json @@ -28,13 +28,16 @@ "before": "⌚ " }, - "powerline.segments.common.network_load": { + "powerline.segments.common.net.network_load": { "args": { 
"recv_format": "⬇ {value:>8}", "sent_format": "⬆ {value:>8}" } }, - "powerline.segments.common.now_playing": { + "powerline.segments.common.net.hostname": { + "before": "⌂ " + }, + "powerline.segments.common.players.now_playing": { "args": { "state_symbols": { "fallback": "♫", @@ -44,25 +47,22 @@ } } }, - "powerline.segments.common.battery": { + "powerline.segments.common.bat.battery": { "args": { "full_heart": "♥", "empty_heart": "♥" } }, - "powerline.segments.common.uptime": { + "powerline.segments.common.sys.uptime": { "before": "⇑ " }, - "powerline.segments.common.email_imap_alert": { + "powerline.segments.common.mail.email_imap_alert": { "before": "✉ " }, - "powerline.segments.common.virtualenv": { + "powerline.segments.common.env.virtualenv": { "before": "ⓔ " }, - "powerline.segments.common.hostname": { - "before": "⌂ " - }, - "powerline.segments.common.weather": { + "powerline.segments.common.wthr.weather": { "args": { "icons": { "day": "〇", @@ -80,7 +80,7 @@ } } }, - "powerline.segments.common.fuzzy_time": { + "powerline.segments.common.time.fuzzy_time": { "args": { "unicode_text": true } diff --git a/powerline/config_files/themes/unicode_terminus.json b/powerline/config_files/themes/unicode_terminus.json index e435ea93..33a06395 100644 --- a/powerline/config_files/themes/unicode_terminus.json +++ b/powerline/config_files/themes/unicode_terminus.json @@ -28,13 +28,16 @@ "before": "" }, - "powerline.segments.common.network_load": { + "powerline.segments.common.net.network_load": { "args": { "recv_format": "⇓ {value:>8}", "sent_format": "⇑ {value:>8}" } }, - "powerline.segments.common.now_playing": { + "powerline.segments.common.net.hostname": { + "before": "⌂ " + }, + "powerline.segments.common.players.now_playing": { "args": { "state_symbols": { "fallback": "♫", @@ -44,25 +47,22 @@ } } }, - "powerline.segments.common.battery": { + "powerline.segments.common.bat.battery": { "args": { "full_heart": "♥", "empty_heart": "♥" } }, - "powerline.segments.common.uptime": { + "powerline.segments.common.sys.uptime": { "before": "↑ " }, - "powerline.segments.common.email_imap_alert": { + "powerline.segments.common.mail.email_imap_alert": { "before": "MAIL " }, - "powerline.segments.common.virtualenv": { + "powerline.segments.common.env.virtualenv": { "before": "(e) " }, - "powerline.segments.common.hostname": { - "before": "⌂ " - }, - "powerline.segments.common.weather": { + "powerline.segments.common.wthr.weather": { "args": { "icons": { "day": "DAY", @@ -80,7 +80,7 @@ } } }, - "powerline.segments.common.fuzzy_time": { + "powerline.segments.common.time.fuzzy_time": { "args": { "unicode_text": true } diff --git a/powerline/config_files/themes/unicode_terminus_condensed.json b/powerline/config_files/themes/unicode_terminus_condensed.json index c4266ee6..a386c7cd 100644 --- a/powerline/config_files/themes/unicode_terminus_condensed.json +++ b/powerline/config_files/themes/unicode_terminus_condensed.json @@ -29,13 +29,16 @@ "before": "" }, - "powerline.segments.common.network_load": { + "powerline.segments.common.net.network_load": { "args": { "recv_format": "⇓{value:>8}", "sent_format": "⇑{value:>8}" } }, - "powerline.segments.common.now_playing": { + "powerline.segments.common.net.hostname": { + "before": "⌂" + }, + "powerline.segments.common.players.now_playing": { "args": { "state_symbols": { "fallback": "♫", @@ -45,25 +48,22 @@ } } }, - "powerline.segments.common.battery": { + "powerline.segments.common.bat.battery": { "args": { "full_heart": "♥", "empty_heart": "♥" } }, - 
"powerline.segments.common.uptime": { + "powerline.segments.common.sys.uptime": { "before": "↑" }, - "powerline.segments.common.email_imap_alert": { + "powerline.segments.common.mail.email_imap_alert": { "before": "M " }, - "powerline.segments.common.virtualenv": { + "powerline.segments.common.env.virtualenv": { "before": "E " }, - "powerline.segments.common.hostname": { - "before": "⌂" - }, - "powerline.segments.common.weather": { + "powerline.segments.common.wthr.weather": { "args": { "icons": { "day": "D", @@ -81,7 +81,7 @@ } } }, - "powerline.segments.common.fuzzy_time": { + "powerline.segments.common.time.fuzzy_time": { "args": { "unicode_text": true } diff --git a/powerline/config_files/themes/wm/default.json b/powerline/config_files/themes/wm/default.json index 009c4924..579080d1 100644 --- a/powerline/config_files/themes/wm/default.json +++ b/powerline/config_files/themes/wm/default.json @@ -1,16 +1,15 @@ { - "default_module": "powerline.segments.common", "segments": { "right": [ { - "function": "weather", + "function": "powerline.segments.common.wthr.weather", "priority": 50 }, { - "function": "date" + "function": "powerline.segments.common.time.date" }, { - "function": "date", + "function": "powerline.segments.common.time.date", "name": "time", "args": { "format": "%H:%M", @@ -18,7 +17,7 @@ } }, { - "function": "email_imap_alert", + "function": "powerline.segments.common.mail.email_imap_alert", "priority": 10, "args": { "username": "", diff --git a/powerline/ipython.py b/powerline/ipython.py index 331f6927..326114df 100644 --- a/powerline/ipython.py +++ b/powerline/ipython.py @@ -24,10 +24,11 @@ class RewriteResult(object): class IPythonPowerline(Powerline): - def init(self): + def init(self, **kwargs): super(IPythonPowerline, self).init( 'ipython', - use_daemon_threads=True + use_daemon_threads=True, + **kwargs ) def get_config_paths(self): diff --git a/powerline/lib/debug.py b/powerline/lib/debug.py index 478b9ed8..fc1ffeea 100755 --- a/powerline/lib/debug.py +++ b/powerline/lib/debug.py @@ -24,25 +24,25 @@ def print_cycles(objects, outstream=sys.stdout, show_progress=False): ''' def print_path(path): for i, step in enumerate(path): - # next "wraps around" + # next “wraps around” next = path[(i + 1) % len(path)] - outstream.write(" %s -- " % str(type(step))) + outstream.write(' %s -- ' % str(type(step))) written = False if isinstance(step, dict): for key, val in step.items(): if val is next: - outstream.write("[%s]" % repr(key)) + outstream.write('[%s]' % repr(key)) written = True break if key is next: - outstream.write("[key] = %s" % repr(val)) + outstream.write('[key] = %s' % repr(val)) written = True break elif isinstance(step, (list, tuple)): for i, item in enumerate(step): if item is next: - outstream.write("[%d]" % i) + outstream.write('[%d]' % i) written = True elif getattr(type(step), '__getattribute__', None) in (object.__getattribute__, type.__getattribute__): for attr in chain(dir(step), getattr(step, '__dict__', ())): @@ -55,18 +55,18 @@ def print_cycles(objects, outstream=sys.stdout, show_progress=False): break if not written: outstream.write(repr(step)) - outstream.write(" ->\n") - outstream.write("\n") + outstream.write(' ->\n') + outstream.write('\n') def recurse(obj, start, all, current_path): if show_progress: - outstream.write("%d\r" % len(all)) + outstream.write('%d\r' % len(all)) all[id(obj)] = None referents = gc.get_referents(obj) for referent in referents: - # If we've found our way back to the start, this is + # If we’ve found our way back to the start, 
this is # a cycle, so print it out if referent is start: try: @@ -78,13 +78,13 @@ def print_cycles(objects, outstream=sys.stdout, show_progress=False): outstream.write('Cyclic reference: %i\n' % id(referent)) print_path(current_path) - # Don't go back through the original list of objects, or + # Don’t go back through the original list of objects, or # through temporary references to the object, since those # are just an artifact of the cycle detector itself. elif referent is objects or isinstance(referent, FrameType): continue - # We haven't seen this object before, so recurse + # We haven’t seen this object before, so recurse elif id(referent) not in all: recurse(referent, start, all, current_path + (obj,)) diff --git a/powerline/lib/encoding.py b/powerline/lib/encoding.py new file mode 100644 index 00000000..24b0e4bd --- /dev/null +++ b/powerline/lib/encoding.py @@ -0,0 +1,73 @@ +# vim:fileencoding=utf-8:noet + +'''Encodings support + +This is the only module from which functions obtaining encoding should be +exported. Note: you should always care about errors= argument since it is not +guaranteed that encoding returned by some function can encode/decode given +string. + +All functions in this module must always return a valid encoding. Most of them +are not thread-safe. +''' + +from __future__ import (unicode_literals, division, absolute_import, print_function) + +import sys +import locale + + +def get_preferred_file_name_encoding(): + '''Get preferred file name encoding + ''' + return ( + sys.getfilesystemencoding() + or locale.getpreferredencoding() + or 'utf-8' + ) + + +def get_preferred_file_contents_encoding(): + '''Get encoding preferred for file contents + ''' + return ( + locale.getpreferredencoding() + or 'utf-8' + ) + + +def get_preferred_output_encoding(): + '''Get encoding that should be used for printing strings + + .. warning:: + Falls back to ASCII, so that output is most likely to be displayed + correctly. + ''' + return ( + locale.getlocale(locale.LC_MESSAGES)[1] + or locale.getdefaultlocale()[1] + or 'ascii' + ) + + +def get_preferred_input_encoding(): + '''Get encoding that should be used for reading shell command output + + .. warning:: + Falls back to latin1 so that function is less likely to throw as decoded + output is primary searched for ASCII values. 
+ ''' + return ( + locale.getlocale(locale.LC_MESSAGES)[1] + or locale.getdefaultlocale()[1] + or 'latin1' + ) + + +def get_preferred_environment_encoding(): + '''Get encoding that should be used for decoding environment variables + ''' + return ( + locale.getpreferredencoding() + or 'utf-8' + ) diff --git a/powerline/lib/inotify.py b/powerline/lib/inotify.py index 510008ed..174d88f8 100644 --- a/powerline/lib/inotify.py +++ b/powerline/lib/inotify.py @@ -9,6 +9,8 @@ import struct from ctypes.util import find_library +from powerline.lib.encoding import get_preferred_file_name_encoding + __copyright__ = '2013, Kovid Goyal ' __docformat__ = 'restructuredtext en' @@ -39,27 +41,27 @@ def load_inotify(): if not name: raise INotifyError('Cannot find C library') libc = ctypes.CDLL(name, use_errno=True) - for function in ("inotify_add_watch", "inotify_init1", "inotify_rm_watch"): + for function in ('inotify_add_watch', 'inotify_init1', 'inotify_rm_watch'): if not hasattr(libc, function): raise INotifyError('libc is too old') # inotify_init1() prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, use_errno=True) - init1 = prototype(('inotify_init1', libc), ((1, "flags", 0),)) + init1 = prototype(('inotify_init1', libc), ((1, 'flags', 0),)) # inotify_add_watch() prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint32, use_errno=True) add_watch = prototype(('inotify_add_watch', libc), ( - (1, "fd"), (1, "pathname"), (1, "mask"))) + (1, 'fd'), (1, 'pathname'), (1, 'mask'))) # inotify_rm_watch() prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_int, use_errno=True) rm_watch = prototype(('inotify_rm_watch', libc), ( - (1, "fd"), (1, "wd"))) + (1, 'fd'), (1, 'wd'))) # read() prototype = ctypes.CFUNCTYPE(ctypes.c_ssize_t, ctypes.c_int, ctypes.c_void_p, ctypes.c_size_t, use_errno=True) read = prototype(('read', libc), ( - (1, "fd"), (1, "buf"), (1, "count"))) + (1, 'fd'), (1, 'buf'), (1, 'count'))) _inotify = (init1, add_watch, rm_watch, read) return _inotify @@ -121,10 +123,8 @@ class INotify(object): raise INotifyError(os.strerror(ctypes.get_errno())) self._buf = ctypes.create_string_buffer(5000) - self.fenc = sys.getfilesystemencoding() or 'utf-8' + self.fenc = get_preferred_file_name_encoding() self.hdr = struct.Struct(b'iIII') - if self.fenc == 'ascii': - self.fenc = 'utf-8' # We keep a reference to os to prevent it from being deleted # during interpreter shutdown, which would lead to errors in the # __del__ method @@ -176,7 +176,7 @@ class INotify(object): pos += self.hdr.size name = None if get_name: - name = raw[pos:pos + name_len].rstrip(b'\0').decode(self.fenc) + name = raw[pos:pos + name_len].rstrip(b'\0') pos += name_len self.process_event(wd, mask, cookie, name) diff --git a/powerline/lib/path.py b/powerline/lib/path.py index a55d3e0a..49ff433c 100644 --- a/powerline/lib/path.py +++ b/powerline/lib/path.py @@ -6,3 +6,13 @@ import os def realpath(path): return os.path.abspath(os.path.realpath(path)) + + +def join(*components): + if any((isinstance(p, bytes) for p in components)): + return os.path.join(*[ + p if isinstance(p, bytes) else p.encode('ascii') + for p in components + ]) + else: + return os.path.join(*components) diff --git a/powerline/lib/shell.py b/powerline/lib/shell.py index 3359c843..df7cc65e 100644 --- a/powerline/lib/shell.py +++ b/powerline/lib/shell.py @@ -5,9 +5,10 @@ import sys import os from subprocess import Popen, PIPE -from locale import getlocale, getdefaultlocale, LC_MESSAGES from functools import partial +from 
powerline.lib.encoding import get_preferred_input_encoding + if sys.platform.startswith('win32'): # Prevent windows from launching consoles when calling commands @@ -15,10 +16,6 @@ if sys.platform.startswith('win32'): Popen = partial(Popen, creationflags=0x08000000) -def _get_shell_encoding(): - return getlocale(LC_MESSAGES)[1] or getdefaultlocale()[1] or 'utf-8' - - def run_cmd(pl, cmd, stdin=None): '''Run command and return its stdout, stripped @@ -38,7 +35,7 @@ def run_cmd(pl, cmd, stdin=None): return None else: stdout, err = p.communicate(stdin) - stdout = stdout.decode(_get_shell_encoding()) + stdout = stdout.decode(get_preferred_input_encoding()) return stdout.strip() @@ -56,7 +53,7 @@ def readlines(cmd, cwd): Working directory of the command which will be run. ''' p = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE, cwd=cwd) - encoding = _get_shell_encoding() + encoding = get_preferred_input_encoding() p.stderr.close() with p.stdout: for line in p.stdout: @@ -69,15 +66,14 @@ except ImportError: # shutil.which was added in python-3.3. Here is what was added: # Lib/shutil.py, commit 5abe28a9c8fe701ba19b1db5190863384e96c798 def which(cmd, mode=os.F_OK | os.X_OK, path=None): - """Given a command, mode, and a PATH string, return the path which - conforms to the given mode on the PATH, or None if there is no such + '''Given a command, mode, and a PATH string, return the path which + conforms to the given mode on the PATH, or None if there is no such file. - `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result - of os.environ.get("PATH"), or can be overridden with a custom search + ``mode`` defaults to os.F_OK | os.X_OK. ``path`` defaults to the result + of ``os.environ.get('PATH')``, or can be overridden with a custom search path. - - """ + ''' # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. @@ -88,7 +84,7 @@ except ImportError: and not os.path.isdir(fn) ) - # If we're given a path with a directory part, look it up directly rather + # If we’re given a path with a directory part, look it up directly rather # than referring to PATH directories. This includes checking relative to the # current directory, e.g. ./script if os.path.dirname(cmd): @@ -97,20 +93,20 @@ except ImportError: return None if path is None: - path = os.environ.get("PATH", os.defpath) + path = os.environ.get('PATH', os.defpath) if not path: return None path = path.split(os.pathsep) - if sys.platform == "win32": + if sys.platform == 'win32': # The current directory takes precedence on Windows. if os.curdir not in path: path.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. - pathext = os.environ.get("PATHEXT", "").split(os.pathsep) + pathext = os.environ.get('PATHEXT', '').split(os.pathsep) # See if the given file matches any of the expected path extensions. - # This will allow us to short circuit when given "python.exe". + # This will allow us to short circuit when given 'python.exe'. # If it does match, only test that one, otherwise we have to try # others. if any(cmd.lower().endswith(ext.lower()) for ext in pathext): @@ -118,7 +114,7 @@ except ImportError: else: files = [cmd + ext for ext in pathext] else: - # On other platforms you don't have things like PATHEXT to tell you + # On other platforms you don’t have things like PATHEXT to tell you # what file suffixes are executable, so just pass on cmd as-is. 
files = [cmd] diff --git a/powerline/lib/threaded.py b/powerline/lib/threaded.py index 5e30988a..01ad2a0d 100644 --- a/powerline/lib/threaded.py +++ b/powerline/lib/threaded.py @@ -102,7 +102,7 @@ class ThreadedSegment(Segment, MultiRunnedThread): def shutdown(self): self.shutdown_event.set() if self.daemon and self.is_alive(): - # Give the worker thread a chance to shutdown, but don't block for + # Give the worker thread a chance to shutdown, but don’t block for # too long self.join(0.01) diff --git a/powerline/lib/unicode.py b/powerline/lib/unicode.py index cea8b05f..32ea3afe 100644 --- a/powerline/lib/unicode.py +++ b/powerline/lib/unicode.py @@ -1,7 +1,10 @@ # vim:fileencoding=utf-8:noet from __future__ import (unicode_literals, division, absolute_import, print_function) -from locale import getpreferredencoding +import sys +import codecs + +from powerline.lib.encoding import get_preferred_output_encoding try: @@ -25,6 +28,62 @@ def u(s): return unicode(s, 'utf-8') +if sys.version_info < (3,): + def tointiter(s): + '''Convert a byte string to the sequence of integers + ''' + return (ord(c) for c in s) +else: + def tointiter(s): + '''Convert a byte string to the sequence of integers + ''' + return iter(s) + + +def powerline_decode_error(e): + if not isinstance(e, UnicodeDecodeError): + raise NotImplementedError + return (''.join(( + '<{0:02X}>'.format(c) + for c in tointiter(e.object[e.start:e.end]) + )), e.end) + + +codecs.register_error('powerline_decode_error', powerline_decode_error) + + +last_swe_idx = 0 + + +def register_strwidth_error(strwidth): + global last_swe_idx + last_swe_idx += 1 + + def powerline_encode_strwidth_error(e): + if not isinstance(e, UnicodeEncodeError): + raise NotImplementedError + return ('?' * strwidth(e.object[e.start:e.end]), e.end) + + ename = 'powerline_encode_strwidth_error_{0}'.format(last_swe_idx) + codecs.register_error(ename, powerline_encode_strwidth_error) + return ename + + +def out_u(s): + '''Return unicode string suitable for displaying + + Unlike other functions assumes get_preferred_output_encoding() first. Unlike + u() does not throw exceptions for invalid unicode strings. Unlike + safe_unicode() does throw an exception if object is not a string. + ''' + if isinstance(s, unicode): + return s + elif isinstance(s, bytes): + return unicode(s, get_preferred_output_encoding(), 'powerline_decode_error') + else: + raise TypeError('Expected unicode or bytes instance, got {0}'.format(repr(type(s)))) + + def safe_unicode(s): '''Return unicode instance without raising an exception. 
@@ -33,7 +92,7 @@ def safe_unicode(s): * UTF-8 string * Object with __str__() or __repr__() method that returns UTF-8 string or unicode object (depending on python version) - * String in locale.getpreferredencoding() encoding + * String in powerline.lib.encoding.get_preferred_output_encoding() encoding * If everything failed use safe_unicode on last exception with which everything failed ''' @@ -46,7 +105,7 @@ def safe_unicode(s): except TypeError: return unicode(str(s), 'utf-8') except UnicodeDecodeError: - return unicode(s, getpreferredencoding()) + return unicode(s, get_preferred_output_encoding()) except Exception as e: return safe_unicode(e) diff --git a/powerline/lib/vcs/__init__.py b/powerline/lib/vcs/__init__.py index 96dee683..1b7f5e23 100644 --- a/powerline/lib/vcs/__init__.py +++ b/powerline/lib/vcs/__init__.py @@ -8,6 +8,8 @@ from threading import Lock from collections import defaultdict from powerline.lib.watcher import create_tree_watcher +from powerline.lib.unicode import out_u +from powerline.lib.path import join def generate_directories(path): @@ -75,10 +77,10 @@ def get_branch_name(directory, config_file, get_func, create_watcher): raise # Config file does not exist (happens for mercurial) if config_file not in branch_name_cache: - branch_name_cache[config_file] = get_func(directory, config_file) + branch_name_cache[config_file] = out_u(get_func(directory, config_file)) if changed: # Config file has changed or was not tracked - branch_name_cache[config_file] = get_func(directory, config_file) + branch_name_cache[config_file] = out_u(get_func(directory, config_file)) return branch_name_cache[config_file] @@ -96,7 +98,7 @@ class FileStatusCache(dict): if nparent == parent: break parent = nparent - ignore_files.add(os.path.join(parent, ignore_file_name)) + ignore_files.add(join(parent, ignore_file_name)) for f in extra_ignore_files: ignore_files.add(f) self.keypath_ignore_map[keypath] = ignore_files @@ -120,7 +122,7 @@ file_status_cache = FileStatusCache() def get_file_status(directory, dirstate_file, file_path, ignore_file_name, get_func, create_watcher, extra_ignore_files=()): global file_status_cache - keypath = file_path if os.path.isabs(file_path) else os.path.join(directory, file_path) + keypath = file_path if os.path.isabs(file_path) else join(directory, file_path) file_status_cache.update_maps(keypath, directory, dirstate_file, ignore_file_name, extra_ignore_files) with file_status_lock: @@ -218,9 +220,15 @@ vcs_props = ( ) +vcs_props_bytes = [ + (vcs, vcs_dir.encode('ascii'), check) + for vcs, vcs_dir, check in vcs_props +] + + def guess(path, create_watcher): for directory in generate_directories(path): - for vcs, vcs_dir, check in vcs_props: + for vcs, vcs_dir, check in (vcs_props_bytes if isinstance(path, bytes) else vcs_props): repo_dir = os.path.join(directory, vcs_dir) if check(repo_dir): if os.path.isdir(repo_dir) and not os.access(repo_dir, os.X_OK): @@ -245,7 +253,7 @@ def debug(): '''Test run guess(), repo.branch() and repo.status() To use:: - python -c "from powerline.lib.vcs import debug; debug()" some_file_to_watch. + python -c 'from powerline.lib.vcs import debug; debug()' some_file_to_watch. 
''' import sys dest = sys.argv[-1] diff --git a/powerline/lib/vcs/bzr.py b/powerline/lib/vcs/bzr.py index 012f612e..e47d8b20 100644 --- a/powerline/lib/vcs/bzr.py +++ b/powerline/lib/vcs/bzr.py @@ -1,7 +1,6 @@ # vim:fileencoding=utf-8:noet from __future__ import (unicode_literals, division, absolute_import, print_function) -import sys import os import re @@ -10,12 +9,14 @@ from io import StringIO from bzrlib import (workingtree, status, library_state, trace, ui) from powerline.lib.vcs import get_branch_name, get_file_status +from powerline.lib.path import join +from powerline.lib.encoding import get_preferred_file_contents_encoding class CoerceIO(StringIO): def write(self, arg): if isinstance(arg, bytes): - arg = arg.decode('utf-8', 'replace') + arg = arg.decode(get_preferred_file_contents_encoding(), 'replace') return super(CoerceIO, self).write(arg) @@ -29,7 +30,7 @@ def branch_name_from_config_file(directory, config_file): for line in f: m = nick_pat.match(line) if m is not None: - ans = m.group(1).strip().decode('utf-8', 'replace') + ans = m.group(1).strip().decode(get_preferred_file_contents_encoding(), 'replace') break except Exception: pass @@ -41,8 +42,6 @@ state = None class Repository(object): def __init__(self, directory, create_watcher): - if isinstance(directory, bytes): - directory = directory.decode(sys.getfilesystemencoding() or sys.getdefaultencoding() or 'utf-8') self.directory = os.path.abspath(directory) self.create_watcher = create_watcher @@ -51,8 +50,8 @@ class Repository(object): Without file argument: returns status of the repository: - :"D?": dirty (tracked modified files: added, removed, deleted, modified), - :"?U": untracked-dirty (added, but not tracked files) + :'D?': dirty (tracked modified files: added, removed, deleted, modified), + :'?U': untracked-dirty (added, but not tracked files) :None: clean (status is empty) With file argument: returns status of this file: The status codes are @@ -61,7 +60,7 @@ class Repository(object): if path is not None: return get_file_status( directory=self.directory, - dirstate_file=os.path.join(self.directory, '.bzr', 'checkout', 'dirstate'), + dirstate_file=join(self.directory, '.bzr', 'checkout', 'dirstate'), file_path=path, ignore_file_name='.bzrignore', get_func=self.do_status, @@ -100,7 +99,7 @@ class Repository(object): return ans if ans.strip() else None def branch(self): - config_file = os.path.join(self.directory, '.bzr', 'branch', 'branch.conf') + config_file = join(self.directory, '.bzr', 'branch', 'branch.conf') return get_branch_name( directory=self.directory, config_file=config_file, diff --git a/powerline/lib/vcs/git.py b/powerline/lib/vcs/git.py index 21b5e434..c1d77a61 100644 --- a/powerline/lib/vcs/git.py +++ b/powerline/lib/vcs/git.py @@ -2,11 +2,14 @@ from __future__ import (unicode_literals, division, absolute_import, print_function) import os -import sys import re from powerline.lib.vcs import get_branch_name, get_file_status from powerline.lib.shell import readlines +from powerline.lib.path import join +from powerline.lib.encoding import (get_preferred_file_name_encoding, + get_preferred_file_contents_encoding) +from powerline.lib.shell import which _ref_pat = re.compile(br'ref:\s*refs/heads/(.+)') @@ -20,20 +23,22 @@ def branch_name_from_config_file(directory, config_file): return os.path.basename(directory) m = _ref_pat.match(raw) if m is not None: - return m.group(1).decode('utf-8', 'replace') + return m.group(1).decode(get_preferred_file_contents_encoding(), 'replace') return raw[:7] def 
git_directory(directory): - path = os.path.join(directory, '.git') + path = join(directory, '.git') if os.path.isfile(path): with open(path, 'rb') as f: raw = f.read() if not raw.startswith(b'gitdir: '): raise IOError('invalid gitfile format') - raw = raw[8:].decode(sys.getfilesystemencoding() or 'utf-8') - if raw[-1] == '\n': + raw = raw[8:] + if raw[-1:] == b'\n': raw = raw[:-1] + if not isinstance(path, bytes): + raw = raw.decode(get_preferred_file_name_encoding()) if not raw: raise IOError('no path in gitfile') return os.path.abspath(os.path.join(directory, raw)) @@ -59,28 +64,28 @@ class GitRepository(object): :None: repository clean With file argument: returns status of this file. Output is - equivalent to the first two columns of "git status --porcelain" + equivalent to the first two columns of ``git status --porcelain`` (except for merge statuses as they are not supported by libgit2). ''' if path: gitd = git_directory(self.directory) # We need HEAD as without it using fugitive to commit causes the - # current file's status (and only the current file) to not be updated + # current file’s status (and only the current file) to not be updated # for some reason I cannot be bothered to figure out. return get_file_status( directory=self.directory, - dirstate_file=os.path.join(gitd, 'index'), + dirstate_file=join(gitd, 'index'), file_path=path, ignore_file_name='.gitignore', get_func=self.do_status, create_watcher=self.create_watcher, - extra_ignore_files=tuple(os.path.join(gitd, x) for x in ('logs/HEAD', 'info/exclude')), + extra_ignore_files=tuple(join(gitd, x) for x in ('logs/HEAD', 'info/exclude')), ) return self.do_status(self.directory, path) def branch(self): directory = git_directory(self.directory) - head = os.path.join(directory, 'HEAD') + head = join(directory, 'HEAD') return get_branch_name( directory=directory, config_file=head, @@ -151,6 +156,11 @@ try: return r if r != ' ' else None except ImportError: class Repository(GitRepository): + def __init__(self, *args, **kwargs): + if not which('git'): + raise OSError('git executable is not available') + super(Repository, self).__init__(*args, **kwargs) + @staticmethod def ignore_event(path, name): # Ignore changes to the index.lock file, since they happen diff --git a/powerline/lib/vcs/mercurial.py b/powerline/lib/vcs/mercurial.py index 71963dd3..4a541303 100644 --- a/powerline/lib/vcs/mercurial.py +++ b/powerline/lib/vcs/mercurial.py @@ -6,13 +6,15 @@ import os from mercurial import hg, ui, match from powerline.lib.vcs import get_branch_name, get_file_status +from powerline.lib.path import join +from powerline.lib.encoding import get_preferred_file_contents_encoding def branch_name_from_config_file(directory, config_file): try: with open(config_file, 'rb') as f: raw = f.read() - return raw.decode('utf-8', 'replace').strip() + return raw.decode(get_preferred_file_contents_encoding(), 'replace').strip() except Exception: return 'default' @@ -39,18 +41,18 @@ class Repository(object): Without file argument: returns status of the repository: - :"D?": dirty (tracked modified files: added, removed, deleted, modified), - :"?U": untracked-dirty (added, but not tracked files) + :'D?': dirty (tracked modified files: added, removed, deleted, modified), + :'?U': untracked-dirty (added, but not tracked files) :None: clean (status is empty) - With file argument: returns status of this file: "M"odified, "A"dded, - "R"emoved, "D"eleted (removed from filesystem, but still tracked), - "U"nknown, "I"gnored, (None)Clean. 
+ With file argument: returns status of this file: `M`odified, `A`dded, + `R`emoved, `D`eleted (removed from filesystem, but still tracked), + `U`nknown, `I`gnored, (None)Clean. ''' if path: return get_file_status( directory=self.directory, - dirstate_file=os.path.join(self.directory, '.hg', 'dirstate'), + dirstate_file=join(self.directory, '.hg', 'dirstate'), file_path=path, ignore_file_name='.hgignore', get_func=self.do_status, @@ -75,7 +77,7 @@ class Repository(object): return self.repo_statuses_str[resulting_status] def branch(self): - config_file = os.path.join(self.directory, '.hg', 'branch') + config_file = join(self.directory, '.hg', 'branch') return get_branch_name( directory=self.directory, config_file=config_file, diff --git a/powerline/lib/watcher/inotify.py b/powerline/lib/watcher/inotify.py index 6708b21e..4a134e8d 100644 --- a/powerline/lib/watcher/inotify.py +++ b/powerline/lib/watcher/inotify.py @@ -247,6 +247,8 @@ class INotifyTreeWatcher(INotify): if mask & self.CREATE: # A new sub-directory might have been created, monitor it. try: + if not isinstance(path, bytes): + name = name.decode(self.fenc) self.add_watch(os.path.join(path, name)) except OSError as e: if e.errno == errno.ENOENT: diff --git a/powerline/lib/watcher/uv.py b/powerline/lib/watcher/uv.py index 02fdfcc6..eba020e1 100644 --- a/powerline/lib/watcher/uv.py +++ b/powerline/lib/watcher/uv.py @@ -7,8 +7,10 @@ from collections import defaultdict from threading import RLock from functools import partial from threading import Thread +from errno import ENOENT from powerline.lib.path import realpath +from powerline.lib.encoding import get_preferred_file_name_encoding class UvNotFound(NotImplementedError): @@ -55,15 +57,24 @@ def start_uv_thread(): return _uv_thread.uv_loop +def normpath(path, fenc): + path = realpath(path) + if isinstance(path, bytes): + return path.decode(fenc) + else: + return path + + class UvWatcher(object): def __init__(self): import_pyuv() self.watches = {} self.lock = RLock() self.loop = start_uv_thread() + self.fenc = get_preferred_file_name_encoding() def watch(self, path): - path = realpath(path) + path = normpath(path, self.fenc) with self.lock: if path not in self.watches: try: @@ -76,12 +87,12 @@ class UvWatcher(object): except pyuv.error.FSEventError as e: code = e.args[0] if code == pyuv.errno.UV_ENOENT: - raise OSError('No such file or directory: ' + path) + raise OSError(ENOENT, 'No such file or directory: ' + path) else: raise def unwatch(self, path): - path = realpath(path) + path = normpath(path, self.fenc) with self.lock: try: watch = self.watches.pop(path) @@ -91,7 +102,7 @@ class UvWatcher(object): def is_watching(self, path): with self.lock: - return realpath(path) in self.watches + return normpath(path, self.fenc) in self.watches def __del__(self): try: @@ -121,7 +132,7 @@ class UvFileWatcher(UvWatcher): self.events.pop(path, None) def __call__(self, path): - path = realpath(path) + path = normpath(path, self.fenc) with self.lock: events = self.events.pop(path, None) if events: @@ -138,14 +149,15 @@ class UvTreeWatcher(UvWatcher): def __init__(self, basedir, ignore_event=None): super(UvTreeWatcher, self).__init__() self.ignore_event = ignore_event or (lambda path, name: False) - self.basedir = realpath(basedir) + self.basedir = normpath(basedir, self.fenc) self.modified = True self.watch_directory(self.basedir) def watch_directory(self, path): - os.path.walk(realpath(path), self.watch_one_directory, None) + for root, dirs, files in os.walk(normpath(path, self.fenc)): + 
self.watch_one_directory(root) - def watch_one_directory(self, arg, dirname, fnames): + def watch_one_directory(self, dirname): try: self.watch(dirname) except OSError: diff --git a/powerline/lint/__init__.py b/powerline/lint/__init__.py index 6a5d86d5..49cb511a 100644 --- a/powerline/lint/__init__.py +++ b/powerline/lint/__init__.py @@ -1,565 +1,41 @@ # vim:fileencoding=utf-8:noet from __future__ import (unicode_literals, division, absolute_import, print_function) -import itertools -import sys import os -import re import logging from collections import defaultdict -from copy import copy from functools import partial -from powerline.lint.markedjson import load from powerline import generate_config_finder, get_config_paths, load_config -from powerline.lib.config import ConfigLoader -from powerline.lint.markedjson.error import echoerr, MarkedError, Mark -from powerline.lint.markedjson.markedvalue import MarkedUnicode from powerline.segments.vim import vim_modes -from powerline.lint.inspect import getconfigargspec -from powerline.lib.threaded import ThreadedSegment from powerline.lib import mergedicts_copy +from powerline.lib.config import ConfigLoader from powerline.lib.unicode import unicode +from powerline.lint.markedjson import load +from powerline.lint.markedjson.error import echoerr, EchoErr, MarkedError +from powerline.lint.checks import (check_matcher_func, check_ext, check_config, check_top_theme, + check_color, check_translated_group_name, check_group, + check_segment_module, check_exinclude_function, type_keys, + check_segment_function, check_args, get_one_segment_function, + check_highlight_groups, check_highlight_group, check_full_segment_data, + get_all_possible_functions, check_segment_data_key) +from powerline.lint.spec import Spec +from powerline.lint.context import Context def open_file(path): return open(path, 'rb') -EMPTYTUPLE = tuple() - - -class JStr(unicode): - def join(self, iterable): - return super(JStr, self).join((unicode(item) for item in iterable)) - - -key_sep = JStr('/') -list_sep = JStr(', ') - - -def init_context(config): - return ((MarkedUnicode('', config.mark), config),) - - -def context_key(context): - return key_sep.join((c[0] for c in context)) - - -def havemarks(*args, **kwargs): - origin = kwargs.get('origin', '') - for i, v in enumerate(args): - if not hasattr(v, 'mark'): - raise AssertionError('Value #{0}/{1} ({2!r}) has no attribute `mark`'.format(origin, i, v)) - if isinstance(v, dict): - for key, val in v.items(): - havemarks(key, val, origin=(origin + '[' + unicode(i) + ']/' + unicode(key))) - elif isinstance(v, list): - havemarks(*v, origin=(origin + '[' + unicode(i) + ']')) - - -def context_has_marks(context): - for i, v in enumerate(context): - havemarks(v[0], origin='context key') - havemarks(v[1], origin='context val') - - -class EchoErr(object): - __slots__ = ('echoerr', 'logger',) - - def __init__(self, echoerr, logger): - self.echoerr = echoerr - self.logger = logger - - def __call__(self, *args, **kwargs): - self.echoerr(*args, **kwargs) - - -class DelayedEchoErr(EchoErr): - __slots__ = ('echoerr', 'logger', 'errs') - - def __init__(self, echoerr): - super(DelayedEchoErr, self).__init__(echoerr, echoerr.logger) - self.errs = [] - - def __call__(self, *args, **kwargs): - self.errs.append((args, kwargs)) - - def echo_all(self): - for args, kwargs in self.errs: - self.echoerr(*args, **kwargs) - - def __nonzero__(self): - return not not self.errs - - __bool__ = __nonzero__ - - -def new_context_item(key, value): - return 
((value.keydict[key], value[key]),) - - -class Spec(object): - def __init__(self, **keys): - self.specs = [] - self.keys = {} - self.checks = [] - self.cmsg = '' - self.isoptional = False - self.uspecs = [] - self.ufailmsg = lambda key: 'found unknown key: {0}'.format(key) - self.did_type = False - self.update(**keys) - - def update(self, **keys): - for k, v in keys.items(): - self.keys[k] = len(self.specs) - self.specs.append(v) - if self.keys and not self.did_type: - self.type(dict) - self.did_type = True - return self - - def copy(self, copied=None): - copied = copied or {} - try: - return copied[id(self)] - except KeyError: - instance = self.__class__() - copied[id(self)] = instance - return self.__class__()._update(self.__dict__, copied) - - def _update(self, d, copied): - self.__dict__.update(d) - self.keys = copy(self.keys) - self.checks = copy(self.checks) - self.uspecs = copy(self.uspecs) - self.specs = [spec.copy(copied) for spec in self.specs] - return self - - def unknown_spec(self, keyfunc, spec): - if isinstance(keyfunc, Spec): - self.specs.append(keyfunc) - keyfunc = len(self.specs) - 1 - self.specs.append(spec) - self.uspecs.append((keyfunc, len(self.specs) - 1)) - return self - - def unknown_msg(self, msgfunc): - self.ufailmsg = msgfunc - return self - - def context_message(self, msg): - self.cmsg = msg - for spec in self.specs: - if not spec.cmsg: - spec.context_message(msg) - return self - - def check_type(self, value, context_mark, data, context, echoerr, types): - havemarks(value) - if type(value.value) not in types: - echoerr( - context=self.cmsg.format(key=context_key(context)), - context_mark=context_mark, - problem='{0!r} must be a {1} instance, not {2}'.format( - value, - list_sep.join((t.__name__ for t in types)), - type(value.value).__name__ - ), - problem_mark=value.mark - ) - return False, True - return True, False - - def check_func(self, value, context_mark, data, context, echoerr, func, msg_func): - havemarks(value) - proceed, echo, hadproblem = func(value, data, context, echoerr) - if echo and hadproblem: - echoerr(context=self.cmsg.format(key=context_key(context)), - context_mark=context_mark, - problem=msg_func(value), - problem_mark=value.mark) - return proceed, hadproblem - - def check_list(self, value, context_mark, data, context, echoerr, item_func, msg_func): - havemarks(value) - i = 0 - hadproblem = False - for item in value: - havemarks(item) - if isinstance(item_func, int): - spec = self.specs[item_func] - proceed, fhadproblem = spec.match( - item, - value.mark, - data, - context + ((MarkedUnicode('list item ' + unicode(i), item.mark), item),), - echoerr - ) - else: - proceed, echo, fhadproblem = item_func(item, data, context, echoerr) - if echo and fhadproblem: - echoerr(context=self.cmsg.format(key=context_key(context) + '/list item ' + unicode(i)), - context_mark=value.mark, - problem=msg_func(item), - problem_mark=item.mark) - if fhadproblem: - hadproblem = True - if not proceed: - return proceed, hadproblem - i += 1 - return True, hadproblem - - def check_either(self, value, context_mark, data, context, echoerr, start, end): - havemarks(value) - new_echoerr = DelayedEchoErr(echoerr) - - hadproblem = False - for spec in self.specs[start:end]: - proceed, hadproblem = spec.match(value, value.mark, data, context, new_echoerr) - if not proceed: - break - if not hadproblem: - return True, False - - new_echoerr.echo_all() - - return False, hadproblem - - def check_tuple(self, value, context_mark, data, context, echoerr, start, end): - 
havemarks(value) - hadproblem = False - for (i, item, spec) in zip(itertools.count(), value, self.specs[start:end]): - proceed, ihadproblem = spec.match( - item, - value.mark, - data, - context + ((MarkedUnicode('tuple item ' + unicode(i), item.mark), item),), - echoerr - ) - if ihadproblem: - hadproblem = True - if not proceed: - return False, hadproblem - return True, hadproblem - - def type(self, *args): - self.checks.append(('check_type', args)) - return self - - cmp_funcs = { - 'le': lambda x, y: x <= y, - 'lt': lambda x, y: x < y, - 'ge': lambda x, y: x >= y, - 'gt': lambda x, y: x > y, - 'eq': lambda x, y: x == y, - } - - cmp_msgs = { - 'le': 'lesser or equal to', - 'lt': 'lesser then', - 'ge': 'greater or equal to', - 'gt': 'greater then', - 'eq': 'equal to', - } - - def len(self, comparison, cint, msg_func=None): - cmp_func = self.cmp_funcs[comparison] - msg_func = ( - msg_func - or (lambda value: 'length of {0!r} is not {1} {2}'.format( - value, self.cmp_msgs[comparison], cint)) - ) - self.checks.append(( - 'check_func', - (lambda value, *args: (True, True, not cmp_func(len(value), cint))), - msg_func - )) - return self - - def cmp(self, comparison, cint, msg_func=None): - if type(cint) is str: - self.type(unicode) - elif type(cint) is float: - self.type(int, float) - else: - self.type(type(cint)) - cmp_func = self.cmp_funcs[comparison] - msg_func = msg_func or (lambda value: '{0} is not {1} {2}'.format(value, self.cmp_msgs[comparison], cint)) - self.checks.append(( - 'check_func', - (lambda value, *args: (True, True, not cmp_func(value.value, cint))), - msg_func - )) - return self - - def unsigned(self, msg_func=None): - self.type(int) - self.checks.append(( - 'check_func', - (lambda value, *args: (True, True, value < 0)), - (lambda value: '{0} must be greater then zero'.format(value)) - )) - return self - - def list(self, item_func, msg_func=None): - self.type(list) - if isinstance(item_func, Spec): - self.specs.append(item_func) - item_func = len(self.specs) - 1 - self.checks.append(('check_list', item_func, msg_func or (lambda item: 'failed check'))) - return self - - def tuple(self, *specs): - self.type(list) - - max_len = len(specs) - min_len = max_len - for spec in reversed(specs): - if spec.isoptional: - min_len -= 1 - else: - break - if max_len == min_len: - self.len('eq', len(specs)) - else: - self.len('ge', min_len) - self.len('le', max_len) - - start = len(self.specs) - for i, spec in zip(itertools.count(), specs): - self.specs.append(spec) - self.checks.append(('check_tuple', start, len(self.specs))) - return self - - def func(self, func, msg_func=None): - self.checks.append(('check_func', func, msg_func or (lambda value: 'failed check'))) - return self - - def re(self, regex, msg_func=None): - self.type(unicode) - compiled = re.compile(regex) - msg_func = msg_func or (lambda value: 'String "{0}" does not match "{1}"'.format(value, regex)) - self.checks.append(( - 'check_func', - (lambda value, *args: (True, True, not compiled.match(value.value))), - msg_func - )) - return self - - def ident(self, msg_func=None): - msg_func = ( - msg_func - or (lambda value: 'String "{0}" is not an alphanumeric/underscore colon-separated identifier'.format(value)) - ) - return self.re('^\w+(?::\w+)?$', msg_func) - - def oneof(self, collection, msg_func=None): - msg_func = msg_func or (lambda value: '"{0}" must be one of {1!r}'.format(value, list(collection))) - self.checks.append(( - 'check_func', - (lambda value, *args: (True, True, value not in collection)), - msg_func - )) - 
return self - - def error(self, msg): - self.checks.append(( - 'check_func', - (lambda *args: (True, True, True)), - (lambda value: msg.format(value)) - )) - return self - - def either(self, *specs): - start = len(self.specs) - self.specs.extend(specs) - self.checks.append(('check_either', start, len(self.specs))) - return self - - def optional(self): - self.isoptional = True - return self - - def match_checks(self, *args): - hadproblem = False - for check in self.checks: - proceed, chadproblem = getattr(self, check[0])(*(args + check[1:])) - if chadproblem: - hadproblem = True - if not proceed: - return False, hadproblem - return True, hadproblem - - def match(self, value, context_mark=None, data=None, context=EMPTYTUPLE, echoerr=echoerr): - havemarks(value) - proceed, hadproblem = self.match_checks(value, context_mark, data, context, echoerr) - if proceed: - if self.keys or self.uspecs: - for key, vali in self.keys.items(): - valspec = self.specs[vali] - if key in value: - proceed, mhadproblem = valspec.match( - value[key], - value.mark, - data, - context + new_context_item(key, value), - echoerr - ) - if mhadproblem: - hadproblem = True - if not proceed: - return False, hadproblem - else: - if not valspec.isoptional: - hadproblem = True - echoerr(context=self.cmsg.format(key=context_key(context)), - context_mark=None, - problem='required key is missing: {0}'.format(key), - problem_mark=value.mark) - for key in value.keys(): - havemarks(key) - if key not in self.keys: - for keyfunc, vali in self.uspecs: - valspec = self.specs[vali] - if isinstance(keyfunc, int): - spec = self.specs[keyfunc] - proceed, khadproblem = spec.match(key, context_mark, data, context, echoerr) - else: - proceed, khadproblem = keyfunc(key, data, context, echoerr) - if khadproblem: - hadproblem = True - if proceed: - proceed, vhadproblem = valspec.match( - value[key], - value.mark, - data, - context + new_context_item(key, value), - echoerr - ) - if vhadproblem: - hadproblem = True - break - else: - hadproblem = True - if self.ufailmsg: - echoerr(context=self.cmsg.format(key=context_key(context)), - context_mark=None, - problem=self.ufailmsg(key), - problem_mark=key.mark) - return True, hadproblem - - -class WithPath(object): - def __init__(self, import_paths): - self.import_paths = import_paths - - def __enter__(self): - self.oldpath = sys.path - sys.path = self.import_paths + sys.path - - def __exit__(self, *args): - sys.path = self.oldpath - - -def check_matcher_func(ext, match_name, data, context, echoerr): - havemarks(match_name) - import_paths = [os.path.expanduser(path) for path in context[0][1].get('common', {}).get('paths', [])] - - match_module, separator, match_function = match_name.rpartition('.') - if not separator: - match_module = 'powerline.matchers.{0}'.format(ext) - match_function = match_name - with WithPath(import_paths): - try: - func = getattr(__import__(str(match_module), fromlist=[str(match_function)]), str(match_function)) - except ImportError: - echoerr(context='Error while loading matcher functions', - problem='failed to load module {0}'.format(match_module), - problem_mark=match_name.mark) - return True, False, True - except AttributeError: - echoerr(context='Error while loading matcher functions', - problem='failed to load matcher function {0}'.format(match_function), - problem_mark=match_name.mark) - return True, False, True - - if not callable(func): - echoerr(context='Error while loading matcher functions', - problem='loaded "function" {0} is not callable'.format(match_function), - 
problem_mark=match_name.mark) - return True, False, True - - if hasattr(func, 'func_code') and hasattr(func.func_code, 'co_argcount'): - if func.func_code.co_argcount != 1: - echoerr( - context='Error while loading matcher functions', - problem=( - 'function {0} accepts {1} arguments instead of 1. ' - 'Are you sure it is the proper function?' - ).format(match_function, func.func_code.co_argcount), - problem_mark=match_name.mark - ) - - return True, False, False - - -def check_ext(ext, data, context, echoerr): - havemarks(ext) - hadsomedirs = False - hadproblem = False - if ext not in data['lists']['exts']: - hadproblem = True - echoerr(context='Error while loading {0} extension configuration'.format(ext), - context_mark=ext.mark, - problem='extension configuration does not exist') - else: - for typ in ('themes', 'colorschemes'): - if ext not in data['configs'][typ] and not data['configs']['top_' + typ]: - hadproblem = True - echoerr(context='Error while loading {0} extension configuration'.format(ext), - context_mark=ext.mark, - problem='{0} configuration does not exist'.format(typ)) - else: - hadsomedirs = True - return hadsomedirs, hadproblem - - -def check_config(d, theme, data, context, echoerr): - context_has_marks(context) - if len(context) == 4: - ext = context[-2][0] - else: - # local_themes - ext = context[-3][0] - if ext not in data['lists']['exts']: - echoerr(context='Error while loading {0} extension configuration'.format(ext), - context_mark=ext.mark, - problem='extension configuration does not exist') - return True, False, True - if ( - (ext not in data['configs'][d] or theme not in data['configs'][d][ext]) - and theme not in data['configs']['top_' + d] - ): - echoerr(context='Error while loading {0} from {1} extension configuration'.format(d[:-1], ext), - problem='failed to find configuration file {0}/{1}/{2}.json'.format(d, ext, theme), - problem_mark=theme.mark) - return True, False, True - return True, False, False - - -def check_top_theme(theme, data, context, echoerr): - context_has_marks(context) - havemarks(theme) - if theme not in data['configs']['top_themes']: - echoerr(context='Error while checking extension configuration (key {key})'.format(key=context_key(context)), - context_mark=context[-2][0].mark, - problem='failed to find top theme {0}'.format(theme), - problem_mark=theme.mark) - return True, False, True - return True, False, False +def generate_json_config_loader(lhadproblem): + def load_json_config(config_file_path, load=load, open_file=open_file): + with open_file(config_file_path) as config_file_fp: + r, hadproblem = load(config_file_fp) + if hadproblem: + lhadproblem[0] = True + return r + return load_json_config function_name_re = '^(\w+\.)*[a-zA-Z_]\w*$' @@ -659,97 +135,6 @@ colors_spec = (Spec( ).context_message('Error while loading colors configuration')) -def check_color(color, data, context, echoerr): - havemarks(color) - if (color not in data['colors_config'].get('colors', {}) - and color not in data['colors_config'].get('gradients', {})): - echoerr( - context='Error while checking highlight group in colorscheme (key {key})'.format( - key=context_key(context)), - problem='found unexistent color or gradient {0}'.format(color), - problem_mark=color.mark - ) - return True, False, True - return True, False, False - - -def check_translated_group_name(group, data, context, echoerr): - return check_group(group, data, context, echoerr) - - -def check_group(group, data, context, echoerr): - havemarks(group) - if not isinstance(group, unicode): - return 
True, False, False - colorscheme = data['colorscheme'] - ext = data['ext'] - configs = [] - if ext: - if colorscheme == '__main__': - configs.append([config for config in data['ext_colorscheme_configs'][ext].items()]) - configs.append([config for config in data['top_colorscheme_configs'].items()]) - else: - try: - configs.append([data['ext_colorscheme_configs'][ext][colorscheme]]) - except KeyError: - pass - try: - configs.append([data['ext_colorscheme_configs'][ext]['__main__']]) - except KeyError: - pass - try: - configs.append([data['top_colorscheme_configs'][colorscheme]]) - except KeyError: - pass - else: - try: - configs.append([data['top_colorscheme_configs'][colorscheme]]) - except KeyError: - pass - new_echoerr = DelayedEchoErr(echoerr) - hadproblem = False - for config_lst in configs: - tofind = len(config_lst) - not_found = [] - for config in config_lst: - if isinstance(config, tuple): - new_colorscheme, config = config - new_data = data.copy() - new_data['colorscheme'] = new_colorscheme - else: - new_data = data - havemarks(config) - try: - group_data = config['groups'][group] - except KeyError: - not_found.append(config.mark.name) - else: - proceed, echo, chadproblem = check_group( - group_data, - new_data, - context, - echoerr, - ) - if chadproblem: - hadproblem = True - else: - tofind -= 1 - if not tofind: - return proceed, echo, hadproblem - if not proceed: - break - if not_found: - new_echoerr( - context='Error while checking group definition in colorscheme (key {key})'.format( - key=context_key(context)), - problem='name {0} is not present in {1} {2} colorschemes: {3}'.format( - group, tofind, ext, ', '.join(not_found)), - problem_mark=group.mark - ) - new_echoerr.echo_all() - return True, False, hadproblem - - color_spec = Spec().type(unicode).func(check_color).copy name_spec = Spec().type(unicode).len('gt', 0).optional().copy group_name_spec = Spec().ident().copy @@ -804,557 +189,6 @@ shell_colorscheme_spec = (Spec( ).context_message('Error while loading shell colorscheme')) -generic_keys = set(( - 'exclude_modes', 'include_modes', - 'exclude_function', 'include_function', - 'width', 'align', - 'name', - 'draw_soft_divider', 'draw_hard_divider', - 'priority', - 'after', 'before', - 'display' -)) -type_keys = { - 'function': set(('function', 'args', 'draw_inner_divider')), - 'string': set(('contents', 'type', 'highlight_group', 'divider_highlight_group')), - 'segment_list': set(('function', 'segments', 'args', 'type')), -} -required_keys = { - 'function': set(('function',)), - 'string': set(()), - 'segment_list': set(('function', 'segments',)), -} -highlight_keys = set(('highlight_group', 'name')) - - -def check_key_compatibility(segment, data, context, echoerr): - context_has_marks(context) - havemarks(segment) - segment_type = segment.get('type', MarkedUnicode('function', None)) - havemarks(segment_type) - - if segment_type not in type_keys: - echoerr(context='Error while checking segments (key {key})'.format(key=context_key(context)), - problem='found segment with unknown type {0}'.format(segment_type), - problem_mark=segment_type.mark) - return False, False, True - - hadproblem = False - - keys = set(segment) - if not ((keys - generic_keys) < type_keys[segment_type]): - unknown_keys = keys - generic_keys - type_keys[segment_type] - echoerr( - context='Error while checking segments (key {key})'.format(key=context_key(context)), - context_mark=context[-1][1].mark, - problem='found keys not used with the current segment type: {0}'.format( - list_sep.join(unknown_keys)), 
- problem_mark=list(unknown_keys)[0].mark - ) - hadproblem = True - - if not (keys >= required_keys[segment_type]): - missing_keys = required_keys[segment_type] - keys - echoerr( - context='Error while checking segments (key {key})'.format(key=context_key(context)), - context_mark=context[-1][1].mark, - problem='found missing required keys: {0}'.format( - list_sep.join(missing_keys)) - ) - hadproblem = True - - if not (segment_type == 'function' or (keys & highlight_keys)): - echoerr( - context='Error while checking segments (key {key})'.format(key=context_key(context)), - context_mark=context[-1][1].mark, - problem=( - 'found missing keys required to determine highlight group. ' - 'Either highlight_group or name key must be present' - ) - ) - hadproblem = True - - return True, False, hadproblem - - -def check_segment_module(module, data, context, echoerr): - havemarks(module) - with WithPath(data['import_paths']): - try: - __import__(str(module)) - except ImportError as e: - if echoerr.logger.level >= logging.DEBUG: - echoerr.logger.exception(e) - echoerr(context='Error while checking segments (key {key})'.format(key=context_key(context)), - problem='failed to import module {0}'.format(module), - problem_mark=module.mark) - return True, False, True - return True, False, False - - -def get_function_strings(function_name, context, ext): - if '.' in function_name: - module, function_name = function_name.rpartition('.')[::2] - else: - module = context[0][1].get( - 'default_module', MarkedUnicode('powerline.segments.' + ext, None)) - return module, function_name - - -def check_full_segment_data(segment, data, context, echoerr): - if 'name' not in segment and 'function' not in segment: - return True, False, False - - ext = data['ext'] - theme_segment_data = context[0][1].get('segment_data', {}) - main_theme_name = data['main_config'].get('ext', {}).get(ext, {}).get('theme', None) - if not main_theme_name or data['theme'] == main_theme_name: - top_segment_data = {} - else: - top_segment_data = data['ext_theme_configs'].get(main_theme_name, {}).get('segment_data', {}) - - if segment.get('type', 'function') == 'function': - function_name = segment.get('function') - if function_name: - module, function_name = get_function_strings(function_name, context, ext) - names = [module + '.' 
+ function_name, function_name] - else: - names = [] - elif segment.get('name'): - names = [segment['name']] - else: - return True, False, False - - segment_copy = segment.copy() - - for key in ('before', 'after', 'args', 'contents'): - if key not in segment_copy: - for segment_data in [theme_segment_data, top_segment_data]: - for name in names: - try: - val = segment_data[name][key] - k = segment_data[name].keydict[key] - segment_copy[k] = val - except KeyError: - pass - - return check_key_compatibility(segment_copy, data, context, echoerr) - - -def import_function(function_type, name, data, context, echoerr, module): - context_has_marks(context) - havemarks(name, module) - - with WithPath(data['import_paths']): - try: - func = getattr(__import__(str(module), fromlist=[str(name)]), str(name)) - except ImportError: - echoerr(context='Error while checking segments (key {key})'.format(key=context_key(context)), - context_mark=name.mark, - problem='failed to import module {0}'.format(module), - problem_mark=module.mark) - return None - except AttributeError: - echoerr(context='Error while loading {0} function (key {key})'.format(function_type, key=context_key(context)), - problem='failed to load function {0} from module {1}'.format(name, module), - problem_mark=name.mark) - return None - - if not callable(func): - echoerr(context='Error while checking segments (key {key})'.format(key=context_key(context)), - context_mark=name.mark, - problem='imported "function" {0} from module {1} is not callable'.format(name, module), - problem_mark=module.mark) - return None - - return func - - -def import_segment(*args, **kwargs): - return import_function('segment', *args, **kwargs) - - -def check_segment_function(function_name, data, context, echoerr): - havemarks(function_name) - ext = data['ext'] - module, function_name = get_function_strings(function_name, context, ext) - if context[-2][1].get('type', 'function') == 'function': - func = import_segment(function_name, data, context, echoerr, module=module) - - if not func: - return True, False, True - - hl_groups = [] - divider_hl_group = None - - if func.__doc__: - H_G_USED_STR = 'Highlight groups used: ' - LHGUS = len(H_G_USED_STR) - D_H_G_USED_STR = 'Divider highlight group used: ' - LDHGUS = len(D_H_G_USED_STR) - pointer = 0 - mark_name = '<{0} docstring>'.format(function_name) - for i, line in enumerate(func.__doc__.split('\n')): - if H_G_USED_STR in line: - idx = line.index(H_G_USED_STR) + LHGUS - hl_groups.append(( - line[idx:], - (mark_name, i + 1, idx + 1, func.__doc__), - pointer + idx - )) - elif D_H_G_USED_STR in line: - idx = line.index(D_H_G_USED_STR) + LDHGUS + 2 - mark = Mark(mark_name, i + 1, idx + 1, func.__doc__, pointer + idx) - divider_hl_group = MarkedUnicode(line[idx:-3], mark) - pointer += len(line) + len('\n') - - hadproblem = False - - if divider_hl_group: - r = hl_exists(divider_hl_group, data, context, echoerr, allow_gradients=True) - if r: - echoerr( - context='Error while checking theme (key {key})'.format(key=context_key(context)), - problem=( - 'found highlight group {0} not defined in the following colorschemes: {1}\n' - '(Group name was obtained from function documentation.)' - ).format(divider_hl_group, list_sep.join(r)), - problem_mark=function_name.mark - ) - hadproblem = True - - if hl_groups: - greg = re.compile(r'``([^`]+)``( \(gradient\))?') - parsed_hl_groups = [] - for line, mark_args, pointer in hl_groups: - for s in line.split(', '): - required_pack = [] - sub_pointer = pointer - for subs in s.split(' or '): 
- match = greg.match(subs) - try: - if not match: - continue - hl_group = MarkedUnicode( - match.group(1), - Mark(*mark_args, pointer=sub_pointer + match.start(1)) - ) - gradient = bool(match.group(2)) - required_pack.append((hl_group, gradient)) - finally: - sub_pointer += len(subs) + len(' or ') - parsed_hl_groups.append(required_pack) - pointer += len(s) + len(', ') - del hl_group, gradient - for required_pack in parsed_hl_groups: - rs = [ - hl_exists(hl_group, data, context, echoerr, allow_gradients=('force' if gradient else False)) - for hl_group, gradient in required_pack - ] - if all(rs): - echoerr( - context='Error while checking theme (key {key})'.format(key=context_key(context)), - problem=( - 'found highlight groups list ({0}) with all groups not defined in some colorschemes\n' - '(Group names were taken from function documentation.)' - ).format(list_sep.join((h[0] for h in required_pack))), - problem_mark=function_name.mark - ) - for r, h in zip(rs, required_pack): - echoerr( - context='Error while checking theme (key {key})'.format(key=context_key(context)), - problem='found highlight group {0} not defined in the following colorschemes: {1}'.format( - h[0], list_sep.join(r)) - ) - hadproblem = True - else: - r = hl_exists(function_name, data, context, echoerr, allow_gradients=True) - if r: - echoerr( - context='Error while checking theme (key {key})'.format(key=context_key(context)), - problem=( - 'found highlight group {0} not defined in the following colorschemes: {1}\n' - '(If not specified otherwise in documentation, ' - 'highlight group for function segments\n' - 'is the same as the function name.)' - ).format(function_name, list_sep.join(r)), - problem_mark=function_name.mark - ) - hadproblem = True - - return True, False, hadproblem - elif context[-2][1].get('type') != 'segment_list': - if function_name not in context[0][1].get('segment_data', {}): - main_theme_name = data['main_config'].get('ext', {}).get(ext, {}).get('theme', None) - if data['theme'] == main_theme_name: - main_theme = {} - else: - main_theme = data['ext_theme_configs'].get(main_theme_name, {}) - if ( - function_name not in main_theme.get('segment_data', {}) - and function_name not in data['ext_theme_configs'].get('__main__', {}).get('segment_data', {}) - and not any(((function_name in theme.get('segment_data', {})) for theme in data['top_themes'].values())) - ): - echoerr(context='Error while checking segments (key {key})'.format(key=context_key(context)), - problem='found useless use of name key (such name is not present in theme/segment_data)', - problem_mark=function_name.mark) - - return True, False, False - - -def hl_exists(hl_group, data, context, echoerr, allow_gradients=False): - havemarks(hl_group) - ext = data['ext'] - if ext not in data['colorscheme_configs']: - # No colorschemes. Error was already reported, no need to report it - # twice - return [] - r = [] - for colorscheme, cconfig in data['colorscheme_configs'][ext].items(): - if hl_group not in cconfig.get('groups', {}): - r.append(colorscheme) - elif not allow_gradients or allow_gradients == 'force': - group_config = cconfig['groups'][hl_group] - havemarks(group_config) - hadgradient = False - for ckey in ('fg', 'bg'): - color = group_config.get(ckey) - if not color: - # No color. Error was already reported. - continue - havemarks(color) - # Gradients are only allowed for function segments. 
Note that - # whether *either* color or gradient exists should have been - # already checked - hascolor = color in data['colors_config'].get('colors', {}) - hasgradient = color in data['colors_config'].get('gradients', {}) - if hasgradient: - hadgradient = True - if allow_gradients is False and not hascolor and hasgradient: - echoerr( - context='Error while checking highlight group in theme (key {key})'.format( - key=context_key(context)), - context_mark=hl_group.mark, - problem='group {0} is using gradient {1} instead of a color'.format(hl_group, color), - problem_mark=color.mark - ) - r.append(colorscheme) - continue - if allow_gradients == 'force' and not hadgradient: - echoerr( - context='Error while checking highlight group in theme (key {key})'.format( - key=context_key(context)), - context_mark=hl_group.mark, - problem='group {0} should have at least one gradient color, but it has no'.format(hl_group), - problem_mark=group_config.mark - ) - r.append(colorscheme) - return r - - -def check_highlight_group(hl_group, data, context, echoerr): - havemarks(hl_group) - r = hl_exists(hl_group, data, context, echoerr) - if r: - echoerr( - context='Error while checking theme (key {key})'.format(key=context_key(context)), - problem='found highlight group {0} not defined in the following colorschemes: {1}'.format( - hl_group, list_sep.join(r)), - problem_mark=hl_group.mark - ) - return True, False, True - return True, False, False - - -def check_highlight_groups(hl_groups, data, context, echoerr): - havemarks(hl_groups) - rs = [hl_exists(hl_group, data, context, echoerr) for hl_group in hl_groups] - if all(rs): - echoerr( - context='Error while checking theme (key {key})'.format(key=context_key(context)), - problem='found highlight groups list ({0}) with all groups not defined in some colorschemes'.format( - list_sep.join((unicode(h) for h in hl_groups))), - problem_mark=hl_groups.mark - ) - for r, hl_group in zip(rs, hl_groups): - echoerr( - context='Error while checking theme (key {key})'.format(key=context_key(context)), - problem='found highlight group {0} not defined in the following colorschemes: {1}'.format( - hl_group, list_sep.join(r)), - problem_mark=hl_group.mark - ) - return True, False, True - return True, False, False - - -def list_themes(data, context): - theme_type = data['theme_type'] - ext = data['ext'] - main_theme_name = data['main_config'].get('ext', {}).get(ext, {}).get('theme', None) - is_main_theme = (data['theme'] == main_theme_name) - if theme_type == 'top': - return list(itertools.chain(*[ - [(theme_ext, theme) for theme in theme_configs.values()] - for theme_ext, theme_configs in data['theme_configs'].items() - ])) - elif theme_type == 'main' or is_main_theme: - return [(ext, theme) for theme in data['ext_theme_configs'].values()] - else: - return [(ext, context[0][1])] - - -def check_segment_data_key(key, data, context, echoerr): - havemarks(key) - has_module_name = '.' in key - found = False - for ext, theme in list_themes(data, context): - for segments in theme.get('segments', {}).values(): - for segment in segments: - if 'name' in segment: - if key == segment['name']: - found = True - break - else: - function_name = segment.get('function') - if function_name: - module, function_name = get_function_strings(function_name, ((None, theme),), ext) - if has_module_name: - full_name = module + '.' 
+ function_name - if key == full_name: - found = True - break - else: - if key == function_name: - found = True - break - if found: - break - if found: - break - else: - if data['theme_type'] != 'top': - echoerr(context='Error while checking segment data', - problem='found key {0} that cannot be associated with any segment'.format(key), - problem_mark=key.mark) - return True, False, True - - return True, False, False - - -threaded_args_specs = { - 'interval': Spec().cmp('gt', 0.0), - 'update_first': Spec().type(bool), - 'shutdown_event': Spec().error('Shutdown event must be set by powerline'), -} - - -def check_args_variant(func, args, data, context, echoerr): - havemarks(args) - argspec = getconfigargspec(func) - present_args = set(args) - all_args = set(argspec.args) - required_args = set(argspec.args[:-len(argspec.defaults)]) - - hadproblem = False - - if required_args - present_args: - echoerr( - context='Error while checking segment arguments (key {key})'.format(key=context_key(context)), - context_mark=args.mark, - problem='some of the required keys are missing: {0}'.format(list_sep.join(required_args - present_args)) - ) - hadproblem = True - - if not all_args >= present_args: - echoerr(context='Error while checking segment arguments (key {key})'.format(key=context_key(context)), - context_mark=args.mark, - problem='found unknown keys: {0}'.format(list_sep.join(present_args - all_args)), - problem_mark=next(iter(present_args - all_args)).mark) - hadproblem = True - - if isinstance(func, ThreadedSegment): - for key in set(threaded_args_specs) & present_args: - proceed, khadproblem = threaded_args_specs[key].match( - args[key], - args.mark, - data, - context + new_context_item(key, args), - echoerr - ) - if khadproblem: - hadproblem = True - if not proceed: - return hadproblem - - return hadproblem - - -def check_args(get_functions, args, data, context, echoerr): - context_has_marks(context) - new_echoerr = DelayedEchoErr(echoerr) - count = 0 - hadproblem = False - for func in get_functions(data, context, new_echoerr): - count += 1 - shadproblem = check_args_variant(func, args, data, context, echoerr) - if shadproblem: - hadproblem = True - - if not count: - hadproblem = True - if new_echoerr: - new_echoerr.echo_all() - else: - echoerr(context='Error while checking segment arguments (key {key})'.format(key=context_key(context)), - context_mark=context[-2][1].mark, - problem='no suitable segments found') - - return True, False, hadproblem - - -def get_one_segment_function(data, context, echoerr): - ext = data['ext'] - function_name = context[-2][1].get('function') - if function_name: - module, function_name = get_function_strings(function_name, context, ext) - func = import_segment(function_name, data, context, echoerr, module=module) - if func: - yield func - - -def get_all_possible_functions(data, context, echoerr): - name = context[-2][0] - module, name = name.rpartition('.')[::2] - if module: - func = import_segment(name, data, context, echoerr, module=module) - if func: - yield func - else: - for ext, theme_config in list_themes(data, context): - for segments in theme_config.get('segments', {}).values(): - for segment in segments: - if segment.get('type', 'function') == 'function': - function_name = segment.get('function') - current_name = segment.get('name') - if function_name: - module, function_name = get_function_strings(function_name, ((None, theme_config),), ext) - if current_name == name or function_name == name: - func = import_segment(function_name, data, context, 
echoerr, module=module) - if func: - yield func - - -def check_exinclude_function(name, data, context, echoerr): - ext = data['ext'] - module, name = name.rpartition('.')[::2] - if not module: - module = MarkedUnicode('powerline.selectors.' + ext, None) - func = import_function('selector', name, data, context, echoerr, module=module) - if not func: - return True, False, True - return True, False, False - - args_spec = Spec( pl=Spec().error('pl object must be set by powerline').optional(), segment_info=Spec().error('Segment info dictionary must be set by powerline').optional(), @@ -1455,17 +289,25 @@ theme_spec = common_theme_spec().update( ) -def generate_json_config_loader(lhadproblem): - def load_json_config(config_file_path, load=load, open_file=open_file): - with open_file(config_file_path) as config_file_fp: - r, hadproblem = load(config_file_fp) - if hadproblem: - lhadproblem[0] = True - return r - return load_json_config +def check(paths=None, debug=False, echoerr=echoerr, require_ext=None): + '''Check configuration sanity + :param list paths: + Paths from which configuration should be loaded. + :param bool debug: + Determines whether some information useful for debugging linter should + be output. + :param function echoerr: + Function that will be used to echo the error(s). Should accept four + optional keyword parameters: ``problem`` and ``problem_mark``, and + ``context`` and ``context_mark``. + :param str require_ext: + Require configuration for some extension to be present. -def check(paths=None, debug=False): + :return: + ``False`` if user configuration seems to be completely sane and ``True`` + if some problems were found. + ''' search_paths = paths or get_config_paths() find_config_files = generate_config_finder(lambda: search_paths) @@ -1475,6 +317,15 @@ def check(paths=None, debug=False): ee = EchoErr(echoerr, logger) + if require_ext: + used_main_spec = main_spec.copy() + try: + used_main_spec['ext'][require_ext].required() + except KeyError: + used_main_spec['ext'][require_ext] = ext_spec() + else: + used_main_spec = main_spec + lhadproblem = [False] load_json_config = generate_json_config_loader(lhadproblem) @@ -1507,7 +358,7 @@ def check(paths=None, debug=False): paths['top_' + typ].append(extpath) else: hadproblem = True - sys.stderr.write('Path {0} is supposed to be a directory, but it is not\n'.format(d)) + ee(problem='Path {0} is supposed to be a directory, but it is not'.format(d)) hadproblem = False @@ -1522,7 +373,7 @@ def check(paths=None, debug=False): lists[typ].add(name) if name.startswith('__') or name.endswith('__'): hadproblem = True - sys.stderr.write('File name is not supposed to start or end with “__”: {0}'.format( + ee(problem='File name is not supposed to start or end with “__”: {0}'.format( os.path.join(d, subp) )) configs[typ][ext][name] = os.path.join(d, subp) @@ -1536,7 +387,7 @@ def check(paths=None, debug=False): for ext in diff: typ = 'colorschemes' if ext in configs['themes'] else 'themes' if not configs['top_' + typ] or typ == 'themes': - sys.stderr.write('{0} extension {1} not present in {2}\n'.format( + ee(problem='{0} extension {1} not present in {2}'.format( ext, 'configuration' if (ext in paths['themes'] and ext in paths['colorschemes']) else 'directory', typ, @@ -1546,17 +397,17 @@ def check(paths=None, debug=False): main_config = load_config('config', find_config_files, config_loader) except IOError: main_config = {} - sys.stderr.write('\nConfiguration file not found: config.json\n') + ee(problem='Configuration file not found: 
config.json') hadproblem = True except MarkedError as e: main_config = {} - sys.stderr.write(str(e) + '\n') + ee(problem=str(e)) hadproblem = True else: - if main_spec.match( + if used_main_spec.match( main_config, data={'configs': configs, 'lists': lists}, - context=init_context(main_config), + context=Context(main_config), echoerr=ee )[1]: hadproblem = True @@ -1567,14 +418,14 @@ def check(paths=None, debug=False): colors_config = load_config('colors', find_config_files, config_loader) except IOError: colors_config = {} - sys.stderr.write('\nConfiguration file not found: colors.json\n') + ee(problem='Configuration file not found: colors.json') hadproblem = True except MarkedError as e: colors_config = {} - sys.stderr.write(str(e) + '\n') + ee(problem=str(e)) hadproblem = True else: - if colors_spec.match(colors_config, context=init_context(colors_config), echoerr=ee)[1]: + if colors_spec.match(colors_config, context=Context(colors_config), echoerr=ee)[1]: hadproblem = True if lhadproblem[0]: @@ -1592,14 +443,14 @@ def check(paths=None, debug=False): try: config, lhadproblem = load(config_file_fp) except MarkedError as e: - sys.stderr.write(str(e) + '\n') + ee(problem=str(e)) hadproblem = True continue if lhadproblem: hadproblem = True top_colorscheme_configs[colorscheme] = config data['colorscheme'] = colorscheme - if top_colorscheme_spec.match(config, context=init_context(config), data=data, echoerr=ee)[1]: + if top_colorscheme_spec.match(config, context=Context(config), data=data, echoerr=ee)[1]: hadproblem = True ext_colorscheme_configs = defaultdict(lambda: {}) @@ -1609,7 +460,7 @@ def check(paths=None, debug=False): try: config, lhadproblem = load(config_file_fp) except MarkedError as e: - sys.stderr.write(str(e) + '\n') + ee(problem=str(e)) hadproblem = True continue if lhadproblem: @@ -1631,7 +482,7 @@ def check(paths=None, debug=False): spec = shell_colorscheme_spec else: spec = colorscheme_spec - if spec.match(config, context=init_context(config), data=data, echoerr=ee)[1]: + if spec.match(config, context=Context(config), data=data, echoerr=ee)[1]: hadproblem = True colorscheme_configs = {} @@ -1662,7 +513,7 @@ def check(paths=None, debug=False): try: config, lhadproblem = load(config_file_fp) except MarkedError as e: - sys.stderr.write(str(e) + '\n') + ee(problem=str(e)) hadproblem = True continue if lhadproblem: @@ -1675,7 +526,7 @@ def check(paths=None, debug=False): try: config, lhadproblem = load(config_file_fp) except MarkedError as e: - sys.stderr.write(str(e) + '\n') + ee(problem=str(e)) hadproblem = True continue if lhadproblem: @@ -1700,7 +551,7 @@ def check(paths=None, debug=False): else: data['theme_type'] = 'regular' spec = theme_spec - if spec.match(config, context=init_context(config), data=data, echoerr=ee)[1]: + if spec.match(config, context=Context(config), data=data, echoerr=ee)[1]: hadproblem = True for top_theme, config in top_theme_configs.items(): @@ -1715,7 +566,7 @@ def check(paths=None, debug=False): } data['theme_type'] = 'top' data['theme'] = top_theme - if top_theme_spec.match(config, context=init_context(config), data=data, echoerr=ee)[1]: + if top_theme_spec.match(config, context=Context(config), data=data, echoerr=ee)[1]: hadproblem = True return hadproblem diff --git a/powerline/lint/checks.py b/powerline/lint/checks.py new file mode 100644 index 00000000..a59d4440 --- /dev/null +++ b/powerline/lint/checks.py @@ -0,0 +1,706 @@ +# vim:fileencoding=utf-8:noet +from __future__ import (unicode_literals, division, absolute_import, print_function) + 
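+# Checker functions used by the configuration linter.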
+import os +import re +import logging + +from powerline.lib.threaded import ThreadedSegment +from powerline.lib.unicode import unicode +from powerline.lint.markedjson.markedvalue import MarkedUnicode +from powerline.lint.markedjson.error import DelayedEchoErr, Mark +from powerline.lint.selfcheck import havemarks +from powerline.lint.context import JStr, list_themes +from powerline.lint.imp import WithPath, import_function, import_segment +from powerline.lint.spec import Spec +from powerline.lint.inspect import getconfigargspec + + +list_sep = JStr(', ') + + +generic_keys = set(( + 'exclude_modes', 'include_modes', + 'exclude_function', 'include_function', + 'width', 'align', + 'name', + 'draw_soft_divider', 'draw_hard_divider', + 'priority', + 'after', 'before', + 'display' +)) +type_keys = { + 'function': set(('function', 'args', 'draw_inner_divider')), + 'string': set(('contents', 'type', 'highlight_group', 'divider_highlight_group')), + 'segment_list': set(('function', 'segments', 'args', 'type')), +} +required_keys = { + 'function': set(('function',)), + 'string': set(()), + 'segment_list': set(('function', 'segments',)), +} +highlight_keys = set(('highlight_group', 'name')) + + +def get_function_strings(function_name, context, ext): + if '.' in function_name: + module, function_name = function_name.rpartition('.')[::2] + else: + module = context[0][1].get( + 'default_module', MarkedUnicode('powerline.segments.' + ext, None)) + return module, function_name + + +def check_matcher_func(ext, match_name, data, context, echoerr): + havemarks(match_name) + import_paths = [os.path.expanduser(path) for path in context[0][1].get('common', {}).get('paths', [])] + + match_module, separator, match_function = match_name.rpartition('.') + if not separator: + match_module = 'powerline.matchers.{0}'.format(ext) + match_function = match_name + with WithPath(import_paths): + try: + func = getattr(__import__(str(match_module), fromlist=[str(match_function)]), str(match_function)) + except ImportError: + echoerr(context='Error while loading matcher functions', + problem='failed to load module {0}'.format(match_module), + problem_mark=match_name.mark) + return True, False, True + except AttributeError: + echoerr(context='Error while loading matcher functions', + problem='failed to load matcher function {0}'.format(match_function), + problem_mark=match_name.mark) + return True, False, True + + if not callable(func): + echoerr(context='Error while loading matcher functions', + problem='loaded “function” {0} is not callable'.format(match_function), + problem_mark=match_name.mark) + return True, False, True + + if hasattr(func, 'func_code') and hasattr(func.func_code, 'co_argcount'): + if func.func_code.co_argcount != 1: + echoerr( + context='Error while loading matcher functions', + problem=( + 'function {0} accepts {1} arguments instead of 1. ' + 'Are you sure it is the proper function?' 
+ ).format(match_function, func.func_code.co_argcount), + problem_mark=match_name.mark + ) + + return True, False, False + + +def check_ext(ext, data, context, echoerr): + havemarks(ext) + hadsomedirs = False + hadproblem = False + if ext not in data['lists']['exts']: + hadproblem = True + echoerr(context='Error while loading {0} extension configuration'.format(ext), + context_mark=ext.mark, + problem='extension configuration does not exist') + else: + for typ in ('themes', 'colorschemes'): + if ext not in data['configs'][typ] and not data['configs']['top_' + typ]: + hadproblem = True + echoerr(context='Error while loading {0} extension configuration'.format(ext), + context_mark=ext.mark, + problem='{0} configuration does not exist'.format(typ)) + else: + hadsomedirs = True + return hadsomedirs, hadproblem + + +def check_config(d, theme, data, context, echoerr): + if len(context) == 4: + ext = context[-2][0] + else: + # local_themes + ext = context[-3][0] + if ext not in data['lists']['exts']: + echoerr(context='Error while loading {0} extension configuration'.format(ext), + context_mark=ext.mark, + problem='extension configuration does not exist') + return True, False, True + if ( + (ext not in data['configs'][d] or theme not in data['configs'][d][ext]) + and theme not in data['configs']['top_' + d] + ): + echoerr(context='Error while loading {0} from {1} extension configuration'.format(d[:-1], ext), + problem='failed to find configuration file {0}/{1}/{2}.json'.format(d, ext, theme), + problem_mark=theme.mark) + return True, False, True + return True, False, False + + +def check_top_theme(theme, data, context, echoerr): + havemarks(theme) + if theme not in data['configs']['top_themes']: + echoerr(context='Error while checking extension configuration (key {key})'.format(key=context.key), + context_mark=context[-2][0].mark, + problem='failed to find top theme {0}'.format(theme), + problem_mark=theme.mark) + return True, False, True + return True, False, False + + +def check_color(color, data, context, echoerr): + havemarks(color) + if (color not in data['colors_config'].get('colors', {}) + and color not in data['colors_config'].get('gradients', {})): + echoerr( + context='Error while checking highlight group in colorscheme (key {key})'.format( + key=context.key), + problem='found unexistent color or gradient {0}'.format(color), + problem_mark=color.mark + ) + return True, False, True + return True, False, False + + +def check_translated_group_name(group, data, context, echoerr): + return check_group(group, data, context, echoerr) + + +def check_group(group, data, context, echoerr): + havemarks(group) + if not isinstance(group, unicode): + return True, False, False + colorscheme = data['colorscheme'] + ext = data['ext'] + configs = [] + if ext: + if colorscheme == '__main__': + configs.append([config for config in data['ext_colorscheme_configs'][ext].items()]) + configs.append([config for config in data['top_colorscheme_configs'].items()]) + else: + try: + configs.append([data['ext_colorscheme_configs'][ext][colorscheme]]) + except KeyError: + pass + try: + configs.append([data['ext_colorscheme_configs'][ext]['__main__']]) + except KeyError: + pass + try: + configs.append([data['top_colorscheme_configs'][colorscheme]]) + except KeyError: + pass + else: + try: + configs.append([data['top_colorscheme_configs'][colorscheme]]) + except KeyError: + pass + new_echoerr = DelayedEchoErr(echoerr) + hadproblem = False + for config_lst in configs: + tofind = len(config_lst) + not_found = [] + for 
config in config_lst: + if isinstance(config, tuple): + new_colorscheme, config = config + new_data = data.copy() + new_data['colorscheme'] = new_colorscheme + else: + new_data = data + havemarks(config) + try: + group_data = config['groups'][group] + except KeyError: + not_found.append(config.mark.name) + else: + proceed, echo, chadproblem = check_group( + group_data, + new_data, + context, + echoerr, + ) + if chadproblem: + hadproblem = True + else: + tofind -= 1 + if not tofind: + return proceed, echo, hadproblem + if not proceed: + break + if not_found: + new_echoerr( + context='Error while checking group definition in colorscheme (key {key})'.format( + key=context.key), + problem='name {0} is not present in {1} {2} colorschemes: {3}'.format( + group, tofind, ext, ', '.join(not_found)), + problem_mark=group.mark + ) + new_echoerr.echo_all() + return True, False, hadproblem + + +def check_key_compatibility(segment, data, context, echoerr): + havemarks(segment) + segment_type = segment.get('type', MarkedUnicode('function', None)) + havemarks(segment_type) + + if segment_type not in type_keys: + echoerr(context='Error while checking segments (key {key})'.format(key=context.key), + problem='found segment with unknown type {0}'.format(segment_type), + problem_mark=segment_type.mark) + return False, False, True + + hadproblem = False + + keys = set(segment) + if not ((keys - generic_keys) < type_keys[segment_type]): + unknown_keys = keys - generic_keys - type_keys[segment_type] + echoerr( + context='Error while checking segments (key {key})'.format(key=context.key), + context_mark=context[-1][1].mark, + problem='found keys not used with the current segment type: {0}'.format( + list_sep.join(unknown_keys)), + problem_mark=list(unknown_keys)[0].mark + ) + hadproblem = True + + if not (keys >= required_keys[segment_type]): + missing_keys = required_keys[segment_type] - keys + echoerr( + context='Error while checking segments (key {key})'.format(key=context.key), + context_mark=context[-1][1].mark, + problem='found missing required keys: {0}'.format( + list_sep.join(missing_keys)) + ) + hadproblem = True + + if not (segment_type == 'function' or (keys & highlight_keys)): + echoerr( + context='Error while checking segments (key {key})'.format(key=context.key), + context_mark=context[-1][1].mark, + problem=( + 'found missing keys required to determine highlight group. 
' + 'Either highlight_group or name key must be present' + ) + ) + hadproblem = True + + return True, False, hadproblem + + +def check_segment_module(module, data, context, echoerr): + havemarks(module) + with WithPath(data['import_paths']): + try: + __import__(str(module)) + except ImportError as e: + if echoerr.logger.level >= logging.DEBUG: + echoerr.logger.exception(e) + echoerr(context='Error while checking segments (key {key})'.format(key=context.key), + problem='failed to import module {0}'.format(module), + problem_mark=module.mark) + return True, False, True + return True, False, False + + +def check_full_segment_data(segment, data, context, echoerr): + if 'name' not in segment and 'function' not in segment: + return True, False, False + + ext = data['ext'] + theme_segment_data = context[0][1].get('segment_data', {}) + main_theme_name = data['main_config'].get('ext', {}).get(ext, {}).get('theme', None) + if not main_theme_name or data['theme'] == main_theme_name: + top_segment_data = {} + else: + top_segment_data = data['ext_theme_configs'].get(main_theme_name, {}).get('segment_data', {}) + + if segment.get('type', 'function') == 'function': + function_name = segment.get('function') + if function_name: + module, function_name = get_function_strings(function_name, context, ext) + names = [module + '.' + function_name, function_name] + else: + names = [] + elif segment.get('name'): + names = [segment['name']] + else: + return True, False, False + + segment_copy = segment.copy() + + for key in ('before', 'after', 'args', 'contents'): + if key not in segment_copy: + for segment_data in [theme_segment_data, top_segment_data]: + for name in names: + try: + val = segment_data[name][key] + k = segment_data[name].keydict[key] + segment_copy[k] = val + except KeyError: + pass + + return check_key_compatibility(segment_copy, data, context, echoerr) + + +def check_segment_function(function_name, data, context, echoerr): + havemarks(function_name) + ext = data['ext'] + module, function_name = get_function_strings(function_name, context, ext) + if context[-2][1].get('type', 'function') == 'function': + func = import_segment(function_name, data, context, echoerr, module=module) + + if not func: + return True, False, True + + hl_groups = [] + divider_hl_group = None + + if func.__doc__: + H_G_USED_STR = 'Highlight groups used: ' + LHGUS = len(H_G_USED_STR) + D_H_G_USED_STR = 'Divider highlight group used: ' + LDHGUS = len(D_H_G_USED_STR) + pointer = 0 + mark_name = '<{0} docstring>'.format(function_name) + for i, line in enumerate(func.__doc__.split('\n')): + if H_G_USED_STR in line: + idx = line.index(H_G_USED_STR) + LHGUS + hl_groups.append(( + line[idx:], + (mark_name, i + 1, idx + 1, func.__doc__), + pointer + idx + )) + elif D_H_G_USED_STR in line: + idx = line.index(D_H_G_USED_STR) + LDHGUS + 2 + mark = Mark(mark_name, i + 1, idx + 1, func.__doc__, pointer + idx) + divider_hl_group = MarkedUnicode(line[idx:-3], mark) + pointer += len(line) + len('\n') + + hadproblem = False + + if divider_hl_group: + r = hl_exists(divider_hl_group, data, context, echoerr, allow_gradients=True) + if r: + echoerr( + context='Error while checking theme (key {key})'.format(key=context.key), + problem=( + 'found highlight group {0} not defined in the following colorschemes: {1}\n' + '(Group name was obtained from function documentation.)' + ).format(divider_hl_group, list_sep.join(r)), + problem_mark=function_name.mark + ) + hadproblem = True + + if hl_groups: + greg = re.compile(r'``([^`]+)``( \(gradient\))?') 
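+			# Parse the collected docstring lines: group entries are separated
+			# by “, ”, alternatives within an entry by “ or ”, and each group
+			# is written as ``name`` with an optional “ (gradient)” suffix.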
+ parsed_hl_groups = [] + for line, mark_args, pointer in hl_groups: + for s in line.split(', '): + required_pack = [] + sub_pointer = pointer + for subs in s.split(' or '): + match = greg.match(subs) + try: + if not match: + continue + hl_group = MarkedUnicode( + match.group(1), + Mark(*mark_args, pointer=sub_pointer + match.start(1)) + ) + gradient = bool(match.group(2)) + required_pack.append((hl_group, gradient)) + finally: + sub_pointer += len(subs) + len(' or ') + parsed_hl_groups.append(required_pack) + pointer += len(s) + len(', ') + del hl_group, gradient + for required_pack in parsed_hl_groups: + rs = [ + hl_exists(hl_group, data, context, echoerr, allow_gradients=('force' if gradient else False)) + for hl_group, gradient in required_pack + ] + if all(rs): + echoerr( + context='Error while checking theme (key {key})'.format(key=context.key), + problem=( + 'found highlight groups list ({0}) with all groups not defined in some colorschemes\n' + '(Group names were taken from function documentation.)' + ).format(list_sep.join((h[0] for h in required_pack))), + problem_mark=function_name.mark + ) + for r, h in zip(rs, required_pack): + echoerr( + context='Error while checking theme (key {key})'.format(key=context.key), + problem='found highlight group {0} not defined in the following colorschemes: {1}'.format( + h[0], list_sep.join(r)) + ) + hadproblem = True + else: + r = hl_exists(function_name, data, context, echoerr, allow_gradients=True) + if r: + echoerr( + context='Error while checking theme (key {key})'.format(key=context.key), + problem=( + 'found highlight group {0} not defined in the following colorschemes: {1}\n' + '(If not specified otherwise in documentation, ' + 'highlight group for function segments\n' + 'is the same as the function name.)' + ).format(function_name, list_sep.join(r)), + problem_mark=function_name.mark + ) + hadproblem = True + + return True, False, hadproblem + elif context[-2][1].get('type') != 'segment_list': + if function_name not in context[0][1].get('segment_data', {}): + main_theme_name = data['main_config'].get('ext', {}).get(ext, {}).get('theme', None) + if data['theme'] == main_theme_name: + main_theme = {} + else: + main_theme = data['ext_theme_configs'].get(main_theme_name, {}) + if ( + function_name not in main_theme.get('segment_data', {}) + and function_name not in data['ext_theme_configs'].get('__main__', {}).get('segment_data', {}) + and not any(((function_name in theme.get('segment_data', {})) for theme in data['top_themes'].values())) + ): + echoerr(context='Error while checking segments (key {key})'.format(key=context.key), + problem='found useless use of name key (such name is not present in theme/segment_data)', + problem_mark=function_name.mark) + + return True, False, False + + +def hl_exists(hl_group, data, context, echoerr, allow_gradients=False): + havemarks(hl_group) + ext = data['ext'] + if ext not in data['colorscheme_configs']: + # No colorschemes. Error was already reported, no need to report it + # twice + return [] + r = [] + for colorscheme, cconfig in data['colorscheme_configs'][ext].items(): + if hl_group not in cconfig.get('groups', {}): + r.append(colorscheme) + elif not allow_gradients or allow_gradients == 'force': + group_config = cconfig['groups'][hl_group] + havemarks(group_config) + hadgradient = False + for ckey in ('fg', 'bg'): + color = group_config.get(ckey) + if not color: + # No color. Error was already reported. + continue + havemarks(color) + # Gradients are only allowed for function segments. 
Note that + # whether *either* color or gradient exists should have been + # already checked + hascolor = color in data['colors_config'].get('colors', {}) + hasgradient = color in data['colors_config'].get('gradients', {}) + if hasgradient: + hadgradient = True + if allow_gradients is False and not hascolor and hasgradient: + echoerr( + context='Error while checking highlight group in theme (key {key})'.format( + key=context.key), + context_mark=hl_group.mark, + problem='group {0} is using gradient {1} instead of a color'.format(hl_group, color), + problem_mark=color.mark + ) + r.append(colorscheme) + continue + if allow_gradients == 'force' and not hadgradient: + echoerr( + context='Error while checking highlight group in theme (key {key})'.format( + key=context.key), + context_mark=hl_group.mark, + problem='group {0} should have at least one gradient color, but it has no'.format(hl_group), + problem_mark=group_config.mark + ) + r.append(colorscheme) + return r + + +def check_highlight_group(hl_group, data, context, echoerr): + havemarks(hl_group) + r = hl_exists(hl_group, data, context, echoerr) + if r: + echoerr( + context='Error while checking theme (key {key})'.format(key=context.key), + problem='found highlight group {0} not defined in the following colorschemes: {1}'.format( + hl_group, list_sep.join(r)), + problem_mark=hl_group.mark + ) + return True, False, True + return True, False, False + + +def check_highlight_groups(hl_groups, data, context, echoerr): + havemarks(hl_groups) + rs = [hl_exists(hl_group, data, context, echoerr) for hl_group in hl_groups] + if all(rs): + echoerr( + context='Error while checking theme (key {key})'.format(key=context.key), + problem='found highlight groups list ({0}) with all groups not defined in some colorschemes'.format( + list_sep.join((unicode(h) for h in hl_groups))), + problem_mark=hl_groups.mark + ) + for r, hl_group in zip(rs, hl_groups): + echoerr( + context='Error while checking theme (key {key})'.format(key=context.key), + problem='found highlight group {0} not defined in the following colorschemes: {1}'.format( + hl_group, list_sep.join(r)), + problem_mark=hl_group.mark + ) + return True, False, True + return True, False, False + + +def check_segment_data_key(key, data, context, echoerr): + havemarks(key) + has_module_name = '.' in key + found = False + for ext, theme in list_themes(data, context): + for segments in theme.get('segments', {}).values(): + for segment in segments: + if 'name' in segment: + if key == segment['name']: + found = True + break + else: + function_name = segment.get('function') + if function_name: + module, function_name = get_function_strings(function_name, ((None, theme),), ext) + if has_module_name: + full_name = module + '.' 
+ function_name + if key == full_name: + found = True + break + else: + if key == function_name: + found = True + break + if found: + break + if found: + break + else: + if data['theme_type'] != 'top': + echoerr(context='Error while checking segment data', + problem='found key {0} that cannot be associated with any segment'.format(key), + problem_mark=key.mark) + return True, False, True + + return True, False, False + + +threaded_args_specs = { + 'interval': Spec().cmp('gt', 0.0), + 'update_first': Spec().type(bool), + 'shutdown_event': Spec().error('Shutdown event must be set by powerline'), +} + + +def check_args_variant(func, args, data, context, echoerr): + havemarks(args) + argspec = getconfigargspec(func) + present_args = set(args) + all_args = set(argspec.args) + required_args = set(argspec.args[:-len(argspec.defaults)]) + + hadproblem = False + + if required_args - present_args: + echoerr( + context='Error while checking segment arguments (key {key})'.format(key=context.key), + context_mark=args.mark, + problem='some of the required keys are missing: {0}'.format(list_sep.join(required_args - present_args)) + ) + hadproblem = True + + if not all_args >= present_args: + echoerr(context='Error while checking segment arguments (key {key})'.format(key=context.key), + context_mark=args.mark, + problem='found unknown keys: {0}'.format(list_sep.join(present_args - all_args)), + problem_mark=next(iter(present_args - all_args)).mark) + hadproblem = True + + if isinstance(func, ThreadedSegment): + for key in set(threaded_args_specs) & present_args: + proceed, khadproblem = threaded_args_specs[key].match( + args[key], + args.mark, + data, + context.enter_key(args, key), + echoerr + ) + if khadproblem: + hadproblem = True + if not proceed: + return hadproblem + + return hadproblem + + +def check_args(get_functions, args, data, context, echoerr): + new_echoerr = DelayedEchoErr(echoerr) + count = 0 + hadproblem = False + for func in get_functions(data, context, new_echoerr): + count += 1 + shadproblem = check_args_variant(func, args, data, context, echoerr) + if shadproblem: + hadproblem = True + + if not count: + hadproblem = True + if new_echoerr: + new_echoerr.echo_all() + else: + echoerr(context='Error while checking segment arguments (key {key})'.format(key=context.key), + context_mark=context[-2][1].mark, + problem='no suitable segments found') + + return True, False, hadproblem + + +def get_one_segment_function(data, context, echoerr): + ext = data['ext'] + function_name = context[-2][1].get('function') + if function_name: + module, function_name = get_function_strings(function_name, context, ext) + func = import_segment(function_name, data, context, echoerr, module=module) + if func: + yield func + + +def get_all_possible_functions(data, context, echoerr): + name = context[-2][0] + module, name = name.rpartition('.')[::2] + if module: + func = import_segment(name, data, context, echoerr, module=module) + if func: + yield func + else: + for ext, theme_config in list_themes(data, context): + for segments in theme_config.get('segments', {}).values(): + for segment in segments: + if segment.get('type', 'function') == 'function': + function_name = segment.get('function') + current_name = segment.get('name') + if function_name: + module, function_name = get_function_strings(function_name, ((None, theme_config),), ext) + if current_name == name or function_name == name: + func = import_segment(function_name, data, context, echoerr, module=module) + if func: + yield func + + +def 
check_exinclude_function(name, data, context, echoerr): + ext = data['ext'] + module, name = name.rpartition('.')[::2] + if not module: + module = MarkedUnicode('powerline.selectors.' + ext, None) + func = import_function('selector', name, data, context, echoerr, module=module) + if not func: + return True, False, True + return True, False, False diff --git a/powerline/lint/context.py b/powerline/lint/context.py new file mode 100644 index 00000000..a48a2832 --- /dev/null +++ b/powerline/lint/context.py @@ -0,0 +1,68 @@ +# vim:fileencoding=utf-8:noet +from __future__ import (unicode_literals, division, absolute_import, print_function) + +import itertools + +from powerline.lib.unicode import unicode +from powerline.lint.markedjson.markedvalue import MarkedUnicode +from powerline.lint.selfcheck import havemarks + + +class JStr(unicode): + def join(self, iterable): + return super(JStr, self).join((unicode(item) for item in iterable)) + + +key_sep = JStr('/') + + +def list_themes(data, context): + theme_type = data['theme_type'] + ext = data['ext'] + main_theme_name = data['main_config'].get('ext', {}).get(ext, {}).get('theme', None) + is_main_theme = (data['theme'] == main_theme_name) + if theme_type == 'top': + return list(itertools.chain(*[ + [(theme_ext, theme) for theme in theme_configs.values()] + for theme_ext, theme_configs in data['theme_configs'].items() + ])) + elif theme_type == 'main' or is_main_theme: + return [(ext, theme) for theme in data['ext_theme_configs'].values()] + else: + return [(ext, context[0][1])] + + +class Context(tuple): + for func in dir(tuple): + if func in ('__getitem__', '__init__', '__getattribute__', '__len__', '__iter__'): + continue + exec(( + 'def {0}(self, *args, **kwargs):\n' + ' raise TypeError("{0} is not allowed for Context")' + ).format(func)) + del func + + __slots__ = () + + def __new__(cls, base, context_key=None, context_value=None): + if context_key is not None: + assert(context_value is not None) + assert(type(base) is Context) + havemarks(context_key, context_value) + return tuple.__new__(cls, tuple.__add__(base, ((context_key, context_value),))) + else: + havemarks(base) + return tuple.__new__(cls, ((MarkedUnicode('', base.mark), base),)) + + @property + def key(self): + return key_sep.join((c[0] for c in self)) + + def enter_key(self, value, key): + return self.enter(value.keydict[key], value[key]) + + def enter_item(self, name, item): + return self.enter(MarkedUnicode(name, item.mark), item) + + def enter(self, context_key, context_value): + return Context.__new__(Context, self, context_key, context_value) diff --git a/powerline/lint/imp.py b/powerline/lint/imp.py new file mode 100644 index 00000000..98b7591b --- /dev/null +++ b/powerline/lint/imp.py @@ -0,0 +1,56 @@ +# vim:fileencoding=utf-8:noet +from __future__ import (unicode_literals, division, absolute_import, print_function) + +import sys + +from powerline.lint.selfcheck import havemarks + + +class WithPath(object): + def __init__(self, import_paths): + self.import_paths = import_paths + + def __enter__(self): + self.oldpath = sys.path + sys.path = self.import_paths + sys.path + + def __exit__(self, *args): + sys.path = self.oldpath + + +def import_function(function_type, name, data, context, echoerr, module): + havemarks(name, module) + + if module == 'powerline.segments.common': + echoerr(context='Warning while checking segments (key {key})'.format(key=context.key), + context_mark=name.mark, + problem='module {0} is deprecated'.format(module), + problem_mark=module.mark) + + with 
WithPath(data['import_paths']):
+		try:
+			func = getattr(__import__(str(module), fromlist=[str(name)]), str(name))
+		except ImportError:
+			echoerr(context='Error while checking segments (key {key})'.format(key=context.key),
+			        context_mark=name.mark,
+			        problem='failed to import module {0}'.format(module),
+			        problem_mark=module.mark)
+			return None
+		except AttributeError:
+			echoerr(context='Error while loading {0} function (key {key})'.format(function_type, key=context.key),
+			        problem='failed to load function {0} from module {1}'.format(name, module),
+			        problem_mark=name.mark)
+			return None
+
+	if not callable(func):
+		echoerr(context='Error while checking segments (key {key})'.format(key=context.key),
+		        context_mark=name.mark,
+		        problem='imported “function” {0} from module {1} is not callable'.format(name, module),
+		        problem_mark=module.mark)
+		return None
+
+	return func
+
+
+def import_segment(*args, **kwargs):
+	return import_function('segment', *args, **kwargs)
diff --git a/powerline/lint/markedjson/__init__.py b/powerline/lint/markedjson/__init__.py
index aa084eaf..dea5faf7 100644
--- a/powerline/lint/markedjson/__init__.py
+++ b/powerline/lint/markedjson/__init__.py
@@ -5,10 +5,12 @@ from powerline.lint.markedjson.loader import Loader
 
 
 def load(stream, Loader=Loader):
-	"""
-	Parse the first YAML document in a stream
-	and produce the corresponding Python object.
-	"""
+	'''Parse JSON value and produce the corresponding Python object
+
+	:return:
+		(object, hadproblem) where the first value is the corresponding JSON
+		object and the second is true if there were errors during loading.
+	'''
 	loader = Loader(stream)
 	try:
 		r = loader.get_single_data()
diff --git a/powerline/lint/markedjson/composer.py b/powerline/lint/markedjson/composer.py
index 3067af04..bd5620d3 100644
--- a/powerline/lint/markedjson/composer.py
+++ b/powerline/lint/markedjson/composer.py
@@ -43,9 +43,9 @@ class Composer:
 		if not self.check_event(events.StreamEndEvent):
 			event = self.get_event()
 			raise ComposerError(
-				"expected a single document in the stream",
+				'expected a single document in the stream',
 				document.start_mark,
-				"but found another document",
+				'but found another document',
 				event.start_mark
 			)
 
@@ -109,8 +109,8 @@ class Composer:
 			# key_event = self.peek_event()
 			item_key = self.compose_node(node, None)
 			# if item_key in node.value:
-			# 	raise ComposerError("while composing a mapping", start_event.start_mark,
-			# 			"found duplicate key", key_event.start_mark)
+			# 	raise ComposerError('while composing a mapping', start_event.start_mark,
+			# 			'found duplicate key', key_event.start_mark)
 			item_value = self.compose_node(node, item_key)
 			# node.value[item_key] = item_value
 			node.value.append((item_key, item_value))
diff --git a/powerline/lint/markedjson/constructor.py b/powerline/lint/markedjson/constructor.py
index f51f25ac..2a95d840 100644
--- a/powerline/lint/markedjson/constructor.py
+++ b/powerline/lint/markedjson/constructor.py
@@ -94,7 +94,7 @@ class BaseConstructor:
 		if not isinstance(node, nodes.ScalarNode):
 			raise ConstructorError(
 				None, None,
-				"expected a scalar node, but found %s" % node.id,
+				'expected a scalar node, but found %s' % node.id,
 				node.start_mark
 			)
 		return node.value
@@ -103,7 +103,7 @@ class BaseConstructor:
 		if not isinstance(node, nodes.SequenceNode):
 			raise ConstructorError(
 				None, None,
-				"expected a sequence node, but found %s" % node.id,
+				'expected a sequence node, but found %s' % node.id,
 				node.start_mark
 			)
 		return [
@@ -116,7 +116,7 @@ class BaseConstructor:
 		if not isinstance(node, 
nodes.MappingNode): raise ConstructorError( None, None, - "expected a mapping node, but found %s" % node.id, + 'expected a mapping node, but found %s' % node.id, node.start_mark ) mapping = {} @@ -174,9 +174,9 @@ class Constructor(BaseConstructor): for subnode in value_node.value: if not isinstance(subnode, nodes.MappingNode): raise ConstructorError( - "while constructing a mapping", + 'while constructing a mapping', node.start_mark, - "expected a mapping for merging, but found %s" % subnode.id, + 'expected a mapping for merging, but found %s' % subnode.id, subnode.start_mark ) self.flatten_mapping(subnode) @@ -186,9 +186,9 @@ class Constructor(BaseConstructor): merge.extend(value) else: raise ConstructorError( - "while constructing a mapping", + 'while constructing a mapping', node.start_mark, - ("expected a mapping or list of mappings for merging, but found %s" % value_node.id), + ('expected a mapping or list of mappings for merging, but found %s' % value_node.id), value_node.start_mark ) elif key_node.tag == 'tag:yaml.org,2002:value': @@ -255,7 +255,7 @@ class Constructor(BaseConstructor): def construct_undefined(self, node): raise ConstructorError( None, None, - "could not determine a constructor for the tag %r" % node.tag, + 'could not determine a constructor for the tag %r' % node.tag, node.start_mark ) diff --git a/powerline/lint/markedjson/error.py b/powerline/lint/markedjson/error.py index 643c86b3..ec86d14a 100644 --- a/powerline/lint/markedjson/error.py +++ b/powerline/lint/markedjson/error.py @@ -57,10 +57,10 @@ class Mark: def __str__(self): snippet = self.get_snippet() - where = (" in \"%s\", line %d, column %d" % ( + where = (' in "%s", line %d, column %d' % ( self.name, self.line + 1, self.column + 1)) if snippet is not None: - where += ":\n" + snippet + where += ':\n' + snippet if type(where) is str: return where else: @@ -68,8 +68,9 @@ class Mark: def echoerr(*args, **kwargs): - sys.stderr.write('\n') - sys.stderr.write(format_error(*args, **kwargs) + '\n') + stream = kwargs.pop('stream', sys.stderr) + stream.write('\n') + stream.write(format_error(*args, **kwargs) + '\n') def format_error(context=None, context_mark=None, problem=None, problem_mark=None, note=None): @@ -98,3 +99,34 @@ def format_error(context=None, context_mark=None, problem=None, problem_mark=Non class MarkedError(Exception): def __init__(self, context=None, context_mark=None, problem=None, problem_mark=None, note=None): Exception.__init__(self, format_error(context, context_mark, problem, problem_mark, note)) + + +class EchoErr(object): + __slots__ = ('echoerr', 'logger',) + + def __init__(self, echoerr, logger): + self.echoerr = echoerr + self.logger = logger + + def __call__(self, *args, **kwargs): + self.echoerr(*args, **kwargs) + + +class DelayedEchoErr(EchoErr): + __slots__ = ('echoerr', 'logger', 'errs') + + def __init__(self, echoerr): + super(DelayedEchoErr, self).__init__(echoerr, echoerr.logger) + self.errs = [] + + def __call__(self, *args, **kwargs): + self.errs.append((args, kwargs)) + + def echo_all(self): + for args, kwargs in self.errs: + self.echoerr(*args, **kwargs) + + def __nonzero__(self): + return not not self.errs + + __bool__ = __nonzero__ diff --git a/powerline/lint/markedjson/parser.py b/powerline/lint/markedjson/parser.py index 960b74a3..336a2a21 100644 --- a/powerline/lint/markedjson/parser.py +++ b/powerline/lint/markedjson/parser.py @@ -88,7 +88,7 @@ class Parser: token = self.peek_token() self.echoerr( None, None, - ("expected '', but found %r" % token.id), 
token.start_mark + ('expected \'\', but found %r' % token.id), token.start_mark ) return events.StreamEndEvent(token.start_mark, token.end_mark) else: @@ -141,8 +141,8 @@ class Parser: else: token = self.peek_token() raise ParserError( - "while parsing a flow node", start_mark, - "expected the node content, but found %r" % token.id, + 'while parsing a flow node', start_mark, + 'expected the node content, but found %r' % token.id, token.start_mark ) return event @@ -160,14 +160,14 @@ class Parser: if self.check_token(tokens.FlowSequenceEndToken): token = self.peek_token() self.echoerr( - "While parsing a flow sequence", self.marks[-1], - ("expected sequence value, but got %r" % token.id), token.start_mark + 'While parsing a flow sequence', self.marks[-1], + ('expected sequence value, but got %r' % token.id), token.start_mark ) else: token = self.peek_token() raise ParserError( - "while parsing a flow sequence", self.marks[-1], - ("expected ',' or ']', but got %r" % token.id), token.start_mark + 'while parsing a flow sequence', self.marks[-1], + ('expected \',\' or \']\', but got %r' % token.id), token.start_mark ) if not self.check_token(tokens.FlowSequenceEndToken): @@ -197,14 +197,14 @@ class Parser: if self.check_token(tokens.FlowMappingEndToken): token = self.peek_token() self.echoerr( - "While parsing a flow mapping", self.marks[-1], - ("expected mapping key, but got %r" % token.id), token.start_mark + 'While parsing a flow mapping', self.marks[-1], + ('expected mapping key, but got %r' % token.id), token.start_mark ) else: token = self.peek_token() raise ParserError( - "while parsing a flow mapping", self.marks[-1], - ("expected ',' or '}', but got %r" % token.id), token.start_mark + 'while parsing a flow mapping', self.marks[-1], + ('expected \',\' or \'}\', but got %r' % token.id), token.start_mark ) if self.check_token(tokens.KeyToken): token = self.get_token() @@ -214,8 +214,8 @@ class Parser: else: token = self.peek_token() raise ParserError( - "while parsing a flow mapping", self.marks[-1], - ("expected value, but got %r" % token.id), token.start_mark + 'while parsing a flow mapping', self.marks[-1], + ('expected value, but got %r' % token.id), token.start_mark ) elif not self.check_token(tokens.FlowMappingEndToken): token = self.peek_token() @@ -226,14 +226,14 @@ class Parser: if expect_key: raise ParserError( - "while parsing a flow mapping", self.marks[-1], - ("expected string key, but got %r" % token.id), token.start_mark + 'while parsing a flow mapping', self.marks[-1], + ('expected string key, but got %r' % token.id), token.start_mark ) else: token = self.peek_token() raise ParserError( - "while parsing a flow mapping", self.marks[-1], - ("expected ':', but got %r" % token.id), token.start_mark + 'while parsing a flow mapping', self.marks[-1], + ('expected \':\', but got %r' % token.id), token.start_mark ) token = self.get_token() event = events.MappingEndEvent(token.start_mark, token.end_mark) @@ -250,6 +250,6 @@ class Parser: token = self.peek_token() raise ParserError( - "while parsing a flow mapping", self.marks[-1], - ("expected mapping value, but got %r" % token.id), token.start_mark + 'while parsing a flow mapping', self.marks[-1], + ('expected mapping value, but got %r' % token.id), token.start_mark ) diff --git a/powerline/lint/markedjson/reader.py b/powerline/lint/markedjson/reader.py index e212a2bd..bb518b06 100644 --- a/powerline/lint/markedjson/reader.py +++ b/powerline/lint/markedjson/reader.py @@ -7,7 +7,7 @@ from powerline.lint.markedjson.error import 
MarkedError, Mark, NON_PRINTABLE from powerline.lib.unicode import unicode -# This module contains abstractions for the input stream. You don't have to +# This module contains abstractions for the input stream. You don’t have to # looks further, there are no pretty code. @@ -24,7 +24,7 @@ class Reader(object): # Reader accepts # - a file-like object with its `read` method returning `str`, - # Yeah, it's ugly and slow. + # Yeah, it’s ugly and slow. def __init__(self, stream): self.name = None self.stream = None @@ -42,7 +42,7 @@ class Reader(object): self.column = 0 self.stream = stream - self.name = getattr(stream, 'name', "") + self.name = getattr(stream, 'name', '') self.eof = False self.raw_buffer = None diff --git a/powerline/lint/markedjson/scanner.py b/powerline/lint/markedjson/scanner.py index b4776f8a..543d7298 100644 --- a/powerline/lint/markedjson/scanner.py +++ b/powerline/lint/markedjson/scanner.py @@ -39,7 +39,7 @@ class SimpleKey: class Scanner: def __init__(self): - """Initialize the scanner.""" + '''Initialize the scanner.''' # It is assumed that Scanner and Reader will have a common descendant. # Reader do the dirty work of checking for BOM and converting the # input data to Unicode. It also adds NUL to the end. @@ -168,17 +168,17 @@ class Scanner: return self.fetch_value() # Is it a double quoted scalar? - if ch == '\"': + if ch == '"': return self.fetch_double() # It must be a plain scalar then. if self.check_plain(): return self.fetch_plain() - # No? It's an error. Let's produce a nice error message. + # No? It’s an error. Let’s produce a nice error message. raise ScannerError( - "while scanning for the next token", None, - "found character %r that cannot start any token" % ch, + 'while scanning for the next token', None, + 'found character %r that cannot start any token' % ch, self.get_mark() ) @@ -186,7 +186,7 @@ class Scanner: def next_possible_simple_key(self): # Return the number of the nearest possible simple key. Actually we - # don't need to loop through the whole dictionary. We may replace it + # don’t need to loop through the whole dictionary. We may replace it # with the following code: # if not self.possible_simple_keys: # return None @@ -211,11 +211,11 @@ class Scanner: del self.possible_simple_keys[level] def save_possible_simple_key(self): - # The next token may start a simple key. We check if it's possible + # The next token may start a simple key. We check if it’s possible # and save its position. This function is called for # SCALAR(flow), '[', and '{'. - # The next token might be a simple key. Let's save it's number and + # The next token might be a simple key. Let’s save it’s number and # position. if self.allow_simple_key: self.remove_possible_simple_key() @@ -364,7 +364,7 @@ class Scanner: def scan_flow_scalar(self): # See the specification for details. # Note that we loose indentation rules for quoted scalars. Quoted - # scalars don't need to adhere indentation because " and ' clearly + # scalars don’t need to adhere indentation because " and ' clearly # mark the beginning and the end of them. Therefore we are less # restrictive then the specification requires. We only need to check # that document separators are not included in scalars. 
@@ -386,7 +386,7 @@ class Scanner: 'n': '\x0A', 'f': '\x0C', 'r': '\x0D', - '\"': '\"', + '"': '\"', '\\': '\\', } @@ -417,8 +417,8 @@ class Scanner: for k in range(length): if self.peek(k) not in '0123456789ABCDEFabcdef': raise ScannerError( - "while scanning a double-quoted scalar", start_mark, - "expected escape sequence of %d hexdecimal numbers, but found %r" % ( + 'while scanning a double-quoted scalar', start_mark, + 'expected escape sequence of %d hexdecimal numbers, but found %r' % ( length, self.peek(k)), self.get_mark() ) @@ -427,8 +427,8 @@ class Scanner: self.forward(length) else: raise ScannerError( - "while scanning a double-quoted scalar", start_mark, - ("found unknown escape character %r" % ch), self.get_mark() + 'while scanning a double-quoted scalar', start_mark, + ('found unknown escape character %r' % ch), self.get_mark() ) else: return chunks @@ -444,13 +444,13 @@ class Scanner: ch = self.peek() if ch == '\0': raise ScannerError( - "while scanning a quoted scalar", start_mark, - "found unexpected end of stream", self.get_mark() + 'while scanning a quoted scalar', start_mark, + 'found unexpected end of stream', self.get_mark() ) elif ch == '\n': raise ScannerError( - "while scanning a quoted scalar", start_mark, - "found unexpected line end", self.get_mark() + 'while scanning a quoted scalar', start_mark, + 'found unexpected line end', self.get_mark() ) else: chunks.append(whitespaces) diff --git a/powerline/lint/selfcheck.py b/powerline/lint/selfcheck.py new file mode 100644 index 00000000..06d1fbe2 --- /dev/null +++ b/powerline/lint/selfcheck.py @@ -0,0 +1,16 @@ +# vim:fileencoding=utf-8:noet +from __future__ import (unicode_literals, division, absolute_import, print_function) + +from powerline.lib.unicode import unicode + + +def havemarks(*args, **kwargs): + origin = kwargs.get('origin', '') + for i, v in enumerate(args): + if not hasattr(v, 'mark'): + raise AssertionError('Value #{0}/{1} ({2!r}) has no attribute `mark`'.format(origin, i, v)) + if isinstance(v, dict): + for key, val in v.items(): + havemarks(key, val, origin=(origin + '[' + unicode(i) + ']/' + unicode(key))) + elif isinstance(v, list): + havemarks(*v, origin=(origin + '[' + unicode(i) + ']')) diff --git a/powerline/lint/spec.py b/powerline/lint/spec.py new file mode 100644 index 00000000..1d095721 --- /dev/null +++ b/powerline/lint/spec.py @@ -0,0 +1,724 @@ +# vim:fileencoding=utf-8:noet +from __future__ import (unicode_literals, division, absolute_import, print_function) + +import itertools +import re + +from copy import copy + +from powerline.lib.unicode import unicode +from powerline.lint.markedjson.error import echoerr, DelayedEchoErr +from powerline.lint.selfcheck import havemarks + + +class Spec(object): + '''Class that describes some JSON value + + In powerline it is only used to describe JSON values stored in powerline + configuration. + + :param dict keys: + Dictionary that maps keys that may be present in the given JSON + dictionary to their descriptions. If this parameter is not empty it + implies that described value has dictionary type. Non-dictionary types + must be described using ``Spec()``: without arguments. + + .. note:: + Methods that create the specifications return ``self``, so calls to them + may be chained: ``Spec().type(unicode).re('^\w+$')``. This does not + apply to functions that *apply* specification like :py:meth`Spec.match`. + + .. 
note::
+		Methods starting with ``check_`` return two values: the first determines
+		whether the caller should proceed with running other checks, the second
+		determines whether there were any problems (i.e. whether an error was
+		reported). One should not call these methods directly: there is the
+		:py:meth:`Spec.match` method for checking values.
+
+	.. note::
+		In ``check_`` and ``match`` methods specifications are identified by
+		their indexes in order to simplify the :py:meth:`Spec.copy` method.
+
+	Some common parameters:
+
+	``data``:
+		Whatever data is supplied by the first caller for checker functions. It
+		is not processed by :py:class:`Spec` methods in any fashion.
+	``context``:
+		:py:class:`powerline.lint.context.Context` instance, describes context
+		of the value. :py:class:`Spec` methods only use its ``.key`` attribute
+		for error messages.
+	``echoerr``:
+		Callable that should be used to echo errors. It is supposed to take four
+		optional keyword arguments: ``problem``, ``problem_mark``, ``context``,
+		``context_mark``.
+	``value``:
+		Checked value.
+	'''
+
+	def __init__(self, **keys):
+		self.specs = []
+		self.keys = {}
+		self.checks = []
+		self.cmsg = ''
+		self.isoptional = False
+		self.uspecs = []
+		self.ufailmsg = lambda key: 'found unknown key: {0}'.format(key)
+		self.did_type = False
+		self.update(**keys)
+
+	def update(self, **keys):
+		'''Describe additional keys that may be present in given JSON value
+
+		If called with some keyword arguments this implies that the described
+		value is a dictionary. If called without keyword parameters it is
+		a no-op.
+
+		:return: self.
+		'''
+		for k, v in keys.items():
+			self.keys[k] = len(self.specs)
+			self.specs.append(v)
+		if self.keys and not self.did_type:
+			self.type(dict)
+			self.did_type = True
+		return self
+
+	def copy(self, copied=None):
+		'''Deep copy the spec
+
+		:param dict copied:
+			Internal dictionary used for storing already copied values. This
+			parameter should not be used.
+
+		:return: New :py:class:`Spec` object that is a deep copy of ``self``.
+		'''
+		copied = copied or {}
+		try:
+			return copied[id(self)]
+		except KeyError:
+			instance = self.__class__()
+			copied[id(self)] = instance
+			return instance._update(self.__dict__, copied)
+
+	def _update(self, d, copied):
+		'''Helper for the :py:meth:`Spec.copy` function
+
+		Populates new instance with values taken from the old one.
+
+		:param dict d:
+			``__dict__`` of the old instance.
+		:param dict copied:
+			Storage for already copied values.
+		'''
+		self.__dict__.update(d)
+		self.keys = copy(self.keys)
+		self.checks = copy(self.checks)
+		self.uspecs = copy(self.uspecs)
+		self.specs = [spec.copy(copied) for spec in self.specs]
+		return self
+
+	def unknown_spec(self, keyfunc, spec):
+		'''Define specification for non-static keys
+
+		This method should be used if key names cannot be determined at runtime
+		or if a number of keys share an identical spec (in order to not repeat
+		it). The :py:meth:`Spec.match` method processes the dictionary in the
+		given order:
+
+		* First it tries to use specifications provided at the initialization or
+		  by the :py:meth:`Spec.update` method.
+		* If no specification for the given key was provided it processes
+		  specifications from the ``keyfunc`` argument in the order they were
+		  supplied. Once some key matches such a specification, the ``spec``
+		  supplied as the second argument is used to determine correctness of
+		  the value.
+
+		:param Spec keyfunc:
+			:py:class:`Spec` instance or a regular function that returns two
+			values (the same :py:meth:`Spec.match` returns). 
This argument is
+		used to match keys that were not provided at initialization or via
+		:py:meth:`Spec.update`.
+	:param Spec spec:
+		:py:class:`Spec` instance that will be used to check keys matched by
+		``keyfunc``.
+
+	:return: self.
+	'''
+	if isinstance(keyfunc, Spec):
+		self.specs.append(keyfunc)
+		keyfunc = len(self.specs) - 1
+	self.specs.append(spec)
+	self.uspecs.append((keyfunc, len(self.specs) - 1))
+	return self
+
+	def unknown_msg(self, msgfunc):
+		'''Define the message that will be used when an unknown key is found
+
+		“Unknown” is a key that was not provided at initialization or via
+		:py:meth:`Spec.update` and did not match any ``keyfunc`` provided via
+		:py:meth:`Spec.unknown_spec`.
+
+		:param msgfunc:
+			Function that takes the unknown key as an argument and returns the
+			message text. Text will appear at the top (start of the sentence).
+
+		:return: self.
+		'''
+		self.ufailmsg = msgfunc
+		return self
+
+	def context_message(self, msg):
+		'''Define message that describes context
+
+		:param str msg:
+			Message that describes context. Is written using the
+			:py:meth:`str.format` syntax and is expected to use the keyword
+			parameter ``key``.
+
+		:return: self.
+		'''
+		self.cmsg = msg
+		for spec in self.specs:
+			if not spec.cmsg:
+				spec.context_message(msg)
+		return self
+
+	def check_type(self, value, context_mark, data, context, echoerr, types):
+		'''Check that given value matches given type(s)
+
+		:param tuple types:
+			List of accepted types. Since :py:class:`Spec` is supposed to
+			describe JSON values, only ``dict``, ``list``, ``unicode``, ``bool``,
+			``float`` and ``NoneType`` types make any sense.
+
+		:return: proceed, hadproblem.
+		'''
+		havemarks(value)
+		if type(value.value) not in types:
+			echoerr(
+				context=self.cmsg.format(key=context.key),
+				context_mark=context_mark,
+				problem='{0!r} must be a {1} instance, not {2}'.format(
+					value,
+					', '.join((t.__name__ for t in types)),
+					type(value.value).__name__
+				),
+				problem_mark=value.mark
+			)
+			return False, True
+		return True, False
+
+	def check_func(self, value, context_mark, data, context, echoerr, func, msg_func):
+		'''Check value using given function
+
+		:param function func:
+			Callable that should accept four positional parameters:
+
+			#. checked value,
+			#. ``data`` parameter with arbitrary data (supplied by top-level
+			   caller),
+			#. current context and
+			#. function used for echoing errors.
+
+			This callable should return three values:
+
+			#. determines whether ``check_func`` caller should proceed
+			   calling other checks,
+			#. determines whether ``check_func`` should echo error on its own
+			   (it should be set to False if ``func`` echoes the error itself) and
+			#. determines whether function has found some errors in the checked
+			   value.
+
+		:param function msg_func:
+			Callable that takes the checked value as its only positional parameter
+			and returns a string that describes the problem. Only useful for
+			small checker functions since it is ignored when the second returned
+			value is false.
+
+		:return: proceed, hadproblem.
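+
+		For illustration only (this example is not part of the original
+		file), a checker that rejects empty values could look like::
+
+			def check_nonempty(value, data, context, echoerr):
+				# proceed, echo, hadproblem
+				return True, True, not len(value)
+
+		Such a function would typically be registered through
+		:py:meth:`Spec.func`.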
+ ''' + havemarks(value) + proceed, echo, hadproblem = func(value, data, context, echoerr) + if echo and hadproblem: + echoerr(context=self.cmsg.format(key=context.key), + context_mark=context_mark, + problem=msg_func(value), + problem_mark=value.mark) + return proceed, hadproblem + + def check_list(self, value, context_mark, data, context, echoerr, item_func, msg_func): + '''Check that each value in the list matches given specification + + :param function item_func: + Callable like ``func`` from :py:meth:`Spec.check_func`. Unlike + ``func`` this callable is called for each value in the list and may + be a :py:class:`Spec` object index. + :param func msg_func: + Callable like ``msg_func`` from :py:meth:`Spec.check_func`. Should + accept one problematic item and is not used for :py:class:`Spec` + object indicies in ``item_func`` method. + + :return: proceed, hadproblem. + ''' + havemarks(value) + i = 0 + hadproblem = False + for item in value: + havemarks(item) + if isinstance(item_func, int): + spec = self.specs[item_func] + proceed, fhadproblem = spec.match( + item, + value.mark, + data, + context.enter_item('list item ' + unicode(i), item), + echoerr + ) + else: + proceed, echo, fhadproblem = item_func(item, data, context, echoerr) + if echo and fhadproblem: + echoerr(context=self.cmsg.format(key=context.key + '/list item ' + unicode(i)), + context_mark=value.mark, + problem=msg_func(item), + problem_mark=item.mark) + if fhadproblem: + hadproblem = True + if not proceed: + return proceed, hadproblem + i += 1 + return True, hadproblem + + def check_either(self, value, context_mark, data, context, echoerr, start, end): + '''Check that given value matches one of the given specifications + + :param int start: + First specification index. + :param int end: + Specification index that is greater by 1 then last specification + index. + + This method does not give an error if any specification from + ``self.specs[start:end]`` is matched by the given value. + ''' + havemarks(value) + new_echoerr = DelayedEchoErr(echoerr) + + hadproblem = False + for spec in self.specs[start:end]: + proceed, hadproblem = spec.match(value, value.mark, data, context, new_echoerr) + if not proceed: + break + if not hadproblem: + return True, False + + new_echoerr.echo_all() + + return False, hadproblem + + def check_tuple(self, value, context_mark, data, context, echoerr, start, end): + '''Check that given value is a list with items matching specifications + + :param int start: + First specification index. + :param int end: + Specification index that is greater by 1 then last specification + index. + + This method checks that each item in the value list matches + specification with index ``start + item_number``. + ''' + havemarks(value) + hadproblem = False + for (i, item, spec) in zip(itertools.count(), value, self.specs[start:end]): + proceed, ihadproblem = spec.match( + item, + value.mark, + data, + context.enter_item('tuple item ' + unicode(i), item), + echoerr + ) + if ihadproblem: + hadproblem = True + if not proceed: + return False, hadproblem + return True, hadproblem + + def type(self, *args): + '''Describe value that has one of the types given in arguments + + :param args: + List of accepted types. Since :py:class:`Spec` is supposed to + describe JSON values only ``dict``, ``list``, ``unicode``, ``bool``, + ``float`` and ``NoneType`` types make any sense. + + :return: self. 
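+
+		For example (illustrative only), ``Spec().type(unicode, type(None))``
+		describes a value that must be either a string or ``null``.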
+		'''
+		self.checks.append(('check_type', args))
+		return self
+
+	cmp_funcs = {
+		'le': lambda x, y: x <= y,
+		'lt': lambda x, y: x < y,
+		'ge': lambda x, y: x >= y,
+		'gt': lambda x, y: x > y,
+		'eq': lambda x, y: x == y,
+	}
+
+	cmp_msgs = {
+		'le': 'less than or equal to',
+		'lt': 'less than',
+		'ge': 'greater than or equal to',
+		'gt': 'greater than',
+		'eq': 'equal to',
+	}
+
+	def len(self, comparison, cint, msg_func=None):
+		'''Describe value that has given length
+
+		:param str comparison:
+			Type of the comparison. Valid values: ``le``, ``lt``, ``ge``,
+			``gt``, ``eq``.
+		:param int cint:
+			Integer with which length is compared.
+		:param function msg_func:
+			Function that should accept checked value and return message that
+			describes the problem with this value. Default value will emit
+			something like “length of ['foo', 'bar'] is not greater than 10”.
+
+		:return: self.
+		'''
+		cmp_func = self.cmp_funcs[comparison]
+		msg_func = (
+			msg_func
+			or (lambda value: 'length of {0!r} is not {1} {2}'.format(
+				value, self.cmp_msgs[comparison], cint))
+		)
+		self.checks.append((
+			'check_func',
+			(lambda value, *args: (True, True, not cmp_func(len(value), cint))),
+			msg_func
+		))
+		return self
+
+	def cmp(self, comparison, cint, msg_func=None):
+		'''Describe value that is a number or string that has given property
+
+		:param str comparison:
+			Type of the comparison. Valid values: ``le``, ``lt``, ``ge``,
+			``gt``, ``eq``. The described number or string must satisfy the
+			given comparison against ``cint``.
+		:param cint:
+			Number or string with which value is compared. Type of this
+			parameter affects required type of the checked value: ``str`` and
+			``unicode`` types imply ``unicode`` values, ``float`` type implies
+			that value can be either ``int`` or ``float``, ``int`` type implies
+			``int`` value and for any other type the behavior is undefined.
+		:param function msg_func:
+			Function that should accept checked value and return message that
+			describes the problem with this value. Default value will emit
+			something like “10 is not greater than 10”.
+
+		:return: self.
+		'''
+		if type(cint) is str:
+			self.type(unicode)
+		elif type(cint) is float:
+			self.type(int, float)
+		else:
+			self.type(type(cint))
+		cmp_func = self.cmp_funcs[comparison]
+		msg_func = msg_func or (lambda value: '{0} is not {1} {2}'.format(value, self.cmp_msgs[comparison], cint))
+		self.checks.append((
+			'check_func',
+			(lambda value, *args: (True, True, not cmp_func(value.value, cint))),
+			msg_func
+		))
+		return self
+
+	def unsigned(self, msg_func=None):
+		'''Describe unsigned integer value
+
+		:param function msg_func:
+			Function that should accept checked value and return message that
+			describes the problem with this value.
+
+		:return: self.
+		'''
+		self.type(int)
+		self.checks.append((
+			'check_func',
+			(lambda value, *args: (True, True, value < 0)),
+			(lambda value: '{0} must be greater than or equal to zero'.format(value))
+		))
+		return self
+
+	def list(self, item_func, msg_func=None):
+		'''Describe list with any number of elements, each matching given spec
+
+		:param item_func:
+			:py:class:`Spec` instance or a callable. Check out
+			:py:meth:`Spec.check_list` documentation for more details. Note that
+			in :py:meth:`Spec.check_list` description :py:class:`Spec` instance
+			is replaced with its index in ``self.specs``.
+		:param function msg_func:
+			Function that should accept checked value and return message that
+			describes the problem with this value.
Default value will emit just + “failed check”, which is rather indescriptive. + + :return: self. + ''' + self.type(list) + if isinstance(item_func, Spec): + self.specs.append(item_func) + item_func = len(self.specs) - 1 + self.checks.append(('check_list', item_func, msg_func or (lambda item: 'failed check'))) + return self + + def tuple(self, *specs): + '''Describe list with the given number of elements, each matching corresponding spec + + :param (Spec,) specs: + List of specifications. Last element(s) in this list may be + optional. Each element in this list describes element with the same + index in the checked value. Check out :py:meth:`Spec.check_tuple` + for more details, but note that there list of specifications is + replaced with start and end indicies in ``self.specs``. + + :return: self. + ''' + self.type(list) + + max_len = len(specs) + min_len = max_len + for spec in reversed(specs): + if spec.isoptional: + min_len -= 1 + else: + break + if max_len == min_len: + self.len('eq', len(specs)) + else: + self.len('ge', min_len) + self.len('le', max_len) + + start = len(self.specs) + for i, spec in zip(itertools.count(), specs): + self.specs.append(spec) + self.checks.append(('check_tuple', start, len(self.specs))) + return self + + def func(self, func, msg_func=None): + '''Describe value that is checked by the given function + + Check out :py:meth:`Spec.check_func` documentation for more details. + ''' + self.checks.append(('check_func', func, msg_func or (lambda value: 'failed check'))) + return self + + def re(self, regex, msg_func=None): + '''Describe value that is a string that matches given regular expression + + :param str regex: + Regular expression that should be matched by the value. + :param function msg_func: + Function that should accept checked value and return message that + describes the problem with this value. Default value will emit + something like “String "xyz" does not match "[a-f]+"”. + + :return: self. + ''' + self.type(unicode) + compiled = re.compile(regex) + msg_func = msg_func or (lambda value: 'String "{0}" does not match "{1}"'.format(value, regex)) + self.checks.append(( + 'check_func', + (lambda value, *args: (True, True, not compiled.match(value.value))), + msg_func + )) + return self + + def ident(self, msg_func=None): + '''Describe value that is an identifier like ``foo:bar`` or ``foo`` + + :param function msg_func: + Function that should accept checked value and return message that + describes the problem with this value. Default value will emit + something like “String "xyz" is not an … identifier”. + + :return: self. + ''' + msg_func = ( + msg_func + or (lambda value: 'String "{0}" is not an alphanumeric/underscore colon-separated identifier'.format(value)) + ) + return self.re('^\w+(?::\w+)?$', msg_func) + + def oneof(self, collection, msg_func=None): + '''Describe value that is equal to one of the value in the collection + + :param set collection: + A collection of possible values. + :param function msg_func: + Function that should accept checked value and return message that + describes the problem with this value. Default value will emit + something like “"xyz" must be one of {'abc', 'def', 'ghi'}”. + + :return: self. 
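+
+		Illustrative use (not part of the original file):
+		``Spec().oneof(set(('left', 'right')))`` describes a value that must
+		be either ``left`` or ``right``.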
+ ''' + msg_func = msg_func or (lambda value: '"{0}" must be one of {1!r}'.format(value, list(collection))) + self.checks.append(( + 'check_func', + (lambda value, *args: (True, True, value not in collection)), + msg_func + )) + return self + + def error(self, msg): + '''Describe value that must not be there + + Useful for giving more descriptive errors for some specific keys then + just “found unknown key: shutdown_event” or for forbidding certain + values when :py:meth:`Spec.unknown_spec` was used. + + :param str msg: + Message given for the offending value. It is formatted using + :py:meth:`str.format` with the only positional parameter which is + the value itself. + + :return: self. + ''' + self.checks.append(( + 'check_func', + (lambda *args: (True, True, True)), + (lambda value: msg.format(value)) + )) + return self + + def either(self, *specs): + '''Describes value that matches one of the given specs + + Check out :py:meth:`Spec.check_either` method documentation for more + details, but note that there a list of specs was replaced by start and + end indicies in ``self.specs``. + + :return: self. + ''' + start = len(self.specs) + self.specs.extend(specs) + self.checks.append(('check_either', start, len(self.specs))) + return self + + def optional(self): + '''Mark value as optional + + Only useful for key specs in :py:meth:`Spec.__init__` and + :py:meth:`Spec.update` and some last supplied to :py:meth:`Spec.tuple`. + + :return: self. + ''' + self.isoptional = True + return self + + def required(self): + '''Mark value as required + + Only useful for key specs in :py:meth:`Spec.__init__` and + :py:meth:`Spec.update` and some last supplied to :py:meth:`Spec.tuple`. + + .. note:: + Value is required by default. This method is only useful for + altering existing specification (or rather its copy). + + :return: self. + ''' + self.isoptional = False + return self + + def match_checks(self, *args): + '''Process checks registered for the given value + + Processes only “top-level” checks: key specifications given using at the + initialization or via :py:meth:`Spec.unknown_spec` are processed by + :py:meth:`Spec.match`. + + :return: proceed, hadproblem. + ''' + hadproblem = False + for check in self.checks: + proceed, chadproblem = getattr(self, check[0])(*(args + check[1:])) + if chadproblem: + hadproblem = True + if not proceed: + return False, hadproblem + return True, hadproblem + + def match(self, value, context_mark=None, data=None, context=(), echoerr=echoerr): + '''Check that given value matches this specification + + :return: proceed, hadproblem. 
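+
+		The following sketch (added for illustration; the key names are
+		hypothetical) shows how a specification is typically built before
+		``match`` is called::
+
+			segment_spec = (Spec(
+				name=Spec().type(unicode).re('^\w+$'),
+				priority=Spec().type(int, float, type(None)).optional(),
+			).context_message('Error while checking segments (key {key})'))
+
+		The checked ``value`` must carry position marks (e.g. values produced
+		by ``powerline.lint.markedjson``), otherwise
+		:py:func:`powerline.lint.selfcheck.havemarks` raises ``AssertionError``.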
+ ''' + havemarks(value) + proceed, hadproblem = self.match_checks(value, context_mark, data, context, echoerr) + if proceed: + if self.keys or self.uspecs: + for key, vali in self.keys.items(): + valspec = self.specs[vali] + if key in value: + proceed, mhadproblem = valspec.match( + value[key], + value.mark, + data, + context.enter_key(value, key), + echoerr + ) + if mhadproblem: + hadproblem = True + if not proceed: + return False, hadproblem + else: + if not valspec.isoptional: + hadproblem = True + echoerr(context=self.cmsg.format(key=context.key), + context_mark=None, + problem='required key is missing: {0}'.format(key), + problem_mark=value.mark) + for key in value.keys(): + havemarks(key) + if key not in self.keys: + for keyfunc, vali in self.uspecs: + valspec = self.specs[vali] + if isinstance(keyfunc, int): + spec = self.specs[keyfunc] + proceed, khadproblem = spec.match(key, context_mark, data, context, echoerr) + else: + proceed, khadproblem = keyfunc(key, data, context, echoerr) + if khadproblem: + hadproblem = True + if proceed: + proceed, vhadproblem = valspec.match( + value[key], + value.mark, + data, + context.enter_key(value, key), + echoerr + ) + if vhadproblem: + hadproblem = True + break + else: + hadproblem = True + if self.ufailmsg: + echoerr(context=self.cmsg.format(key=context.key), + context_mark=None, + problem=self.ufailmsg(key), + problem_mark=key.mark) + return True, hadproblem + + def __getitem__(self, key): + '''Get specification for the given key + ''' + return self.specs[self.keys[key]] + + def __setitem__(self, key, value): + '''Set specification for the given key + ''' + self.update(**{key: value}) diff --git a/powerline/matchers/vim/__init__.py b/powerline/matchers/vim/__init__.py index d7e9b48d..b2bad4cc 100644 --- a/powerline/matchers/vim/__init__.py +++ b/powerline/matchers/vim/__init__.py @@ -3,7 +3,7 @@ from __future__ import (unicode_literals, division, absolute_import, print_funct import os -from powerline.bindings.vim import vim_getbufoption +from powerline.bindings.vim import vim_getbufoption, buffer_name def help(matcher_info): @@ -11,8 +11,8 @@ def help(matcher_info): def cmdwin(matcher_info): - name = matcher_info['buffer'].name - return name and os.path.basename(name) == '[Command Line]' + name = buffer_name(matcher_info) + return name and os.path.basename(name) == b'[Command Line]' def quickfix(matcher_info): diff --git a/powerline/matchers/vim/plugin/ctrlp.py b/powerline/matchers/vim/plugin/ctrlp.py index f2b05980..56028649 100644 --- a/powerline/matchers/vim/plugin/ctrlp.py +++ b/powerline/matchers/vim/plugin/ctrlp.py @@ -3,10 +3,13 @@ from __future__ import (unicode_literals, division, absolute_import, print_funct import os +from powerline.bindings.vim import buffer_name + + try: import vim except ImportError: - vim = object() + pass else: vim.command(''' function! Powerline_plugin_ctrlp_main(...) 
@@ -26,5 +29,5 @@ else:
 
 
 def ctrlp(matcher_info):
-	name = matcher_info['buffer'].name
-	return name and os.path.basename(name) == 'ControlP'
+	name = buffer_name(matcher_info)
+	return name and os.path.basename(name) == b'ControlP'
diff --git a/powerline/matchers/vim/plugin/gundo.py b/powerline/matchers/vim/plugin/gundo.py
index 356ffd65..e0fe3776 100644
--- a/powerline/matchers/vim/plugin/gundo.py
+++ b/powerline/matchers/vim/plugin/gundo.py
@@ -3,12 +3,14 @@ from __future__ import (unicode_literals, division, absolute_import, print_funct
 
 import os
 
+from powerline.bindings.vim import buffer_name
+
 
 def gundo(matcher_info):
-	name = matcher_info['buffer'].name
-	return name and os.path.basename(name) == '__Gundo__'
+	name = buffer_name(matcher_info)
+	return name and os.path.basename(name) == b'__Gundo__'
 
 
 def gundo_preview(matcher_info):
-	name = matcher_info['buffer'].name
-	return name and os.path.basename(name) == '__Gundo_Preview__'
+	name = buffer_name(matcher_info)
+	return name and os.path.basename(name) == b'__Gundo_Preview__'
diff --git a/powerline/matchers/vim/plugin/nerdtree.py b/powerline/matchers/vim/plugin/nerdtree.py
index aeb2c24c..3ec423df 100644
--- a/powerline/matchers/vim/plugin/nerdtree.py
+++ b/powerline/matchers/vim/plugin/nerdtree.py
@@ -4,7 +4,12 @@ from __future__ import (unicode_literals, division, absolute_import, print_funct
 import os
 import re
 
+from powerline.bindings.vim import buffer_name
+
+
+NERD_TREE_RE = re.compile(b'NERD_tree_\\d+')
+
 
 def nerdtree(matcher_info):
-	name = matcher_info['buffer'].name
-	return name and re.match(r'NERD_tree_\d+', os.path.basename(name))
+	name = buffer_name(matcher_info)
+	return name and NERD_TREE_RE.match(os.path.basename(name))
diff --git a/powerline/renderer.py b/powerline/renderer.py
index dccc8f15..b6560a60 100644
--- a/powerline/renderer.py
+++ b/powerline/renderer.py
@@ -248,7 +248,7 @@ class Renderer(object):
 		current_width = 0
 
 		if not width:
-			# No width specified, so we don't need to crop or pad anything
+			# No width specified, so we don’t need to crop or pad anything
 			if output_width:
 				current_width = self._render_length(theme, segments, self.compute_divider_widths(theme))
 			return construct_returned_value(''.join([
diff --git a/powerline/renderers/i3bar.py b/powerline/renderers/i3bar.py
index 223458c2..9fbdaa0b 100644
--- a/powerline/renderers/i3bar.py
+++ b/powerline/renderers/i3bar.py
@@ -14,24 +14,24 @@ class I3barRenderer(Renderer):
 
 	@staticmethod
 	def hlstyle(*args, **kwargs):
-		# We don't need to explicitly reset attributes, so skip those calls
+		# We don’t need to explicitly reset attributes, so skip those calls
 		return ''
 
 	def hl(self, contents, fg=None, bg=None, attr=None):
 		segment = {
-			"full_text": contents,
-			"separator": False,
-			"separator_block_width": 0,  # no seperators
+			'full_text': contents,
+			'separator': False,
+			'separator_block_width': 0,  # no separators
 		}
 
 		if fg is not None:
 			if fg is not False and fg[1] is not False:
-				segment['color'] = "#{0:06x}".format(fg[1])
+				segment['color'] = '#{0:06x}'.format(fg[1])
 		if bg is not None:
 			if bg is not False and bg[1] is not False:
-				segment['background_color'] = "#{0:06x}".format(bg[1])
-		# i3bar "pseudo json" requires one line at a time
-		return json.dumps(segment) + ",\n"
+				segment['background_color'] = '#{0:06x}'.format(bg[1])
+		# i3bar “pseudo json” requires one line at a time
+		return json.dumps(segment) + ',\n'
 
 
 renderer = I3barRenderer
diff --git a/powerline/renderers/pango_markup.py b/powerline/renderers/pango_markup.py
index 02511ab5..e0dfa481 100644
--- 
a/powerline/renderers/pango_markup.py +++ b/powerline/renderers/pango_markup.py @@ -12,7 +12,7 @@ class PangoMarkupRenderer(Renderer): @staticmethod def hlstyle(*args, **kwargs): - # We don't need to explicitly reset attributes, so skip those calls + # We don’t need to explicitly reset attributes, so skip those calls return '' def hl(self, contents, fg=None, bg=None, attr=None): diff --git a/powerline/renderers/shell/__init__.py b/powerline/renderers/shell/__init__.py index b767fc26..3318ff22 100644 --- a/powerline/renderers/shell/__init__.py +++ b/powerline/renderers/shell/__init__.py @@ -82,7 +82,7 @@ class ShellRenderer(Renderer): If an argument is None, the argument is ignored. If an argument is False, the argument is reset to the terminal defaults. If an argument - is a valid color or attribute, it's added to the ANSI escape code. + is a valid color or attribute, it’s added to the ANSI escape code. ''' ansi = [0] if fg is not None: diff --git a/powerline/renderers/tmux.py b/powerline/renderers/tmux.py index 5c02f29b..97deda9f 100644 --- a/powerline/renderers/tmux.py +++ b/powerline/renderers/tmux.py @@ -13,7 +13,7 @@ class TmuxRenderer(Renderer): def hlstyle(self, fg=None, bg=None, attr=None): '''Highlight a segment.''' - # We don't need to explicitly reset attributes, so skip those calls + # We don’t need to explicitly reset attributes, so skip those calls if not attr and not bg and not fg: return '' tmux_attr = [] diff --git a/powerline/renderers/vim.py b/powerline/renderers/vim.py index 9cb806f7..58c690dd 100644 --- a/powerline/renderers/vim.py +++ b/powerline/renderers/vim.py @@ -9,10 +9,10 @@ from powerline.bindings.vim import vim_get_func, vim_getoption, environ, current from powerline.renderer import Renderer from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE from powerline.theme import Theme -from powerline.lib.unicode import unichr +from powerline.lib.unicode import unichr, register_strwidth_error -vim_mode = vim_get_func('mode', rettype=str) +vim_mode = vim_get_func('mode', rettype='unicode') if int(vim.eval('v:version')) >= 702: _vim_mode = vim_mode vim_mode = lambda: _vim_mode(1) @@ -41,6 +41,8 @@ class VimRenderer(Renderer): super(VimRenderer, self).__init__(*args, **kwargs) self.hl_groups = {} self.prev_highlight = None + self.strwidth_error_name = register_strwidth_error(self.strwidth) + self.encoding = vim.eval('&encoding') def shutdown(self): self.theme.shutdown() @@ -71,11 +73,10 @@ class VimRenderer(Renderer): if hasattr(vim, 'strwidth'): if sys.version_info < (3,): - @staticmethod - def strwidth(string): + def strwidth(self, string): # Does not work with tabs, but neither is strwidth from default # renderer - return vim.strwidth(string.encode('utf-8')) + return vim.strwidth(string.encode(self.encoding, 'replace')) else: @staticmethod def strwidth(string): @@ -101,6 +102,7 @@ class VimRenderer(Renderer): winnr=winnr, buffer=window.buffer, tabpage=current_tabpage(), + encoding=self.encoding, ) segment_info['tabnr'] = segment_info['tabpage'].number segment_info['bufnr'] = segment_info['buffer'].number @@ -115,6 +117,7 @@ class VimRenderer(Renderer): segment_info=segment_info, matcher_info=(None if is_tabline else segment_info), ) + statusline = statusline.encode(self.encoding, self.strwidth_error_name) return statusline def reset_highlight(self): @@ -125,7 +128,7 @@ class VimRenderer(Renderer): If an argument is None, the argument is ignored. If an argument is False, the argument is reset to the terminal defaults. 
If an argument - is a valid color or attribute, it's added to the vim highlight group. + is a valid color or attribute, it’s added to the vim highlight group. ''' # In order not to hit E541 two consequent identical highlighting # specifiers may be squashed into one. @@ -134,7 +137,7 @@ class VimRenderer(Renderer): return '' self.prev_highlight = (fg, bg, attr) - # We don't need to explicitly reset attributes in vim, so skip those + # We don’t need to explicitly reset attributes in vim, so skip those # calls if not attr and not bg and not fg: return '' diff --git a/powerline/segments/common.py b/powerline/segments/common.py deleted file mode 100644 index bb83ea89..00000000 --- a/powerline/segments/common.py +++ /dev/null @@ -1,1469 +0,0 @@ -# vim:fileencoding=utf-8:noet -from __future__ import (unicode_literals, division, absolute_import, print_function) - -import os -import sys -import re -import socket - -from datetime import datetime -from multiprocessing import cpu_count as _cpu_count -from collections import namedtuple - -from powerline.lib import add_divider_highlight_group -from powerline.lib.shell import asrun, run_cmd -from powerline.lib.url import urllib_read, urllib_urlencode -from powerline.lib.vcs import guess, tree_status -from powerline.lib.threaded import ThreadedSegment, KwThreadedSegment -from powerline.lib.monotonic import monotonic -from powerline.lib.humanize_bytes import humanize_bytes -from powerline.lib.unicode import u -from powerline.theme import requires_segment_info, requires_filesystem_watcher -from powerline.segments import Segment, with_docstring - - -cpu_count = None - - -@requires_segment_info -def environment(pl, segment_info, variable=None): - '''Return the value of any defined environment variable - - :param string variable: - The environment variable to return if found - ''' - return segment_info['environ'].get(variable, None) - - -@requires_segment_info -def hostname(pl, segment_info, only_if_ssh=False, exclude_domain=False): - '''Return the current hostname. - - :param bool only_if_ssh: - only return the hostname if currently in an SSH session - :param bool exclude_domain: - return the hostname without domain if there is one - ''' - if only_if_ssh and not segment_info['environ'].get('SSH_CLIENT'): - return None - if exclude_domain: - return socket.gethostname().split('.')[0] - return socket.gethostname() - - -@requires_filesystem_watcher -@requires_segment_info -def branch(pl, segment_info, create_watcher, status_colors=False): - '''Return the current VCS branch. - - :param bool status_colors: - determines whether repository status will be used to determine highlighting. Default: False. - - Highlight groups used: ``branch_clean``, ``branch_dirty``, ``branch``. - ''' - name = segment_info['getcwd']() - repo = guess(path=name, create_watcher=create_watcher) - if repo is not None: - branch = repo.branch() - scol = ['branch'] - if status_colors: - try: - status = tree_status(repo, pl) - except Exception as e: - pl.exception('Failed to compute tree status: {0}', str(e)) - status = '?' 
- scol.insert(0, 'branch_dirty' if status and status.strip() else 'branch_clean') - return [{ - 'contents': branch, - 'highlight_group': scol, - }] - - -@requires_segment_info -class CwdSegment(Segment): - def argspecobjs(self): - for obj in super(CwdSegment, self).argspecobjs(): - yield obj - yield 'get_shortened_path', self.get_shortened_path - - def omitted_args(self, name, method): - if method is self.get_shortened_path: - return (0, 1, 2) - else: - return super(CwdSegment, self).omitted_args(name, method) - - def get_shortened_path(self, pl, segment_info, shorten_home=True, **kwargs): - try: - path = u(segment_info['getcwd']()) - except OSError as e: - if e.errno == 2: - # user most probably deleted the directory - # this happens when removing files from Mercurial repos for example - pl.warn('Current directory not found') - return "[not found]" - else: - raise - if shorten_home: - home = segment_info['home'] - if home: - home = u(home) - if path.startswith(home): - path = '~' + path[len(home):] - return path - - def __call__(self, pl, segment_info, - dir_shorten_len=None, - dir_limit_depth=None, - use_path_separator=False, - ellipsis='...', - **kwargs): - cwd = self.get_shortened_path(pl, segment_info, **kwargs) - cwd_split = cwd.split(os.sep) - cwd_split_len = len(cwd_split) - cwd = [i[0:dir_shorten_len] if dir_shorten_len and i else i for i in cwd_split[:-1]] + [cwd_split[-1]] - if dir_limit_depth and cwd_split_len > dir_limit_depth + 1: - del(cwd[0:-dir_limit_depth]) - if ellipsis is not None: - cwd.insert(0, ellipsis) - ret = [] - if not cwd[0]: - cwd[0] = '/' - draw_inner_divider = not use_path_separator - for part in cwd: - if not part: - continue - if use_path_separator: - part += os.sep - ret.append({ - 'contents': part, - 'divider_highlight_group': 'cwd:divider', - 'draw_inner_divider': draw_inner_divider, - }) - ret[-1]['highlight_group'] = ['cwd:current_folder', 'cwd'] - if use_path_separator: - ret[-1]['contents'] = ret[-1]['contents'][:-1] - if len(ret) > 1 and ret[0]['contents'][0] == os.sep: - ret[0]['contents'] = ret[0]['contents'][1:] - return ret - - -cwd = with_docstring(CwdSegment(), -'''Return the current working directory. - -Returns a segment list to create a breadcrumb-like effect. - -:param int dir_shorten_len: - shorten parent directory names to this length (e.g. - :file:`/long/path/to/powerline` → :file:`/l/p/t/powerline`) -:param int dir_limit_depth: - limit directory depth to this number (e.g. - :file:`/long/path/to/powerline` → :file:`⋯/to/powerline`) -:param bool use_path_separator: - Use path separator in place of soft divider. -:param bool shorten_home: - Shorten home directory to ``~``. -:param str ellipsis: - Specifies what to use in place of omitted directories. Use None to not - show this subsegment at all. - -Divider highlight group used: ``cwd:divider``. - -Highlight groups used: ``cwd:current_folder`` or ``cwd``. It is recommended to define all highlight groups. -''') - - -def date(pl, format='%Y-%m-%d', istime=False): - '''Return the current date. - - :param str format: - strftime-style date format string - :param bool istime: - If true then segment uses ``time`` highlight group. - - Divider highlight group used: ``time:divider``. - - Highlight groups used: ``time`` or ``date``. 
- ''' - return [{ - 'contents': datetime.now().strftime(format), - 'highlight_group': (['time'] if istime else []) + ['date'], - 'divider_highlight_group': 'time:divider' if istime else None, - }] - - -UNICODE_TEXT_TRANSLATION = { - ord('\''): '’', - ord('-'): '‐', -} - - -def fuzzy_time(pl, unicode_text=False): - '''Display the current time as fuzzy time, e.g. "quarter past six". - - :param bool unicode_text: - If true then hyphenminuses (regular ASCII ``-``) and single quotes are - replaced with unicode dashes and apostrophes. - ''' - hour_str = ['twelve', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven'] - minute_str = { - 5: 'five past', - 10: 'ten past', - 15: 'quarter past', - 20: 'twenty past', - 25: 'twenty-five past', - 30: 'half past', - 35: 'twenty-five to', - 40: 'twenty to', - 45: 'quarter to', - 50: 'ten to', - 55: 'five to', - } - special_case_str = { - (23, 58): 'round about midnight', - (23, 59): 'round about midnight', - (0, 0): 'midnight', - (0, 1): 'round about midnight', - (0, 2): 'round about midnight', - (12, 0): 'noon', - } - - now = datetime.now() - - try: - return special_case_str[(now.hour, now.minute)] - except KeyError: - pass - - hour = now.hour - if now.minute > 32: - if hour == 23: - hour = 0 - else: - hour += 1 - if hour > 11: - hour = hour - 12 - hour = hour_str[hour] - - minute = int(round(now.minute / 5.0) * 5) - if minute == 60 or minute == 0: - result = ' '.join([hour, 'o\'clock']) - else: - minute = minute_str[minute] - result = ' '.join([minute, hour]) - - if unicode_text: - result = result.translate(UNICODE_TEXT_TRANSLATION) - - return result - - -def _external_ip(query_url='http://ipv4.icanhazip.com/'): - return urllib_read(query_url).strip() - - -class ExternalIpSegment(ThreadedSegment): - interval = 300 - - def set_state(self, query_url='http://ipv4.icanhazip.com/', **kwargs): - self.query_url = query_url - super(ExternalIpSegment, self).set_state(**kwargs) - - def update(self, old_ip): - return _external_ip(query_url=self.query_url) - - def render(self, ip, **kwargs): - if not ip: - return None - return [{'contents': ip, 'divider_highlight_group': 'background:divider'}] - - -external_ip = with_docstring(ExternalIpSegment(), -'''Return external IP address. - -:param str query_url: - URI to query for IP address, should return only the IP address as a text string - - Suggested URIs: - - * http://ipv4.icanhazip.com/ - * http://ipv6.icanhazip.com/ - * http://icanhazip.com/ (returns IPv6 address if available, else IPv4) - -Divider highlight group used: ``background:divider``. 
-''') - - -try: - import netifaces -except ImportError: - def internal_ip(pl, interface='detect', ipv=4): - return None -else: - _interface_starts = { - 'eth': 10, # Regular ethernet adapters : eth1 - 'enp': 10, # Regular ethernet adapters, Gentoo : enp2s0 - 'ath': 9, # Atheros WiFi adapters : ath0 - 'wlan': 9, # Other WiFi adapters : wlan1 - 'wlp': 9, # Other WiFi adapters, Gentoo : wlp5s0 - 'teredo': 1, # miredo interface : teredo - 'lo': -10, # Loopback interface : lo - } - - _interface_start_re = re.compile(r'^([a-z]+?)(\d|$)') - - def _interface_key(interface): - match = _interface_start_re.match(interface) - if match: - try: - base = _interface_starts[match.group(1)] * 100 - except KeyError: - base = 500 - if match.group(2): - return base - int(match.group(2)) - else: - return base - else: - return 0 - - def internal_ip(pl, interface='detect', ipv=4): - if interface == 'detect': - try: - interface = next(iter(sorted(netifaces.interfaces(), key=_interface_key, reverse=True))) - except StopIteration: - pl.info('No network interfaces found') - return None - addrs = netifaces.ifaddresses(interface) - try: - return addrs[netifaces.AF_INET6 if ipv == 6 else netifaces.AF_INET][0]['addr'] - except (KeyError, IndexError): - return None - - -internal_ip = with_docstring(internal_ip, -'''Return internal IP address - -Requires ``netifaces`` package to work properly. - -:param str interface: - Interface on which IP will be checked. Use ``detect`` to automatically - detect interface. In this case interfaces with lower numbers will be - preferred over interfaces with similar names. Order of preference based on - names: - - #. ``eth`` and ``enp`` followed by number or the end of string. - #. ``ath``, ``wlan`` and ``wlp`` followed by number or the end of string. - #. ``teredo`` followed by number or the end of string. - #. Any other interface that is not ``lo*``. - #. ``lo`` followed by number or the end of string. -:param int ipv: - 4 or 6 for ipv4 and ipv6 respectively, depending on which IP address you - need exactly. 
-''') - - -# Weather condition code descriptions available at -# http://developer.yahoo.com/weather/#codes -weather_conditions_codes = ( - ('tornado', 'stormy'), # 0 - ('tropical_storm', 'stormy'), # 1 - ('hurricane', 'stormy'), # 2 - ('severe_thunderstorms', 'stormy'), # 3 - ('thunderstorms', 'stormy'), # 4 - ('mixed_rain_and_snow', 'rainy' ), # 5 - ('mixed_rain_and_sleet', 'rainy' ), # 6 - ('mixed_snow_and_sleet', 'snowy' ), # 7 - ('freezing_drizzle', 'rainy' ), # 8 - ('drizzle', 'rainy' ), # 9 - ('freezing_rain', 'rainy' ), # 10 - ('showers', 'rainy' ), # 11 - ('showers', 'rainy' ), # 12 - ('snow_flurries', 'snowy' ), # 13 - ('light_snow_showers', 'snowy' ), # 14 - ('blowing_snow', 'snowy' ), # 15 - ('snow', 'snowy' ), # 16 - ('hail', 'snowy' ), # 17 - ('sleet', 'snowy' ), # 18 - ('dust', 'foggy' ), # 19 - ('fog', 'foggy' ), # 20 - ('haze', 'foggy' ), # 21 - ('smoky', 'foggy' ), # 22 - ('blustery', 'foggy' ), # 23 - ('windy', ), # 24 - ('cold', 'day' ), # 25 - ('clouds', 'cloudy'), # 26 - ('mostly_cloudy_night', 'cloudy'), # 27 - ('mostly_cloudy_day', 'cloudy'), # 28 - ('partly_cloudy_night', 'cloudy'), # 29 - ('partly_cloudy_day', 'cloudy'), # 30 - ('clear_night', 'night' ), # 31 - ('sun', 'sunny' ), # 32 - ('fair_night', 'night' ), # 33 - ('fair_day', 'day' ), # 34 - ('mixed_rain_and_hail', 'rainy' ), # 35 - ('hot', 'sunny' ), # 36 - ('isolated_thunderstorms', 'stormy'), # 37 - ('scattered_thunderstorms', 'stormy'), # 38 - ('scattered_thunderstorms', 'stormy'), # 39 - ('scattered_showers', 'rainy' ), # 40 - ('heavy_snow', 'snowy' ), # 41 - ('scattered_snow_showers', 'snowy' ), # 42 - ('heavy_snow', 'snowy' ), # 43 - ('partly_cloudy', 'cloudy'), # 44 - ('thundershowers', 'rainy' ), # 45 - ('snow_showers', 'snowy' ), # 46 - ('isolated_thundershowers', 'rainy' ), # 47 -) -# ('day', (25, 34)), -# ('rainy', (5, 6, 8, 9, 10, 11, 12, 35, 40, 45, 47)), -# ('cloudy', (26, 27, 28, 29, 30, 44)), -# ('snowy', (7, 13, 14, 15, 16, 17, 18, 41, 42, 43, 46)), -# ('stormy', (0, 1, 2, 3, 4, 37, 38, 39)), -# ('foggy', (19, 20, 21, 22, 23)), -# ('sunny', (32, 36)), -# ('night', (31, 33))): -weather_conditions_icons = { - 'day': 'DAY', - 'blustery': 'WIND', - 'rainy': 'RAIN', - 'cloudy': 'CLOUDS', - 'snowy': 'SNOW', - 'stormy': 'STORM', - 'foggy': 'FOG', - 'sunny': 'SUN', - 'night': 'NIGHT', - 'windy': 'WINDY', - 'not_available': 'NA', - 'unknown': 'UKN', -} - -temp_conversions = { - 'C': lambda temp: temp, - 'F': lambda temp: (temp * 9 / 5) + 32, - 'K': lambda temp: temp + 273.15, -} - -# Note: there are also unicode characters for units: ℃, ℉ and K -temp_units = { - 'C': '°C', - 'F': '°F', - 'K': 'K', -} - - -class WeatherSegment(ThreadedSegment): - interval = 600 - - def set_state(self, location_query=None, **kwargs): - self.location = location_query - self.url = None - super(WeatherSegment, self).set_state(**kwargs) - - def update(self, old_weather): - import json - - if not self.url: - # Do not lock attribute assignments in this branch: they are used - # only in .update() - if not self.location: - location_data = json.loads(urllib_read('http://freegeoip.net/json/')) - self.location = ','.join(( - location_data['city'], - location_data['region_code'], - location_data['country_code'] - )) - query_data = { - 'q': - 'use "https://raw.githubusercontent.com/yql/yql-tables/master/weather/weather.bylocation.xml" as we;' - 'select * from we where location="{0}" and unit="c"'.format(self.location).encode('utf-8'), - 'format': 'json', - } - self.url = 'http://query.yahooapis.com/v1/public/yql?' 
+ urllib_urlencode(query_data) - - raw_response = urllib_read(self.url) - if not raw_response: - self.error('Failed to get response') - return - response = json.loads(raw_response) - condition = response['query']['results']['weather']['rss']['channel']['item']['condition'] - condition_code = int(condition['code']) - temp = float(condition['temp']) - - try: - icon_names = weather_conditions_codes[condition_code] - except IndexError: - if condition_code == 3200: - icon_names = ('not_available',) - self.warn('Weather is not available for location {0}', self.location) - else: - icon_names = ('unknown',) - self.error('Unknown condition code: {0}', condition_code) - - return (temp, icon_names) - - def render(self, weather, icons=None, unit='C', temp_format=None, temp_coldest=-30, temp_hottest=40, **kwargs): - if not weather: - return None - - temp, icon_names = weather - - for icon_name in icon_names: - if icons: - if icon_name in icons: - icon = icons[icon_name] - break - else: - icon = weather_conditions_icons[icon_names[-1]] - - temp_format = temp_format or ('{temp:.0f}' + temp_units[unit]) - converted_temp = temp_conversions[unit](temp) - if temp <= temp_coldest: - gradient_level = 0 - elif temp >= temp_hottest: - gradient_level = 100 - else: - gradient_level = (temp - temp_coldest) * 100.0 / (temp_hottest - temp_coldest) - groups = ['weather_condition_' + icon_name for icon_name in icon_names] + ['weather_conditions', 'weather'] - return [ - { - 'contents': icon + ' ', - 'highlight_group': groups, - 'divider_highlight_group': 'background:divider', - }, - { - 'contents': temp_format.format(temp=converted_temp), - 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], - 'divider_highlight_group': 'background:divider', - 'gradient_level': gradient_level, - }, - ] - - -weather = with_docstring(WeatherSegment(), -'''Return weather from Yahoo! Weather. - -Uses GeoIP lookup from http://freegeoip.net/ to automatically determine -your current location. This should be changed if you're in a VPN or if your -IP address is registered at another location. - -Returns a list of colorized icon and temperature segments depending on -weather conditions. - -:param str unit: - temperature unit, can be one of ``F``, ``C`` or ``K`` -:param str location_query: - location query for your current location, e.g. ``oslo, norway`` -:param dict icons: - dict for overriding default icons, e.g. ``{'heavy_snow' : u'❆'}`` -:param str temp_format: - format string, receives ``temp`` as an argument. Should also hold unit. -:param float temp_coldest: - coldest temperature. Any temperature below it will have gradient level equal - to zero. -:param float temp_hottest: - hottest temperature. Any temperature above it will have gradient level equal - to 100. Temperatures between ``temp_coldest`` and ``temp_hottest`` receive - gradient level that indicates relative position in this interval - (``100 * (cur-coldest) / (hottest-coldest)``). - -Divider highlight group used: ``background:divider``. - -Highlight groups used: ``weather_conditions`` or ``weather``, ``weather_temp_gradient`` (gradient) or ``weather``. -Also uses ``weather_conditions_{condition}`` for all weather conditions supported by Yahoo. -''') - - -def system_load(pl, format='{avg:.1f}', threshold_good=1, threshold_bad=2, track_cpu_count=False): - '''Return system load average. - - Highlights using ``system_load_good``, ``system_load_bad`` and - ``system_load_ugly`` highlighting groups, depending on the thresholds - passed to the function. 
- - :param str format: - format string, receives ``avg`` as an argument - :param float threshold_good: - threshold for gradient level 0: any normalized load average below this - value will have this gradient level. - :param float threshold_bad: - threshold for gradient level 100: any normalized load average above this - value will have this gradient level. Load averages between - ``threshold_good`` and ``threshold_bad`` receive gradient level that - indicates relative position in this interval: - (``100 * (cur-good) / (bad-good)``). - Note: both parameters are checked against normalized load averages. - :param bool track_cpu_count: - if True powerline will continuously poll the system to detect changes - in the number of CPUs. - - Divider highlight group used: ``background:divider``. - - Highlight groups used: ``system_load_gradient`` (gradient) or ``system_load``. - ''' - global cpu_count - try: - cpu_num = cpu_count = _cpu_count() if cpu_count is None or track_cpu_count else cpu_count - except NotImplementedError: - pl.warn('Unable to get CPU count: method is not implemented') - return None - ret = [] - for avg in os.getloadavg(): - normalized = avg / cpu_num - if normalized < threshold_good: - gradient_level = 0 - elif normalized < threshold_bad: - gradient_level = (normalized - threshold_good) * 100.0 / (threshold_bad - threshold_good) - else: - gradient_level = 100 - ret.append({ - 'contents': format.format(avg=avg), - 'highlight_group': ['system_load_gradient', 'system_load'], - 'divider_highlight_group': 'background:divider', - 'gradient_level': gradient_level, - }) - ret[0]['contents'] += ' ' - ret[1]['contents'] += ' ' - return ret - - -try: - import psutil - - def _get_bytes(interface): - try: - io_counters = psutil.net_io_counters(pernic=True) - except AttributeError: - io_counters = psutil.network_io_counters(pernic=True) - if_io = io_counters.get(interface) - if not if_io: - return None - return if_io.bytes_recv, if_io.bytes_sent - - def _get_interfaces(): - io_counters = psutil.network_io_counters(pernic=True) - for interface, data in io_counters.items(): - if data: - yield interface, data.bytes_recv, data.bytes_sent - - # psutil-2.0.0: psutil.Process.username is unbound method - if callable(psutil.Process.username): - def _get_user(segment_info): - return psutil.Process(os.getpid()).username() - # pre psutil-2.0.0: psutil.Process.username has type property - else: - def _get_user(segment_info): - return psutil.Process(os.getpid()).username - - class CPULoadPercentSegment(ThreadedSegment): - interval = 1 - - def update(self, old_cpu): - return psutil.cpu_percent(interval=None) - - def run(self): - while not self.shutdown_event.is_set(): - try: - self.update_value = psutil.cpu_percent(interval=self.interval) - except Exception as e: - self.exception('Exception while calculating cpu_percent: {0}', str(e)) - - def render(self, cpu_percent, format='{0:.0f}%', **kwargs): - if not cpu_percent: - return None - return [{ - 'contents': format.format(cpu_percent), - 'gradient_level': cpu_percent, - 'highlight_group': ['cpu_load_percent_gradient', 'cpu_load_percent'], - }] -except ImportError: - def _get_bytes(interface): - with open('/sys/class/net/{interface}/statistics/rx_bytes'.format(interface=interface), 'rb') as file_obj: - rx = int(file_obj.read()) - with open('/sys/class/net/{interface}/statistics/tx_bytes'.format(interface=interface), 'rb') as file_obj: - tx = int(file_obj.read()) - return (rx, tx) - - def _get_interfaces(): - for interface in os.listdir('/sys/class/net'): - x 
= _get_bytes(interface) - if x is not None: - yield interface, x[0], x[1] - - def _get_user(segment_info): - return segment_info['environ'].get('USER', None) - - class CPULoadPercentSegment(ThreadedSegment): - interval = 1 - - @staticmethod - def startup(**kwargs): - pass - - @staticmethod - def start(): - pass - - @staticmethod - def shutdown(): - pass - - @staticmethod - def render(cpu_percent, pl, format='{0:.0f}%', **kwargs): - pl.warn('psutil package is not installed, thus CPU load is not available') - return None - - -cpu_load_percent = with_docstring(CPULoadPercentSegment(), -'''Return the average CPU load as a percentage. - -Requires the ``psutil`` module. - -:param str format: - Output format. Accepts measured CPU load as the first argument. - -Highlight groups used: ``cpu_load_percent_gradient`` (gradient) or ``cpu_load_percent``. -''') - - -username = False -# os.geteuid is not available on windows -_geteuid = getattr(os, 'geteuid', lambda: 1) - - -def user(pl, segment_info=None, hide_user=None): - '''Return the current user. - - :param str hide_user: - Omit showing segment for users with names equal to this string. - - Highlights the user with the ``superuser`` if the effective user ID is 0. - - Highlight groups used: ``superuser`` or ``user``. It is recommended to define all highlight groups. - ''' - global username - if username is False: - username = _get_user(segment_info) - if username is None: - pl.warn('Failed to get username') - return None - if username == hide_user: - return None - euid = _geteuid() - return [{ - 'contents': username, - 'highlight_group': ['user'] if euid != 0 else ['superuser', 'user'], - }] -if 'psutil' not in globals(): - user = requires_segment_info(user) - - -if os.path.exists('/proc/uptime'): - def _get_uptime(): - with open('/proc/uptime', 'r') as f: - return int(float(f.readline().split()[0])) -elif 'psutil' in globals(): - from time import time - - def _get_uptime(): - # psutil.BOOT_TIME is not subject to clock adjustments, but time() is. - # Thus it is a fallback to /proc/uptime reading and not the reverse. - return int(time() - psutil.BOOT_TIME) -else: - def _get_uptime(): - raise NotImplementedError - - -@add_divider_highlight_group('background:divider') -def uptime(pl, days_format='{days:d}d', hours_format=' {hours:d}h', minutes_format=' {minutes:d}m', seconds_format=' {seconds:d}s', shorten_len=3): - '''Return system uptime. - - :param str days_format: - day format string, will be passed ``days`` as the argument - :param str hours_format: - hour format string, will be passed ``hours`` as the argument - :param str minutes_format: - minute format string, will be passed ``minutes`` as the argument - :param str seconds_format: - second format string, will be passed ``seconds`` as the argument - :param int shorten_len: - shorten the amount of units (days, hours, etc.) displayed - - Divider highlight group used: ``background:divider``. - ''' - try: - seconds = _get_uptime() - except NotImplementedError: - pl.warn('Unable to get uptime. 
You should install psutil package') - return None - minutes, seconds = divmod(seconds, 60) - hours, minutes = divmod(minutes, 60) - days, hours = divmod(hours, 24) - time_formatted = list(filter(None, [ - days_format.format(days=days) if days and days_format else None, - hours_format.format(hours=hours) if hours and hours_format else None, - minutes_format.format(minutes=minutes) if minutes and minutes_format else None, - seconds_format.format(seconds=seconds) if seconds and seconds_format else None, - ]))[0:shorten_len] - return ''.join(time_formatted).strip() - - -class NetworkLoadSegment(KwThreadedSegment): - interfaces = {} - replace_num_pat = re.compile(r'[a-zA-Z]+') - - @staticmethod - def key(interface='detect', **kwargs): - return interface - - def compute_state(self, interface): - if interface == 'detect': - proc_exists = getattr(self, 'proc_exists', None) - if proc_exists is None: - proc_exists = self.proc_exists = os.path.exists('/proc/net/route') - if proc_exists: - # Look for default interface in routing table - with open('/proc/net/route', 'rb') as f: - for line in f.readlines(): - parts = line.split() - if len(parts) > 1: - iface, destination = parts[:2] - if not destination.replace(b'0', b''): - interface = iface.decode('utf-8') - break - if interface == 'detect': - # Choose interface with most total activity, excluding some - # well known interface names - interface, total = 'eth0', -1 - for name, rx, tx in _get_interfaces(): - base = self.replace_num_pat.match(name) - if None in (base, rx, tx) or base.group() in ('lo', 'vmnet', 'sit'): - continue - activity = rx + tx - if activity > total: - total = activity - interface = name - - try: - idata = self.interfaces[interface] - try: - idata['prev'] = idata['last'] - except KeyError: - pass - except KeyError: - idata = {} - if self.run_once: - idata['prev'] = (monotonic(), _get_bytes(interface)) - self.shutdown_event.wait(self.interval) - self.interfaces[interface] = idata - - idata['last'] = (monotonic(), _get_bytes(interface)) - return idata.copy() - - def render_one(self, idata, recv_format='DL {value:>8}', sent_format='UL {value:>8}', suffix='B/s', si_prefix=False, **kwargs): - if not idata or 'prev' not in idata: - return None - - t1, b1 = idata['prev'] - t2, b2 = idata['last'] - measure_interval = t2 - t1 - - if None in (b1, b2): - return None - - r = [] - for i, key in zip((0, 1), ('recv', 'sent')): - format = locals()[key + '_format'] - try: - value = (b2[i] - b1[i]) / measure_interval - except ZeroDivisionError: - self.warn('Measure interval zero.') - value = 0 - max_key = key + '_max' - is_gradient = max_key in kwargs - hl_groups = ['network_load_' + key, 'network_load'] - if is_gradient: - hl_groups[:0] = (group + '_gradient' for group in hl_groups) - r.append({ - 'contents': format.format(value=humanize_bytes(value, suffix, si_prefix)), - 'divider_highlight_group': 'background:divider', - 'highlight_group': hl_groups, - }) - if is_gradient: - max = kwargs[max_key] - if value >= max: - r[-1]['gradient_level'] = 100 - else: - r[-1]['gradient_level'] = value * 100.0 / max - - return r - - -network_load = with_docstring(NetworkLoadSegment(), -'''Return the network load. - -Uses the ``psutil`` module if available for multi-platform compatibility, -falls back to reading -:file:`/sys/class/net/{interface}/statistics/{rx,tx}_bytes`. 
- -:param str interface: - network interface to measure (use the special value "detect" to have powerline try to auto-detect the network interface) -:param str suffix: - string appended to each load string -:param bool si_prefix: - use SI prefix, e.g. MB instead of MiB -:param str recv_format: - format string, receives ``value`` as argument -:param str sent_format: - format string, receives ``value`` as argument -:param float recv_max: - maximum number of received bytes per second. Is only used to compute - gradient level -:param float sent_max: - maximum number of sent bytes per second. Is only used to compute gradient - level - -Divider highlight group used: ``background:divider``. - -Highlight groups used: ``network_load_sent_gradient`` (gradient) or ``network_load_recv_gradient`` (gradient) or ``network_load_gradient`` (gradient), ``network_load_sent`` or ``network_load_recv`` or ``network_load``. -''') - - -@requires_segment_info -def virtualenv(pl, segment_info): - '''Return the name of the current Python virtualenv.''' - return os.path.basename(segment_info['environ'].get('VIRTUAL_ENV', '')) or None - - -_IMAPKey = namedtuple('Key', 'username password server port folder') - - -class EmailIMAPSegment(KwThreadedSegment): - interval = 60 - - @staticmethod - def key(username, password, server='imap.gmail.com', port=993, folder='INBOX', **kwargs): - return _IMAPKey(username, password, server, port, folder) - - def compute_state(self, key): - if not key.username or not key.password: - self.warn('Username and password are not configured') - return None - try: - import imaplib - except imaplib.IMAP4.error as e: - unread_count = str(e) - else: - mail = imaplib.IMAP4_SSL(key.server, key.port) - mail.login(key.username, key.password) - rc, message = mail.status(key.folder, '(UNSEEN)') - unread_str = message[0].decode('utf-8') - unread_count = int(re.search('UNSEEN (\d+)', unread_str).group(1)) - return unread_count - - @staticmethod - def render_one(unread_count, max_msgs=None, **kwargs): - if not unread_count: - return None - elif type(unread_count) != int or not max_msgs: - return [{ - 'contents': str(unread_count), - 'highlight_group': ['email_alert'], - }] - else: - return [{ - 'contents': str(unread_count), - 'highlight_group': ['email_alert_gradient', 'email_alert'], - 'gradient_level': min(unread_count * 100.0 / max_msgs, 100), - }] - - -email_imap_alert = with_docstring(EmailIMAPSegment(), -'''Return unread e-mail count for IMAP servers. - -:param str username: - login username -:param str password: - login password -:param str server: - e-mail server -:param int port: - e-mail server port -:param str folder: - folder to check for e-mails -:param int max_msgs: - Maximum number of messages. If there are more messages then max_msgs then it - will use gradient level equal to 100, otherwise gradient level is equal to - ``100 * msgs_num / max_msgs``. If not present gradient is not computed. - -Highlight groups used: ``email_alert_gradient`` (gradient), ``email_alert``. 
-''') - - -STATE_SYMBOLS = { - 'fallback': '', - 'play': '>', - 'pause': '~', - 'stop': 'X', -} - - -class NowPlayingSegment(Segment): - def __call__(self, player='mpd', format='{state_symbol} {artist} - {title} ({total})', state_symbols=STATE_SYMBOLS, **kwargs): - player_func = getattr(self, 'player_{0}'.format(player)) - stats = { - 'state': 'fallback', - 'album': None, - 'artist': None, - 'title': None, - 'elapsed': None, - 'total': None, - } - func_stats = player_func(**kwargs) - if not func_stats: - return None - stats.update(func_stats) - stats['state_symbol'] = state_symbols.get(stats['state']) - return format.format(**stats) - - @staticmethod - def _convert_state(state): - state = state.lower() - if 'play' in state: - return 'play' - if 'pause' in state: - return 'pause' - if 'stop' in state: - return 'stop' - - @staticmethod - def _convert_seconds(seconds): - return '{0:.0f}:{1:02.0f}'.format(*divmod(float(seconds), 60)) - - def player_cmus(self, pl): - '''Return cmus player information. - - cmus-remote -Q returns data with multi-level information i.e. - status playing - file - tag artist - tag title - tag .. - tag n - set continue - set repeat - set .. - set n - - For the information we are looking for we don't really care if we're on - the tag level or the set level. The dictionary comprehension in this - method takes anything in ignore_levels and brings the key inside that - to the first level of the dictionary. - ''' - now_playing_str = run_cmd(pl, ['cmus-remote', '-Q']) - if not now_playing_str: - return - ignore_levels = ('tag', 'set',) - now_playing = dict(((token[0] if token[0] not in ignore_levels else token[1], - (' '.join(token[1:]) if token[0] not in ignore_levels else - ' '.join(token[2:]))) for token in [line.split(' ') for line in now_playing_str.split('\n')[:-1]])) - state = self._convert_state(now_playing.get('status')) - return { - 'state': state, - 'album': now_playing.get('album'), - 'artist': now_playing.get('artist'), - 'title': now_playing.get('title'), - 'elapsed': self._convert_seconds(now_playing.get('position', 0)), - 'total': self._convert_seconds(now_playing.get('duration', 0)), - } - - def player_mpd(self, pl, host='localhost', port=6600): - try: - import mpd - except ImportError: - now_playing = run_cmd(pl, ['mpc', 'current', '-f', '%album%\n%artist%\n%title%\n%time%', '-h', str(host), '-p', str(port)]) - if not now_playing: - return - now_playing = now_playing.split('\n') - return { - 'album': now_playing[0], - 'artist': now_playing[1], - 'title': now_playing[2], - 'total': now_playing[3], - } - else: - client = mpd.MPDClient() - client.connect(host, port) - now_playing = client.currentsong() - if not now_playing: - return - status = client.status() - client.close() - client.disconnect() - return { - 'state': status.get('state'), - 'album': now_playing.get('album'), - 'artist': now_playing.get('artist'), - 'title': now_playing.get('title'), - 'elapsed': self._convert_seconds(now_playing.get('elapsed', 0)), - 'total': self._convert_seconds(now_playing.get('time', 0)), - } - - def player_dbus(self, player_name, bus_name, player_path, iface_prop, iface_player): - try: - import dbus - except ImportError: - pl.exception('Could not add {0} segment: requires python-dbus', player_name) - return - bus = dbus.SessionBus() - try: - player = bus.get_object(bus_name, player_path) - iface = dbus.Interface(player, iface_prop) - info = iface.Get(iface_player, 'Metadata') - status = iface.Get(iface_player, 'PlaybackStatus') - except dbus.exceptions.DBusException: - 
return - if not info: - return - album = u(info.get('xesam:album')) - title = u(info.get('xesam:title')) - artist = info.get('xesam:artist') - state = self._convert_state(status) - if artist: - artist = u(artist[0]) - return { - 'state': state, - 'album': album, - 'artist': artist, - 'title': title, - 'total': self._convert_seconds(info.get('mpris:length') / 1e6), - } - - def player_spotify_dbus(self, pl): - return self.player_dbus( - player_name='Spotify', - bus_name='com.spotify.qt', - player_path='/', - iface_prop='org.freedesktop.DBus.Properties', - iface_player='org.freedesktop.MediaPlayer2', - ) - - def player_clementine(self, pl): - return self.player_dbus( - player_name='Clementine', - bus_name='org.mpris.MediaPlayer2.clementine', - player_path='/org/mpris/MediaPlayer2', - iface_prop='org.freedesktop.DBus.Properties', - iface_player='org.mpris.MediaPlayer2.Player', - ) - - def player_spotify_apple_script(self, pl): - status_delimiter = '-~`/=' - ascript = ''' - tell application "System Events" - set process_list to (name of every process) - end tell - - if process_list contains "Spotify" then - tell application "Spotify" - if player state is playing or player state is paused then - set track_name to name of current track - set artist_name to artist of current track - set album_name to album of current track - set track_length to duration of current track - set now_playing to "" & player state & "{0}" & album_name & "{0}" & artist_name & "{0}" & track_name & "{0}" & track_length - return now_playing - else - return player state - end if - - end tell - else - return "stopped" - end if - '''.format(status_delimiter) - - spotify = asrun(pl, ascript) - if not asrun: - return None - - spotify_status = spotify.split(status_delimiter) - state = self._convert_state(spotify_status[0]) - if state == 'stop': - return None - return { - 'state': state, - 'album': spotify_status[1], - 'artist': spotify_status[2], - 'title': spotify_status[3], - 'total': self._convert_seconds(int(spotify_status[4])) - } - - try: - __import__('dbus') - except ImportError: - if sys.platform.startswith('darwin'): - player_spotify = player_spotify_apple_script - else: - player_spotify = player_spotify_dbus - else: - player_spotify = player_spotify_dbus - - def player_rhythmbox(self, pl): - now_playing = run_cmd(pl, ['rhythmbox-client', '--no-start', '--no-present', '--print-playing-format', '%at\n%aa\n%tt\n%te\n%td']) - if not now_playing: - return - now_playing = now_playing.split('\n') - return { - 'album': now_playing[0], - 'artist': now_playing[1], - 'title': now_playing[2], - 'elapsed': now_playing[3], - 'total': now_playing[4], - } - - def player_rdio(self, pl): - status_delimiter = '-~`/=' - ascript = ''' - tell application "System Events" - set rdio_active to the count(every process whose name is "Rdio") - if rdio_active is 0 then - return - end if - end tell - tell application "Rdio" - set rdio_name to the name of the current track - set rdio_artist to the artist of the current track - set rdio_album to the album of the current track - set rdio_duration to the duration of the current track - set rdio_state to the player state - set rdio_elapsed to the player position - return rdio_name & "{0}" & rdio_artist & "{0}" & rdio_album & "{0}" & rdio_elapsed & "{0}" & rdio_duration & "{0}" & rdio_state - end tell - '''.format(status_delimiter) - now_playing = asrun(pl, ascript) - if not now_playing: - return - now_playing = now_playing.split('\n') - if len(now_playing) != 6: - return - state = 
self._convert_state(now_playing[5]) - total = self._convert_seconds(now_playing[4]) - elapsed = self._convert_seconds(float(now_playing[3]) * float(now_playing[4]) / 100) - return { - 'title': now_playing[0], - 'artist': now_playing[1], - 'album': now_playing[2], - 'elapsed': elapsed, - 'total': total, - 'state': state, - 'state_symbol': self.STATE_SYMBOLS.get(state) - } -now_playing = NowPlayingSegment() - - -def _get_battery(pl): - try: - import dbus - except ImportError: - pl.debug('Not using DBUS+UPower as dbus is not available') - else: - try: - bus = dbus.SystemBus() - except Exception as e: - pl.exception('Failed to connect to system bus: {0}', str(e)) - else: - interface = 'org.freedesktop.UPower' - try: - up = bus.get_object(interface, '/org/freedesktop/UPower') - except dbus.exceptions.DBusException as e: - if getattr(e, '_dbus_error_name', '').endswidth('ServiceUnknown'): - pl.debug('Not using DBUS+UPower as UPower is not available via dbus') - else: - pl.exception('Failed to get UPower service with dbus: {0}', str(e)) - else: - devinterface = 'org.freedesktop.DBus.Properties' - devtype_name = interface + '.Device' - for devpath in up.EnumerateDevices(dbus_interface=interface): - dev = bus.get_object(interface, devpath) - devget = lambda what: dev.Get( - devtype_name, - what, - dbus_interface=devinterface - ) - if int(devget('Type')) != 2: - pl.debug('Not using DBUS+UPower with {0}: invalid type', devpath) - continue - if not bool(devget('IsPresent')): - pl.debug('Not using DBUS+UPower with {0}: not present', devpath) - continue - if not bool(devget('PowerSupply')): - pl.debug('Not using DBUS+UPower with {0}: not a power supply', devpath) - continue - pl.debug('Using DBUS+UPower with {0}', devpath) - return lambda pl: float( - dbus.Interface(dev, dbus_interface=devinterface).Get( - devtype_name, - 'Percentage' - ) - ) - pl.debug('Not using DBUS+UPower as no batteries were found') - - if os.path.isdir('/sys/class/power_supply'): - linux_bat_fmt = '/sys/class/power_supply/{0}/capacity' - for linux_bat in os.listdir('/sys/class/power_supply'): - cap_path = linux_bat_fmt.format(linux_bat) - if linux_bat.startswith('BAT') and os.path.exists(cap_path): - pl.debug('Using /sys/class/power_supply with battery {0}', linux_bat) - - def _get_capacity(pl): - with open(cap_path, 'r') as f: - return int(float(f.readline().split()[0])) - - return _get_capacity - pl.debug('Not using /sys/class/power_supply as no batteries were found') - else: - pl.debug('Not using /sys/class/power_supply: no directory') - - try: - from shutil import which # Python-3.3 and later - except ImportError: - pl.info('Using dumb “which” which only checks for file in /usr/bin') - which = lambda f: (lambda fp: os.path.exists(fp) and fp)(os.path.join('/usr/bin', f)) - - if which('pmset'): - pl.debug('Using pmset') - - def _get_capacity(pl): - import re - battery_summary = run_cmd(pl, ['pmset', '-g', 'batt']) - battery_percent = re.search(r'(\d+)%', battery_summary).group(1) - return int(battery_percent) - - return _get_capacity - else: - pl.debug('Not using pmset: executable not found') - - if sys.platform.startswith('win'): - # From http://stackoverflow.com/a/21083571/273566, reworked - try: - from win32com.client import GetObject - except ImportError: - pl.debug('Not using win32com.client as it is not available') - else: - try: - wmi = GetObject('winmgmts:') - except Exception as e: - pl.exception('Failed to run GetObject from win32com.client: {0}', str(e)) - else: - for battery in wmi.InstancesOf('Win32_Battery'): - 
pl.debug('Using win32com.client with Win32_Battery') - - def _get_capacity(pl): - # http://msdn.microsoft.com/en-us/library/aa394074(v=vs.85).aspx - return battery.EstimatedChargeRemaining - - return _get_capacity - pl.debug('Not using win32com.client as no batteries were found') - - from ctypes import Structure, c_byte, c_ulong, windll, byref - - class PowerClass(Structure): - _fields_ = [ - ('ACLineStatus', c_byte), - ('BatteryFlag', c_byte), - ('BatteryLifePercent', c_byte), - ('Reserved1', c_byte), - ('BatteryLifeTime', c_ulong), - ('BatteryFullLifeTime', c_ulong) - ] - - def _get_capacity(pl): - powerclass = PowerClass() - result = windll.kernel32.GetSystemPowerStatus(byref(powerclass)) - # http://msdn.microsoft.com/en-us/library/windows/desktop/aa372693(v=vs.85).aspx - if result: - return None - return powerclass.BatteryLifePercent - - if _get_capacity() is None: - pl.debug('Not using GetSystemPowerStatus because it failed') - else: - pl.debug('Using GetSystemPowerStatus') - - return _get_capacity - - raise NotImplementedError - - -def _get_capacity(pl): - global _get_capacity - - def _failing_get_capacity(pl): - raise NotImplementedError - - try: - _get_capacity = _get_battery(pl) - except NotImplementedError: - _get_capacity = _failing_get_capacity - except Exception as e: - pl.exception('Exception while obtaining battery capacity getter: {0}', str(e)) - _get_capacity = _failing_get_capacity - return _get_capacity(pl) - - -def battery(pl, format='{capacity:3.0%}', steps=5, gamify=False, full_heart='O', empty_heart='O'): - '''Return battery charge status. - - :param str format: - Percent format in case gamify is False. - :param int steps: - Number of discrete steps to show between 0% and 100% capacity if gamify - is True. - :param bool gamify: - Measure in hearts (♥) instead of percentages. For full hearts - ``battery_full`` highlighting group is preferred, for empty hearts there - is ``battery_empty``. - :param str full_heart: - Heart displayed for “full” part of battery. - :param str empty_heart: - Heart displayed for “used” part of battery. It is also displayed using - another gradient level and highlighting group, so it is OK for it to be - the same as full_heart as long as necessary highlighting groups are - defined. - - ``battery_gradient`` and ``battery`` groups are used in any case, first is - preferred. - - Highlight groups used: ``battery_full`` or ``battery_gradient`` (gradient) or ``battery``, ``battery_empty`` or ``battery_gradient`` (gradient) or ``battery``. - ''' - try: - capacity = _get_capacity(pl) - except NotImplementedError: - pl.info('Unable to get battery capacity.') - return None - ret = [] - if gamify: - denom = int(steps) - numer = int(denom * capacity / 100) - ret.append({ - 'contents': full_heart * numer, - 'draw_inner_divider': False, - 'highlight_group': ['battery_full', 'battery_gradient', 'battery'], - # Using zero as “nothing to worry about”: it is least alert color. - 'gradient_level': 0, - }) - ret.append({ - 'contents': empty_heart * (denom - numer), - 'draw_inner_divider': False, - 'highlight_group': ['battery_empty', 'battery_gradient', 'battery'], - # Using a hundred as it is most alert color. - 'gradient_level': 100, - }) - else: - ret.append({ - 'contents': format.format(capacity=(capacity / 100.0)), - 'highlight_group': ['battery_gradient', 'battery'], - # Gradients are “least alert – most alert” by default, capacity has - # the opposite semantics. 
- 'gradient_level': 100 - capacity, - }) - return ret diff --git a/powerline/segments/common/__init__.py b/powerline/segments/common/__init__.py new file mode 100644 index 00000000..f5ec2fa9 --- /dev/null +++ b/powerline/segments/common/__init__.py @@ -0,0 +1,26 @@ +# vim:fileencoding=utf-8:noet +from __future__ import (unicode_literals, division, absolute_import, print_function) + + +# DEPRECATED MODULE. Do not add any segments below. Do not remove existing +# segments as well until next major release. + + +from powerline.segments.common.vcs import branch # NOQA +from powerline.segments.common.sys import cpu_load_percent # NOQA +from powerline.segments.common.sys import uptime # NOQA +from powerline.segments.common.sys import system_load # NOQA +from powerline.segments.common.net import hostname # NOQA +from powerline.segments.common.net import external_ip # NOQA +from powerline.segments.common.net import internal_ip # NOQA +from powerline.segments.common.net import network_load # NOQA +from powerline.segments.common.env import cwd # NOQA +from powerline.segments.common.env import user # NOQA +from powerline.segments.common.env import environment # NOQA +from powerline.segments.common.env import virtualenv # NOQA +from powerline.segments.common.bat import battery # NOQA +from powerline.segments.common.wthr import weather # NOQA +from powerline.segments.common.time import date # NOQA +from powerline.segments.common.time import fuzzy_time # NOQA +from powerline.segments.common.mail import email_imap_alert # NOQA +from powerline.segments.common.players import now_playing # NOQA diff --git a/powerline/segments/common/bat.py b/powerline/segments/common/bat.py new file mode 100644 index 00000000..713bc149 --- /dev/null +++ b/powerline/segments/common/bat.py @@ -0,0 +1,222 @@ +# vim:fileencoding=utf-8:noet +from __future__ import (unicode_literals, division, absolute_import, print_function) + +import os +import sys +import re + +from powerline.lib.shell import run_cmd + + +# XXX Warning: module name must not be equal to the segment name as long as this +# segment is imported into powerline.segments.common module. 
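+
+# _get_battery() below probes the available capacity sources in order and
+# returns the first getter that finds a battery: DBUS+UPower, then
+# /sys/class/power_supply/BAT* (Linux), then pmset (OS X), then
+# win32com/Win32_Battery or GetSystemPowerStatus (Windows). If no source
+# works it raises NotImplementedError and the battery segment logs the
+# failure and returns None.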
+
+
+def _get_battery(pl):
+	try:
+		import dbus
+	except ImportError:
+		pl.debug('Not using DBUS+UPower as dbus is not available')
+	else:
+		try:
+			bus = dbus.SystemBus()
+		except Exception as e:
+			pl.exception('Failed to connect to system bus: {0}', str(e))
+		else:
+			interface = 'org.freedesktop.UPower'
+			try:
+				up = bus.get_object(interface, '/org/freedesktop/UPower')
+			except dbus.exceptions.DBusException as e:
+				if getattr(e, '_dbus_error_name', '').endswith('ServiceUnknown'):
+					pl.debug('Not using DBUS+UPower as UPower is not available via dbus')
+				else:
+					pl.exception('Failed to get UPower service with dbus: {0}', str(e))
+			else:
+				devinterface = 'org.freedesktop.DBus.Properties'
+				devtype_name = interface + '.Device'
+				for devpath in up.EnumerateDevices(dbus_interface=interface):
+					dev = bus.get_object(interface, devpath)
+					devget = lambda what: dev.Get(
+						devtype_name,
+						what,
+						dbus_interface=devinterface
+					)
+					if int(devget('Type')) != 2:
+						pl.debug('Not using DBUS+UPower with {0}: invalid type', devpath)
+						continue
+					if not bool(devget('IsPresent')):
+						pl.debug('Not using DBUS+UPower with {0}: not present', devpath)
+						continue
+					if not bool(devget('PowerSupply')):
+						pl.debug('Not using DBUS+UPower with {0}: not a power supply', devpath)
+						continue
+					pl.debug('Using DBUS+UPower with {0}', devpath)
+					return lambda pl: float(
+						dbus.Interface(dev, dbus_interface=devinterface).Get(
+							devtype_name,
+							'Percentage'
+						)
+					)
+				pl.debug('Not using DBUS+UPower as no batteries were found')
+
+	if os.path.isdir('/sys/class/power_supply'):
+		linux_bat_fmt = '/sys/class/power_supply/{0}/capacity'
+		for linux_bat in os.listdir('/sys/class/power_supply'):
+			cap_path = linux_bat_fmt.format(linux_bat)
+			if linux_bat.startswith('BAT') and os.path.exists(cap_path):
+				pl.debug('Using /sys/class/power_supply with battery {0}', linux_bat)
+
+				def _get_capacity(pl):
+					with open(cap_path, 'r') as f:
+						return int(float(f.readline().split()[0]))
+
+				return _get_capacity
+		pl.debug('Not using /sys/class/power_supply as no batteries were found')
+	else:
+		pl.debug('Not using /sys/class/power_supply: no directory')
+
+	try:
+		from shutil import which  # Python-3.3 and later
+	except ImportError:
+		pl.info('Using dumb “which” which only checks for file in /usr/bin')
+		which = lambda f: (lambda fp: os.path.exists(fp) and fp)(os.path.join('/usr/bin', f))
+
+	if which('pmset'):
+		pl.debug('Using pmset')
+
+		BATTERY_PERCENT_RE = re.compile(r'(\d+)%')
+
+		def _get_capacity(pl):
+			battery_summary = run_cmd(pl, ['pmset', '-g', 'batt'])
+			battery_percent = BATTERY_PERCENT_RE.search(battery_summary).group(1)
+			return int(battery_percent)
+
+		return _get_capacity
+	else:
+		pl.debug('Not using pmset: executable not found')
+
+	if sys.platform.startswith('win'):
+		# From http://stackoverflow.com/a/21083571/273566, reworked
+		try:
+			from win32com.client import GetObject
+		except ImportError:
+			pl.debug('Not using win32com.client as it is not available')
+		else:
+			try:
+				wmi = GetObject('winmgmts:')
+			except Exception as e:
+				pl.exception('Failed to run GetObject from win32com.client: {0}', str(e))
+			else:
+				for battery in wmi.InstancesOf('Win32_Battery'):
+					pl.debug('Using win32com.client with Win32_Battery')

+					def _get_capacity(pl):
+						# http://msdn.microsoft.com/en-us/library/aa394074(v=vs.85).aspx
+						return battery.EstimatedChargeRemaining
+
+					return _get_capacity
+				pl.debug('Not using win32com.client as no batteries were found')
+
+		from ctypes import Structure, c_byte, c_ulong, windll, byref
+
+		class PowerClass(Structure):
+			_fields_ = [
+				('ACLineStatus', c_byte),
+				('BatteryFlag', c_byte),
+				('BatteryLifePercent', c_byte),
+				('Reserved1', c_byte),
+				('BatteryLifeTime', c_ulong),
+				('BatteryFullLifeTime', c_ulong)
+			]
+
+		def _get_capacity(pl):
+			powerclass = PowerClass()
+			result = windll.kernel32.GetSystemPowerStatus(byref(powerclass))
+			# http://msdn.microsoft.com/en-us/library/windows/desktop/aa372693(v=vs.85).aspx
+			if result:
+				return None
+			return powerclass.BatteryLifePercent
+
+		if _get_capacity(pl) is None:
+			pl.debug('Not using GetSystemPowerStatus because it failed')
+		else:
+			pl.debug('Using GetSystemPowerStatus')
+
+		return _get_capacity
+
+	raise NotImplementedError
+
+
+def _get_capacity(pl):
+	global _get_capacity
+
+	def _failing_get_capacity(pl):
+		raise NotImplementedError
+
+	try:
+		_get_capacity = _get_battery(pl)
+	except NotImplementedError:
+		_get_capacity = _failing_get_capacity
+	except Exception as e:
+		pl.exception('Exception while obtaining battery capacity getter: {0}', str(e))
+		_get_capacity = _failing_get_capacity
+	return _get_capacity(pl)
+
+
+def battery(pl, format='{capacity:3.0%}', steps=5, gamify=False, full_heart='O', empty_heart='O'):
+	'''Return battery charge status.
+
+	:param str format:
+		Percent format in case gamify is False.
+	:param int steps:
+		Number of discrete steps to show between 0% and 100% capacity if gamify
+		is True.
+	:param bool gamify:
+		Measure in hearts (♥) instead of percentages. For full hearts
+		``battery_full`` highlighting group is preferred, for empty hearts there
+		is ``battery_empty``.
+	:param str full_heart:
+		Heart displayed for “full” part of battery.
+	:param str empty_heart:
+		Heart displayed for “used” part of battery. It is also displayed using
+		another gradient level and highlighting group, so it is OK for it to be
+		the same as full_heart as long as necessary highlighting groups are
+		defined.
+
+	``battery_gradient`` and ``battery`` groups are used in any case, first is
+	preferred.
+
+	Highlight groups used: ``battery_full`` or ``battery_gradient`` (gradient) or ``battery``, ``battery_empty`` or ``battery_gradient`` (gradient) or ``battery``.
+	'''
+	try:
+		capacity = _get_capacity(pl)
+	except NotImplementedError:
+		pl.info('Unable to get battery capacity.')
+		return None
+	ret = []
+	if gamify:
+		denom = int(steps)
+		numer = int(denom * capacity / 100)
+		ret.append({
+			'contents': full_heart * numer,
+			'draw_inner_divider': False,
+			'highlight_group': ['battery_full', 'battery_gradient', 'battery'],
+			# Using zero as “nothing to worry about”: it is least alert color.
+			'gradient_level': 0,
+		})
+		ret.append({
+			'contents': empty_heart * (denom - numer),
+			'draw_inner_divider': False,
+			'highlight_group': ['battery_empty', 'battery_gradient', 'battery'],
+			# Using a hundred as it is most alert color.
+			'gradient_level': 100,
+		})
+	else:
+		ret.append({
+			'contents': format.format(capacity=(capacity / 100.0)),
+			'highlight_group': ['battery_gradient', 'battery'],
+			# Gradients are “least alert – most alert” by default, capacity has
+			# the opposite semantics.
+ 'gradient_level': 100 - capacity, + }) + return ret diff --git a/powerline/segments/common/env.py b/powerline/segments/common/env.py new file mode 100644 index 00000000..f6c49c55 --- /dev/null +++ b/powerline/segments/common/env.py @@ -0,0 +1,168 @@ +# vim:fileencoding=utf-8:noet +from __future__ import (unicode_literals, division, absolute_import, print_function) + +import os + +from powerline.lib.unicode import out_u +from powerline.theme import requires_segment_info +from powerline.segments import Segment, with_docstring + + +@requires_segment_info +def environment(pl, segment_info, variable=None): + '''Return the value of any defined environment variable + + :param string variable: + The environment variable to return if found + ''' + return segment_info['environ'].get(variable, None) + + +@requires_segment_info +def virtualenv(pl, segment_info): + '''Return the name of the current Python virtualenv.''' + return os.path.basename(segment_info['environ'].get('VIRTUAL_ENV', '')) or None + + +@requires_segment_info +class CwdSegment(Segment): + def argspecobjs(self): + for obj in super(CwdSegment, self).argspecobjs(): + yield obj + yield 'get_shortened_path', self.get_shortened_path + + def omitted_args(self, name, method): + if method is self.get_shortened_path: + return (0, 1, 2) + else: + return super(CwdSegment, self).omitted_args(name, method) + + def get_shortened_path(self, pl, segment_info, shorten_home=True, **kwargs): + try: + path = out_u(segment_info['getcwd']()) + except OSError as e: + if e.errno == 2: + # user most probably deleted the directory + # this happens when removing files from Mercurial repos for example + pl.warn('Current directory not found') + return '[not found]' + else: + raise + if shorten_home: + home = segment_info['home'] + if home: + home = out_u(home) + if path.startswith(home): + path = '~' + path[len(home):] + return path + + def __call__(self, pl, segment_info, + dir_shorten_len=None, + dir_limit_depth=None, + use_path_separator=False, + ellipsis='...', + **kwargs): + cwd = self.get_shortened_path(pl, segment_info, **kwargs) + cwd_split = cwd.split(os.sep) + cwd_split_len = len(cwd_split) + cwd = [i[0:dir_shorten_len] if dir_shorten_len and i else i for i in cwd_split[:-1]] + [cwd_split[-1]] + if dir_limit_depth and cwd_split_len > dir_limit_depth + 1: + del(cwd[0:-dir_limit_depth]) + if ellipsis is not None: + cwd.insert(0, ellipsis) + ret = [] + if not cwd[0]: + cwd[0] = '/' + draw_inner_divider = not use_path_separator + for part in cwd: + if not part: + continue + if use_path_separator: + part += os.sep + ret.append({ + 'contents': part, + 'divider_highlight_group': 'cwd:divider', + 'draw_inner_divider': draw_inner_divider, + }) + ret[-1]['highlight_group'] = ['cwd:current_folder', 'cwd'] + if use_path_separator: + ret[-1]['contents'] = ret[-1]['contents'][:-1] + if len(ret) > 1 and ret[0]['contents'][0] == os.sep: + ret[0]['contents'] = ret[0]['contents'][1:] + return ret + + +cwd = with_docstring(CwdSegment(), +'''Return the current working directory. + +Returns a segment list to create a breadcrumb-like effect. + +:param int dir_shorten_len: + shorten parent directory names to this length (e.g. + :file:`/long/path/to/powerline` → :file:`/l/p/t/powerline`) +:param int dir_limit_depth: + limit directory depth to this number (e.g. + :file:`/long/path/to/powerline` → :file:`⋯/to/powerline`) +:param bool use_path_separator: + Use path separator in place of soft divider. +:param bool shorten_home: + Shorten home directory to ``~``. 
+:param str ellipsis: + Specifies what to use in place of omitted directories. Use None to not + show this subsegment at all. + +Divider highlight group used: ``cwd:divider``. + +Highlight groups used: ``cwd:current_folder`` or ``cwd``. It is recommended to define all highlight groups. +''') + + +try: + import psutil + + # psutil-2.0.0: psutil.Process.username is unbound method + if callable(psutil.Process.username): + def _get_user(): + return psutil.Process(os.getpid()).username() + # pre psutil-2.0.0: psutil.Process.username has type property + else: + def _get_user(): + return psutil.Process(os.getpid()).username +except ImportError: + try: + import pwd + except ImportError: + from getpass import getuser as _get_user + else: + def _get_user(): + return pwd.getpwuid(os.geteuid()).pw_name + + +username = False +# os.geteuid is not available on windows +_geteuid = getattr(os, 'geteuid', lambda: 1) + + +def user(pl, hide_user=None): + '''Return the current user. + + :param str hide_user: + Omit showing segment for users with names equal to this string. + + Highlights the user with the ``superuser`` if the effective user ID is 0. + + Highlight groups used: ``superuser`` or ``user``. It is recommended to define all highlight groups. + ''' + global username + if username is False: + username = _get_user() + if username is None: + pl.warn('Failed to get username') + return None + if username == hide_user: + return None + euid = _geteuid() + return [{ + 'contents': username, + 'highlight_group': ['user'] if euid != 0 else ['superuser', 'user'], + }] diff --git a/powerline/segments/common/mail.py b/powerline/segments/common/mail.py new file mode 100644 index 00000000..dc266f36 --- /dev/null +++ b/powerline/segments/common/mail.py @@ -0,0 +1,74 @@ +# vim:fileencoding=utf-8:noet +from __future__ import (unicode_literals, division, absolute_import, print_function) + +import re + +from collections import namedtuple + +from powerline.lib.threaded import KwThreadedSegment +from powerline.segments import with_docstring + + +_IMAPKey = namedtuple('Key', 'username password server port folder') + + +class EmailIMAPSegment(KwThreadedSegment): + interval = 60 + + @staticmethod + def key(username, password, server='imap.gmail.com', port=993, folder='INBOX', **kwargs): + return _IMAPKey(username, password, server, port, folder) + + def compute_state(self, key): + if not key.username or not key.password: + self.warn('Username and password are not configured') + return None + try: + import imaplib + except imaplib.IMAP4.error as e: + unread_count = str(e) + else: + mail = imaplib.IMAP4_SSL(key.server, key.port) + mail.login(key.username, key.password) + rc, message = mail.status(key.folder, '(UNSEEN)') + unread_str = message[0].decode('utf-8') + unread_count = int(re.search('UNSEEN (\d+)', unread_str).group(1)) + return unread_count + + @staticmethod + def render_one(unread_count, max_msgs=None, **kwargs): + if not unread_count: + return None + elif type(unread_count) != int or not max_msgs: + return [{ + 'contents': str(unread_count), + 'highlight_group': ['email_alert'], + }] + else: + return [{ + 'contents': str(unread_count), + 'highlight_group': ['email_alert_gradient', 'email_alert'], + 'gradient_level': min(unread_count * 100.0 / max_msgs, 100), + }] + + +email_imap_alert = with_docstring(EmailIMAPSegment(), +'''Return unread e-mail count for IMAP servers. 
+
+:param str username:
+	login username
+:param str password:
+	login password
+:param str server:
+	e-mail server
+:param int port:
+	e-mail server port
+:param str folder:
+	folder to check for e-mails
+:param int max_msgs:
+	Maximum number of messages. If there are more messages than max_msgs then it
+	will use gradient level equal to 100, otherwise gradient level is equal to
+	``100 * msgs_num / max_msgs``. If not present, gradient is not computed.
+
+Highlight groups used: ``email_alert_gradient`` (gradient), ``email_alert``.
+''')
diff --git a/powerline/segments/common/net.py b/powerline/segments/common/net.py
new file mode 100644
index 00000000..3e4ccb24
--- /dev/null
+++ b/powerline/segments/common/net.py
@@ -0,0 +1,287 @@
+# vim:fileencoding=utf-8:noet
+from __future__ import (unicode_literals, division, absolute_import, print_function)
+
+import re
+import os
+import socket
+
+from powerline.lib.url import urllib_read
+from powerline.lib.threaded import ThreadedSegment, KwThreadedSegment
+from powerline.lib.monotonic import monotonic
+from powerline.lib.humanize_bytes import humanize_bytes
+from powerline.segments import with_docstring
+from powerline.theme import requires_segment_info
+
+
+@requires_segment_info
+def hostname(pl, segment_info, only_if_ssh=False, exclude_domain=False):
+	'''Return the current hostname.
+
+	:param bool only_if_ssh:
+		only return the hostname if currently in an SSH session
+	:param bool exclude_domain:
+		return the hostname without domain if there is one
+	'''
+	if only_if_ssh and not segment_info['environ'].get('SSH_CLIENT'):
+		return None
+	if exclude_domain:
+		return socket.gethostname().split('.')[0]
+	return socket.gethostname()
+
+
+def _external_ip(query_url='http://ipv4.icanhazip.com/'):
+	return urllib_read(query_url).strip()
+
+
+class ExternalIpSegment(ThreadedSegment):
+	interval = 300
+
+	def set_state(self, query_url='http://ipv4.icanhazip.com/', **kwargs):
+		self.query_url = query_url
+		super(ExternalIpSegment, self).set_state(**kwargs)
+
+	def update(self, old_ip):
+		return _external_ip(query_url=self.query_url)
+
+	def render(self, ip, **kwargs):
+		if not ip:
+			return None
+		return [{'contents': ip, 'divider_highlight_group': 'background:divider'}]
+
+
+external_ip = with_docstring(ExternalIpSegment(),
+'''Return external IP address.
+
+:param str query_url:
+	URI to query for IP address, should return only the IP address as a text string
+
+	Suggested URIs:
+
+	* http://ipv4.icanhazip.com/
+	* http://ipv6.icanhazip.com/
+	* http://icanhazip.com/ (returns IPv6 address if available, else IPv4)
+
+Divider highlight group used: ``background:divider``.
+''') + + +try: + import netifaces +except ImportError: + def internal_ip(pl, interface='detect', ipv=4): + return None +else: + _interface_starts = { + 'eth': 10, # Regular ethernet adapters : eth1 + 'enp': 10, # Regular ethernet adapters, Gentoo : enp2s0 + 'ath': 9, # Atheros WiFi adapters : ath0 + 'wlan': 9, # Other WiFi adapters : wlan1 + 'wlp': 9, # Other WiFi adapters, Gentoo : wlp5s0 + 'teredo': 1, # miredo interface : teredo + 'lo': -10, # Loopback interface : lo + } + + _interface_start_re = re.compile(r'^([a-z]+?)(\d|$)') + + def _interface_key(interface): + match = _interface_start_re.match(interface) + if match: + try: + base = _interface_starts[match.group(1)] * 100 + except KeyError: + base = 500 + if match.group(2): + return base - int(match.group(2)) + else: + return base + else: + return 0 + + def internal_ip(pl, interface='detect', ipv=4): + if interface == 'detect': + try: + interface = next(iter(sorted(netifaces.interfaces(), key=_interface_key, reverse=True))) + except StopIteration: + pl.info('No network interfaces found') + return None + addrs = netifaces.ifaddresses(interface) + try: + return addrs[netifaces.AF_INET6 if ipv == 6 else netifaces.AF_INET][0]['addr'] + except (KeyError, IndexError): + return None + + +internal_ip = with_docstring(internal_ip, +'''Return internal IP address + +Requires ``netifaces`` module to work properly. + +:param str interface: + Interface on which IP will be checked. Use ``detect`` to automatically + detect interface. In this case interfaces with lower numbers will be + preferred over interfaces with similar names. Order of preference based on + names: + + #. ``eth`` and ``enp`` followed by number or the end of string. + #. ``ath``, ``wlan`` and ``wlp`` followed by number or the end of string. + #. ``teredo`` followed by number or the end of string. + #. Any other interface that is not ``lo*``. + #. ``lo`` followed by number or the end of string. +:param int ipv: + 4 or 6 for ipv4 and ipv6 respectively, depending on which IP address you + need exactly. 
+''') + + +try: + import psutil + + def _get_bytes(interface): + try: + io_counters = psutil.net_io_counters(pernic=True) + except AttributeError: + io_counters = psutil.network_io_counters(pernic=True) + if_io = io_counters.get(interface) + if not if_io: + return None + return if_io.bytes_recv, if_io.bytes_sent + + def _get_interfaces(): + io_counters = psutil.network_io_counters(pernic=True) + for interface, data in io_counters.items(): + if data: + yield interface, data.bytes_recv, data.bytes_sent +except ImportError: + def _get_bytes(interface): + with open('/sys/class/net/{interface}/statistics/rx_bytes'.format(interface=interface), 'rb') as file_obj: + rx = int(file_obj.read()) + with open('/sys/class/net/{interface}/statistics/tx_bytes'.format(interface=interface), 'rb') as file_obj: + tx = int(file_obj.read()) + return (rx, tx) + + def _get_interfaces(): + for interface in os.listdir('/sys/class/net'): + x = _get_bytes(interface) + if x is not None: + yield interface, x[0], x[1] + + +class NetworkLoadSegment(KwThreadedSegment): + interfaces = {} + replace_num_pat = re.compile(r'[a-zA-Z]+') + + @staticmethod + def key(interface='detect', **kwargs): + return interface + + def compute_state(self, interface): + if interface == 'detect': + proc_exists = getattr(self, 'proc_exists', None) + if proc_exists is None: + proc_exists = self.proc_exists = os.path.exists('/proc/net/route') + if proc_exists: + # Look for default interface in routing table + with open('/proc/net/route', 'rb') as f: + for line in f.readlines(): + parts = line.split() + if len(parts) > 1: + iface, destination = parts[:2] + if not destination.replace(b'0', b''): + interface = iface.decode('utf-8') + break + if interface == 'detect': + # Choose interface with most total activity, excluding some + # well known interface names + interface, total = 'eth0', -1 + for name, rx, tx in _get_interfaces(): + base = self.replace_num_pat.match(name) + if None in (base, rx, tx) or base.group() in ('lo', 'vmnet', 'sit'): + continue + activity = rx + tx + if activity > total: + total = activity + interface = name + + try: + idata = self.interfaces[interface] + try: + idata['prev'] = idata['last'] + except KeyError: + pass + except KeyError: + idata = {} + if self.run_once: + idata['prev'] = (monotonic(), _get_bytes(interface)) + self.shutdown_event.wait(self.interval) + self.interfaces[interface] = idata + + idata['last'] = (monotonic(), _get_bytes(interface)) + return idata.copy() + + def render_one(self, idata, recv_format='DL {value:>8}', sent_format='UL {value:>8}', suffix='B/s', si_prefix=False, **kwargs): + if not idata or 'prev' not in idata: + return None + + t1, b1 = idata['prev'] + t2, b2 = idata['last'] + measure_interval = t2 - t1 + + if None in (b1, b2): + return None + + r = [] + for i, key in zip((0, 1), ('recv', 'sent')): + format = locals()[key + '_format'] + try: + value = (b2[i] - b1[i]) / measure_interval + except ZeroDivisionError: + self.warn('Measure interval zero.') + value = 0 + max_key = key + '_max' + is_gradient = max_key in kwargs + hl_groups = ['network_load_' + key, 'network_load'] + if is_gradient: + hl_groups[:0] = (group + '_gradient' for group in hl_groups) + r.append({ + 'contents': format.format(value=humanize_bytes(value, suffix, si_prefix)), + 'divider_highlight_group': 'background:divider', + 'highlight_group': hl_groups, + }) + if is_gradient: + max = kwargs[max_key] + if value >= max: + r[-1]['gradient_level'] = 100 + else: + r[-1]['gradient_level'] = value * 100.0 / max + + return r + + 
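+# NetworkLoadSegment keeps two timestamped byte-counter samples per interface
+# (“prev” and “last”, taken one interval apart); render_one() divides the
+# received/sent deltas by the elapsed time to obtain the DL/UL rates shown.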
+network_load = with_docstring(NetworkLoadSegment(), +'''Return the network load. + +Uses the ``psutil`` module if available for multi-platform compatibility, +falls back to reading +:file:`/sys/class/net/{interface}/statistics/{rx,tx}_bytes`. + +:param str interface: + network interface to measure (use the special value "detect" to have powerline try to auto-detect the network interface) +:param str suffix: + string appended to each load string +:param bool si_prefix: + use SI prefix, e.g. MB instead of MiB +:param str recv_format: + format string, receives ``value`` as argument +:param str sent_format: + format string, receives ``value`` as argument +:param float recv_max: + maximum number of received bytes per second. Is only used to compute + gradient level +:param float sent_max: + maximum number of sent bytes per second. Is only used to compute gradient + level + +Divider highlight group used: ``background:divider``. + +Highlight groups used: ``network_load_sent_gradient`` (gradient) or ``network_load_recv_gradient`` (gradient) or ``network_load_gradient`` (gradient), ``network_load_sent`` or ``network_load_recv`` or ``network_load``. +''') diff --git a/powerline/segments/common/players.py b/powerline/segments/common/players.py new file mode 100644 index 00000000..589e419e --- /dev/null +++ b/powerline/segments/common/players.py @@ -0,0 +1,270 @@ +# vim:fileencoding=utf-8:noet +from __future__ import (unicode_literals, division, absolute_import, print_function) + +import sys + +from powerline.lib.shell import asrun, run_cmd +from powerline.lib.unicode import out_u +from powerline.segments import Segment + + +STATE_SYMBOLS = { + 'fallback': '', + 'play': '>', + 'pause': '~', + 'stop': 'X', +} + + +class NowPlayingSegment(Segment): + def __call__(self, player='mpd', format='{state_symbol} {artist} - {title} ({total})', state_symbols=STATE_SYMBOLS, **kwargs): + player_func = getattr(self, 'player_{0}'.format(player)) + stats = { + 'state': 'fallback', + 'album': None, + 'artist': None, + 'title': None, + 'elapsed': None, + 'total': None, + } + func_stats = player_func(**kwargs) + if not func_stats: + return None + stats.update(func_stats) + stats['state_symbol'] = state_symbols.get(stats['state']) + return format.format(**stats) + + @staticmethod + def _convert_state(state): + state = state.lower() + if 'play' in state: + return 'play' + if 'pause' in state: + return 'pause' + if 'stop' in state: + return 'stop' + + @staticmethod + def _convert_seconds(seconds): + return '{0:.0f}:{1:02.0f}'.format(*divmod(float(seconds), 60)) + + def player_cmus(self, pl): + '''Return cmus player information. + + cmus-remote -Q returns data with multi-level information i.e. + status playing + file + tag artist + tag title + tag .. + tag n + set continue + set repeat + set .. + set n + + For the information we are looking for we don’t really care if we’re on + the tag level or the set level. The dictionary comprehension in this + method takes anything in ignore_levels and brings the key inside that + to the first level of the dictionary. 
+ ''' + now_playing_str = run_cmd(pl, ['cmus-remote', '-Q']) + if not now_playing_str: + return + ignore_levels = ('tag', 'set',) + now_playing = dict(((token[0] if token[0] not in ignore_levels else token[1], + (' '.join(token[1:]) if token[0] not in ignore_levels else + ' '.join(token[2:]))) for token in [line.split(' ') for line in now_playing_str.split('\n')[:-1]])) + state = self._convert_state(now_playing.get('status')) + return { + 'state': state, + 'album': now_playing.get('album'), + 'artist': now_playing.get('artist'), + 'title': now_playing.get('title'), + 'elapsed': self._convert_seconds(now_playing.get('position', 0)), + 'total': self._convert_seconds(now_playing.get('duration', 0)), + } + + def player_mpd(self, pl, host='localhost', port=6600): + try: + import mpd + except ImportError: + now_playing = run_cmd(pl, ['mpc', 'current', '-f', '%album%\n%artist%\n%title%\n%time%', '-h', str(host), '-p', str(port)]) + if not now_playing: + return + now_playing = now_playing.split('\n') + return { + 'album': now_playing[0], + 'artist': now_playing[1], + 'title': now_playing[2], + 'total': now_playing[3], + } + else: + client = mpd.MPDClient() + client.connect(host, port) + now_playing = client.currentsong() + if not now_playing: + return + status = client.status() + client.close() + client.disconnect() + return { + 'state': status.get('state'), + 'album': now_playing.get('album'), + 'artist': now_playing.get('artist'), + 'title': now_playing.get('title'), + 'elapsed': self._convert_seconds(now_playing.get('elapsed', 0)), + 'total': self._convert_seconds(now_playing.get('time', 0)), + } + + def player_dbus(self, player_name, bus_name, player_path, iface_prop, iface_player): + try: + import dbus + except ImportError: + self.exception('Could not add {0} segment: requires dbus module', player_name) + return + bus = dbus.SessionBus() + try: + player = bus.get_object(bus_name, player_path) + iface = dbus.Interface(player, iface_prop) + info = iface.Get(iface_player, 'Metadata') + status = iface.Get(iface_player, 'PlaybackStatus') + except dbus.exceptions.DBusException: + return + if not info: + return + album = out_u(info.get('xesam:album')) + title = out_u(info.get('xesam:title')) + artist = info.get('xesam:artist') + state = self._convert_state(status) + if artist: + artist = out_u(artist[0]) + return { + 'state': state, + 'album': album, + 'artist': artist, + 'title': title, + 'total': self._convert_seconds(info.get('mpris:length') / 1e6), + } + + def player_spotify_dbus(self, pl): + return self.player_dbus( + player_name='Spotify', + bus_name='com.spotify.qt', + player_path='/', + iface_prop='org.freedesktop.DBus.Properties', + iface_player='org.freedesktop.MediaPlayer2', + ) + + def player_clementine(self, pl): + return self.player_dbus( + player_name='Clementine', + bus_name='org.mpris.MediaPlayer2.clementine', + player_path='/org/mpris/MediaPlayer2', + iface_prop='org.freedesktop.DBus.Properties', + iface_player='org.mpris.MediaPlayer2.Player', + ) + + def player_spotify_apple_script(self, pl): + status_delimiter = '-~`/=' + ascript = ''' + tell application "System Events" + set process_list to (name of every process) + end tell + + if process_list contains "Spotify" then + tell application "Spotify" + if player state is playing or player state is paused then + set track_name to name of current track + set artist_name to artist of current track + set album_name to album of current track + set track_length to duration of current track + set now_playing to "" & player state & "{0}" & 
album_name & "{0}" & artist_name & "{0}" & track_name & "{0}" & track_length
+						return now_playing
+					else
+						return player state
+					end if
+
+				end tell
+			else
+				return "stopped"
+			end if
+		'''.format(status_delimiter)
+
+		spotify = asrun(pl, ascript)
+		if not spotify:
+			return None
+
+		spotify_status = spotify.split(status_delimiter)
+		state = self._convert_state(spotify_status[0])
+		if state == 'stop':
+			return None
+		return {
+			'state': state,
+			'album': spotify_status[1],
+			'artist': spotify_status[2],
+			'title': spotify_status[3],
+			'total': self._convert_seconds(int(spotify_status[4]))
+		}
+
+	try:
+		__import__('dbus')
+	except ImportError:
+		if sys.platform.startswith('darwin'):
+			player_spotify = player_spotify_apple_script
+		else:
+			player_spotify = player_spotify_dbus
+	else:
+		player_spotify = player_spotify_dbus
+
+	def player_rhythmbox(self, pl):
+		now_playing = run_cmd(pl, ['rhythmbox-client', '--no-start', '--no-present', '--print-playing-format', '%at\n%aa\n%tt\n%te\n%td'])
+		if not now_playing:
+			return
+		now_playing = now_playing.split('\n')
+		return {
+			'album': now_playing[0],
+			'artist': now_playing[1],
+			'title': now_playing[2],
+			'elapsed': now_playing[3],
+			'total': now_playing[4],
+		}
+
+	def player_rdio(self, pl):
+		status_delimiter = '-~`/='
+		ascript = '''
+			tell application "System Events"
+				set rdio_active to the count(every process whose name is "Rdio")
+				if rdio_active is 0 then
+					return
+				end if
+			end tell
+			tell application "Rdio"
+				set rdio_name to the name of the current track
+				set rdio_artist to the artist of the current track
+				set rdio_album to the album of the current track
+				set rdio_duration to the duration of the current track
+				set rdio_state to the player state
+				set rdio_elapsed to the player position
+				return rdio_name & "{0}" & rdio_artist & "{0}" & rdio_album & "{0}" & rdio_elapsed & "{0}" & rdio_duration & "{0}" & rdio_state
+			end tell
+		'''.format(status_delimiter)
+		now_playing = asrun(pl, ascript)
+		if not now_playing:
+			return
+		now_playing = now_playing.split('\n')
+		if len(now_playing) != 6:
+			return
+		state = self._convert_state(now_playing[5])
+		total = self._convert_seconds(now_playing[4])
+		elapsed = self._convert_seconds(float(now_playing[3]) * float(now_playing[4]) / 100)
+		return {
+			'title': now_playing[0],
+			'artist': now_playing[1],
+			'album': now_playing[2],
+			'elapsed': elapsed,
+			'total': total,
+			'state': state,
+			'state_symbol': STATE_SYMBOLS.get(state)
+		}
+now_playing = NowPlayingSegment()
diff --git a/powerline/segments/common/sys.py b/powerline/segments/common/sys.py
new file mode 100644
index 00000000..d3a56307
--- /dev/null
+++ b/powerline/segments/common/sys.py
@@ -0,0 +1,174 @@
+# vim:fileencoding=utf-8:noet
+from __future__ import (unicode_literals, division, absolute_import, print_function)
+
+import os
+
+from multiprocessing import cpu_count as _cpu_count
+
+from powerline.lib.threaded import ThreadedSegment
+from powerline.lib import add_divider_highlight_group
+from powerline.segments import with_docstring
+
+
+cpu_count = None
+
+
+def system_load(pl, format='{avg:.1f}', threshold_good=1, threshold_bad=2, track_cpu_count=False):
+	'''Return system load average.
+
+	Highlights using ``system_load_good``, ``system_load_bad`` and
+	``system_load_ugly`` highlighting groups, depending on the thresholds
+	passed to the function.
+ + :param str format: + format string, receives ``avg`` as an argument + :param float threshold_good: + threshold for gradient level 0: any normalized load average below this + value will have this gradient level. + :param float threshold_bad: + threshold for gradient level 100: any normalized load average above this + value will have this gradient level. Load averages between + ``threshold_good`` and ``threshold_bad`` receive gradient level that + indicates relative position in this interval: + (``100 * (cur-good) / (bad-good)``). + Note: both parameters are checked against normalized load averages. + :param bool track_cpu_count: + if True powerline will continuously poll the system to detect changes + in the number of CPUs. + + Divider highlight group used: ``background:divider``. + + Highlight groups used: ``system_load_gradient`` (gradient) or ``system_load``. + ''' + global cpu_count + try: + cpu_num = cpu_count = _cpu_count() if cpu_count is None or track_cpu_count else cpu_count + except NotImplementedError: + pl.warn('Unable to get CPU count: method is not implemented') + return None + ret = [] + for avg in os.getloadavg(): + normalized = avg / cpu_num + if normalized < threshold_good: + gradient_level = 0 + elif normalized < threshold_bad: + gradient_level = (normalized - threshold_good) * 100.0 / (threshold_bad - threshold_good) + else: + gradient_level = 100 + ret.append({ + 'contents': format.format(avg=avg), + 'highlight_group': ['system_load_gradient', 'system_load'], + 'divider_highlight_group': 'background:divider', + 'gradient_level': gradient_level, + }) + ret[0]['contents'] += ' ' + ret[1]['contents'] += ' ' + return ret + + +try: + import psutil + + class CPULoadPercentSegment(ThreadedSegment): + interval = 1 + + def update(self, old_cpu): + return psutil.cpu_percent(interval=None) + + def run(self): + while not self.shutdown_event.is_set(): + try: + self.update_value = psutil.cpu_percent(interval=self.interval) + except Exception as e: + self.exception('Exception while calculating cpu_percent: {0}', str(e)) + + def render(self, cpu_percent, format='{0:.0f}%', **kwargs): + if not cpu_percent: + return None + return [{ + 'contents': format.format(cpu_percent), + 'gradient_level': cpu_percent, + 'highlight_group': ['cpu_load_percent_gradient', 'cpu_load_percent'], + }] +except ImportError: + class CPULoadPercentSegment(ThreadedSegment): + interval = 1 + + @staticmethod + def startup(**kwargs): + pass + + @staticmethod + def start(): + pass + + @staticmethod + def shutdown(): + pass + + @staticmethod + def render(cpu_percent, pl, format='{0:.0f}%', **kwargs): + pl.warn('Module “psutil” is not installed, thus CPU load is not available') + return None + + +cpu_load_percent = with_docstring(CPULoadPercentSegment(), +'''Return the average CPU load as a percentage. + +Requires the ``psutil`` module. + +:param str format: + Output format. Accepts measured CPU load as the first argument. + +Highlight groups used: ``cpu_load_percent_gradient`` (gradient) or ``cpu_load_percent``. +''') + + +if os.path.exists('/proc/uptime'): + def _get_uptime(): + with open('/proc/uptime', 'r') as f: + return int(float(f.readline().split()[0])) +elif 'psutil' in globals(): + from time import time + + def _get_uptime(): + # psutil.BOOT_TIME is not subject to clock adjustments, but time() is. + # Thus it is a fallback to /proc/uptime reading and not the reverse. 
+ return int(time() - psutil.BOOT_TIME) +else: + def _get_uptime(): + raise NotImplementedError + + +@add_divider_highlight_group('background:divider') +def uptime(pl, days_format='{days:d}d', hours_format=' {hours:d}h', minutes_format=' {minutes:d}m', seconds_format=' {seconds:d}s', shorten_len=3): + '''Return system uptime. + + :param str days_format: + day format string, will be passed ``days`` as the argument + :param str hours_format: + hour format string, will be passed ``hours`` as the argument + :param str minutes_format: + minute format string, will be passed ``minutes`` as the argument + :param str seconds_format: + second format string, will be passed ``seconds`` as the argument + :param int shorten_len: + shorten the amount of units (days, hours, etc.) displayed + + Divider highlight group used: ``background:divider``. + ''' + try: + seconds = _get_uptime() + except NotImplementedError: + pl.warn('Unable to get uptime. You should install psutil module') + return None + minutes, seconds = divmod(seconds, 60) + hours, minutes = divmod(minutes, 60) + days, hours = divmod(hours, 24) + time_formatted = list(filter(None, [ + days_format.format(days=days) if days and days_format else None, + hours_format.format(hours=hours) if hours and hours_format else None, + minutes_format.format(minutes=minutes) if minutes and minutes_format else None, + seconds_format.format(seconds=seconds) if seconds and seconds_format else None, + ]))[0:shorten_len] + return ''.join(time_formatted).strip() diff --git a/powerline/segments/common/time.py b/powerline/segments/common/time.py new file mode 100644 index 00000000..f13e196e --- /dev/null +++ b/powerline/segments/common/time.py @@ -0,0 +1,89 @@ +# vim:fileencoding=utf-8:noet +from __future__ import (unicode_literals, division, absolute_import, print_function) + +from datetime import datetime + + +def date(pl, format='%Y-%m-%d', istime=False): + '''Return the current date. + + :param str format: + strftime-style date format string + :param bool istime: + If true then segment uses ``time`` highlight group. + + Divider highlight group used: ``time:divider``. + + Highlight groups used: ``time`` or ``date``. + ''' + return [{ + 'contents': datetime.now().strftime(format), + 'highlight_group': (['time'] if istime else []) + ['date'], + 'divider_highlight_group': 'time:divider' if istime else None, + }] + + +UNICODE_TEXT_TRANSLATION = { + ord('\''): '’', + ord('-'): '‐', +} + + +def fuzzy_time(pl, unicode_text=False): + '''Display the current time as fuzzy time, e.g. "quarter past six". + + :param bool unicode_text: + If true then hyphenminuses (regular ASCII ``-``) and single quotes are + replaced with unicode dashes and apostrophes. 
+ ''' + hour_str = ['twelve', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven'] + minute_str = { + 5: 'five past', + 10: 'ten past', + 15: 'quarter past', + 20: 'twenty past', + 25: 'twenty-five past', + 30: 'half past', + 35: 'twenty-five to', + 40: 'twenty to', + 45: 'quarter to', + 50: 'ten to', + 55: 'five to', + } + special_case_str = { + (23, 58): 'round about midnight', + (23, 59): 'round about midnight', + (0, 0): 'midnight', + (0, 1): 'round about midnight', + (0, 2): 'round about midnight', + (12, 0): 'noon', + } + + now = datetime.now() + + try: + return special_case_str[(now.hour, now.minute)] + except KeyError: + pass + + hour = now.hour + if now.minute > 32: + if hour == 23: + hour = 0 + else: + hour += 1 + if hour > 11: + hour = hour - 12 + hour = hour_str[hour] + + minute = int(round(now.minute / 5.0) * 5) + if minute == 60 or minute == 0: + result = ' '.join([hour, 'o\'clock']) + else: + minute = minute_str[minute] + result = ' '.join([minute, hour]) + + if unicode_text: + result = result.translate(UNICODE_TEXT_TRANSLATION) + + return result diff --git a/powerline/segments/common/vcs.py b/powerline/segments/common/vcs.py new file mode 100644 index 00000000..a6311bc5 --- /dev/null +++ b/powerline/segments/common/vcs.py @@ -0,0 +1,33 @@ +# vim:fileencoding=utf-8:noet +from __future__ import (unicode_literals, division, absolute_import, print_function) + +from powerline.lib.vcs import guess, tree_status +from powerline.theme import requires_segment_info, requires_filesystem_watcher + + +@requires_filesystem_watcher +@requires_segment_info +def branch(pl, segment_info, create_watcher, status_colors=False): + '''Return the current VCS branch. + + :param bool status_colors: + determines whether repository status will be used to determine highlighting. Default: False. + + Highlight groups used: ``branch_clean``, ``branch_dirty``, ``branch``. + ''' + name = segment_info['getcwd']() + repo = guess(path=name, create_watcher=create_watcher) + if repo is not None: + branch = repo.branch() + scol = ['branch'] + if status_colors: + try: + status = tree_status(repo, pl) + except Exception as e: + pl.exception('Failed to compute tree status: {0}', str(e)) + status = '?' + scol.insert(0, 'branch_dirty' if status and status.strip() else 'branch_clean') + return [{ + 'contents': branch, + 'highlight_group': scol, + }] diff --git a/powerline/segments/common/wthr.py b/powerline/segments/common/wthr.py new file mode 100644 index 00000000..8819806f --- /dev/null +++ b/powerline/segments/common/wthr.py @@ -0,0 +1,229 @@ +# vim:fileencoding=utf-8:noet +from __future__ import (unicode_literals, division, absolute_import, print_function) + +import json + +from powerline.lib.url import urllib_read, urllib_urlencode +from powerline.lib.threaded import KwThreadedSegment +from powerline.segments import with_docstring + + +# XXX Warning: module name must not be equal to the segment name as long as this +# segment is imported into powerline.segments.common module. 
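+
+# WeatherSegment below queries the Yahoo! YQL weather API; when no
+# location_query is given it first geolocates the host via
+# http://freegeoip.net/json/. Yahoo! condition codes are then mapped to icon
+# and highlight-group names through the weather_conditions_codes table.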
+ + +# Weather condition code descriptions available at +# http://developer.yahoo.com/weather/#codes +weather_conditions_codes = ( + ('tornado', 'stormy'), # 0 + ('tropical_storm', 'stormy'), # 1 + ('hurricane', 'stormy'), # 2 + ('severe_thunderstorms', 'stormy'), # 3 + ('thunderstorms', 'stormy'), # 4 + ('mixed_rain_and_snow', 'rainy' ), # 5 + ('mixed_rain_and_sleet', 'rainy' ), # 6 + ('mixed_snow_and_sleet', 'snowy' ), # 7 + ('freezing_drizzle', 'rainy' ), # 8 + ('drizzle', 'rainy' ), # 9 + ('freezing_rain', 'rainy' ), # 10 + ('showers', 'rainy' ), # 11 + ('showers', 'rainy' ), # 12 + ('snow_flurries', 'snowy' ), # 13 + ('light_snow_showers', 'snowy' ), # 14 + ('blowing_snow', 'snowy' ), # 15 + ('snow', 'snowy' ), # 16 + ('hail', 'snowy' ), # 17 + ('sleet', 'snowy' ), # 18 + ('dust', 'foggy' ), # 19 + ('fog', 'foggy' ), # 20 + ('haze', 'foggy' ), # 21 + ('smoky', 'foggy' ), # 22 + ('blustery', 'foggy' ), # 23 + ('windy', ), # 24 + ('cold', 'day' ), # 25 + ('clouds', 'cloudy'), # 26 + ('mostly_cloudy_night', 'cloudy'), # 27 + ('mostly_cloudy_day', 'cloudy'), # 28 + ('partly_cloudy_night', 'cloudy'), # 29 + ('partly_cloudy_day', 'cloudy'), # 30 + ('clear_night', 'night' ), # 31 + ('sun', 'sunny' ), # 32 + ('fair_night', 'night' ), # 33 + ('fair_day', 'day' ), # 34 + ('mixed_rain_and_hail', 'rainy' ), # 35 + ('hot', 'sunny' ), # 36 + ('isolated_thunderstorms', 'stormy'), # 37 + ('scattered_thunderstorms', 'stormy'), # 38 + ('scattered_thunderstorms', 'stormy'), # 39 + ('scattered_showers', 'rainy' ), # 40 + ('heavy_snow', 'snowy' ), # 41 + ('scattered_snow_showers', 'snowy' ), # 42 + ('heavy_snow', 'snowy' ), # 43 + ('partly_cloudy', 'cloudy'), # 44 + ('thundershowers', 'rainy' ), # 45 + ('snow_showers', 'snowy' ), # 46 + ('isolated_thundershowers', 'rainy' ), # 47 +) +# ('day', (25, 34)), +# ('rainy', (5, 6, 8, 9, 10, 11, 12, 35, 40, 45, 47)), +# ('cloudy', (26, 27, 28, 29, 30, 44)), +# ('snowy', (7, 13, 14, 15, 16, 17, 18, 41, 42, 43, 46)), +# ('stormy', (0, 1, 2, 3, 4, 37, 38, 39)), +# ('foggy', (19, 20, 21, 22, 23)), +# ('sunny', (32, 36)), +# ('night', (31, 33))): +weather_conditions_icons = { + 'day': 'DAY', + 'blustery': 'WIND', + 'rainy': 'RAIN', + 'cloudy': 'CLOUDS', + 'snowy': 'SNOW', + 'stormy': 'STORM', + 'foggy': 'FOG', + 'sunny': 'SUN', + 'night': 'NIGHT', + 'windy': 'WINDY', + 'not_available': 'NA', + 'unknown': 'UKN', +} + +temp_conversions = { + 'C': lambda temp: temp, + 'F': lambda temp: (temp * 9 / 5) + 32, + 'K': lambda temp: temp + 273.15, +} + +# Note: there are also unicode characters for units: ℃, ℉ and K +temp_units = { + 'C': '°C', + 'F': '°F', + 'K': 'K', +} + + +class WeatherSegment(KwThreadedSegment): + interval = 600 + default_location = None + location_urls = {} + + @staticmethod + def key(location_query=None, **kwargs): + return location_query + + def get_request_url(self, location_query): + try: + return self.location_urls[location_query] + except KeyError: + if location_query is None: + location_data = json.loads(urllib_read('http://freegeoip.net/json/')) + location = ','.join(( + location_data['city'], + location_data['region_name'], + location_data['country_code'] + )) + self.info('Location returned by freegeoip is {0}', location) + else: + location = location_query + query_data = { + 'q': + 'use "https://raw.githubusercontent.com/yql/yql-tables/master/weather/weather.bylocation.xml" as we;' + 'select * from we where location="{0}" and unit="c"'.format(location).encode('utf-8'), + 'format': 'json', + } + self.location_urls[location_query] = url = ( + 
'http://query.yahooapis.com/v1/public/yql?' + urllib_urlencode(query_data)) + return url + + def compute_state(self, location_query): + url = self.get_request_url(location_query) + raw_response = urllib_read(url) + if not raw_response: + self.error('Failed to get response') + return + response = json.loads(raw_response) + condition = response['query']['results']['weather']['rss']['channel']['item']['condition'] + condition_code = int(condition['code']) + temp = float(condition['temp']) + + try: + icon_names = weather_conditions_codes[condition_code] + except IndexError: + if condition_code == 3200: + icon_names = ('not_available',) + self.warn('Weather is not available for location {0}', self.location) + else: + icon_names = ('unknown',) + self.error('Unknown condition code: {0}', condition_code) + + return (temp, icon_names) + + def render_one(self, weather, icons=None, unit='C', temp_format=None, temp_coldest=-30, temp_hottest=40, **kwargs): + if not weather: + return None + + temp, icon_names = weather + + for icon_name in icon_names: + if icons: + if icon_name in icons: + icon = icons[icon_name] + break + else: + icon = weather_conditions_icons[icon_names[-1]] + + temp_format = temp_format or ('{temp:.0f}' + temp_units[unit]) + converted_temp = temp_conversions[unit](temp) + if temp <= temp_coldest: + gradient_level = 0 + elif temp >= temp_hottest: + gradient_level = 100 + else: + gradient_level = (temp - temp_coldest) * 100.0 / (temp_hottest - temp_coldest) + groups = ['weather_condition_' + icon_name for icon_name in icon_names] + ['weather_conditions', 'weather'] + return [ + { + 'contents': icon + ' ', + 'highlight_group': groups, + 'divider_highlight_group': 'background:divider', + }, + { + 'contents': temp_format.format(temp=converted_temp), + 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], + 'divider_highlight_group': 'background:divider', + 'gradient_level': gradient_level, + }, + ] + + +weather = with_docstring(WeatherSegment(), +'''Return weather from Yahoo! Weather. + +Uses GeoIP lookup from http://freegeoip.net/ to automatically determine +your current location. This should be changed if you’re in a VPN or if your +IP address is registered at another location. + +Returns a list of colorized icon and temperature segments depending on +weather conditions. + +:param str unit: + temperature unit, can be one of ``F``, ``C`` or ``K`` +:param str location_query: + location query for your current location, e.g. ``oslo, norway`` +:param dict icons: + dict for overriding default icons, e.g. ``{'heavy_snow' : u'❆'}`` +:param str temp_format: + format string, receives ``temp`` as an argument. Should also hold unit. +:param float temp_coldest: + coldest temperature. Any temperature below it will have gradient level equal + to zero. +:param float temp_hottest: + hottest temperature. Any temperature above it will have gradient level equal + to 100. Temperatures between ``temp_coldest`` and ``temp_hottest`` receive + gradient level that indicates relative position in this interval + (``100 * (cur-coldest) / (hottest-coldest)``). + +Divider highlight group used: ``background:divider``. + +Highlight groups used: ``weather_conditions`` or ``weather``, ``weather_temp_gradient`` (gradient) or ``weather``. +Also uses ``weather_conditions_{condition}`` for all weather conditions supported by Yahoo. 
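These arguments are normally supplied from a user theme rather than passed directly. A minimal sketch of such a theme entry, assuming the ``function``/``args`` keys of Powerline theme files (key names are illustrative only and are not part of this patch)::

	{
		"function": "powerline.segments.common.wthr.weather",
		"args": {
			"unit": "F",
			"temp_coldest": 0,
			"temp_hottest": 100,
			"icons": {"heavy_snow": "❆"}
		}
	}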
+''') diff --git a/powerline/segments/shell.py b/powerline/segments/shell.py index f1ac2845..1b763723 100644 --- a/powerline/segments/shell.py +++ b/powerline/segments/shell.py @@ -3,7 +3,8 @@ from __future__ import (unicode_literals, division, absolute_import, print_funct from powerline.theme import requires_segment_info from powerline.segments import with_docstring -from powerline.segments.common import CwdSegment +from powerline.segments.common.env import CwdSegment +from powerline.lib.unicode import out_u @requires_segment_info @@ -136,7 +137,7 @@ class ShellCwdSegment(CwdSegment): def get_shortened_path(self, pl, segment_info, use_shortened_path=True, **kwargs): if use_shortened_path: try: - return segment_info['shortened_path'] + return out_u(segment_info['shortened_path']) except KeyError: pass return super(ShellCwdSegment, self).get_shortened_path(pl, segment_info, **kwargs) diff --git a/powerline/segments/vim/__init__.py b/powerline/segments/vim/__init__.py index 9f59b7c4..6467890d 100644 --- a/powerline/segments/vim/__init__.py +++ b/powerline/segments/vim/__init__.py @@ -28,13 +28,11 @@ except ImportError: vim_funcs = { - 'virtcol': vim_get_func('virtcol', rettype=int), + 'virtcol': vim_get_func('virtcol', rettype='int'), 'getpos': vim_get_func('getpos'), - 'fnamemodify': vim_get_func('fnamemodify'), - 'expand': vim_get_func('expand'), - 'bufnr': vim_get_func('bufnr', rettype=int), - 'line2byte': vim_get_func('line2byte', rettype=int), - 'line': vim_get_func('line', rettype=int), + 'fnamemodify': vim_get_func('fnamemodify', rettype='bytes'), + 'line2byte': vim_get_func('line2byte', rettype='int'), + 'line': vim_get_func('line', rettype='int'), } vim_modes = { @@ -135,8 +133,8 @@ def visual_range(pl, segment_info, CTRL_V_text='{rows} x {vcols}', v_text_onelin vcols Number of virtual columns in the selection ========= ============================================================= ''' - sline, scol, soff = [int(v) for v in vim_funcs['getpos']("v")[1:]] - eline, ecol, eoff = [int(v) for v in vim_funcs['getpos'](".")[1:]] + sline, scol, soff = [int(v) for v in vim_funcs['getpos']('v')[1:]] + eline, ecol, eoff = [int(v) for v in vim_funcs['getpos']('.')[1:]] svcol = vim_funcs['virtcol']([sline, scol, soff]) evcol = vim_funcs['virtcol']([eline, ecol, eoff]) rows = abs(eline - sline) + 1 @@ -225,7 +223,7 @@ def file_scheme(pl, segment_info): name will look like :file:`zipfile:/path/to/archive.zip::file.txt`. ``file_scheme`` segment will catch ``zipfile`` part here. ''' - name = buffer_name(segment_info['buffer']) + name = buffer_name(segment_info) if not name: return None match = SCHEME_RE.match(name) @@ -254,7 +252,7 @@ def file_directory(pl, segment_info, remove_scheme=True, shorten_user=True, shor Shorten all directories in :file:`/home/` to :file:`~user/` instead of :file:`/home/user/`. Does not work for files with scheme present. 
''' - name = buffer_name(segment_info['buffer']) + name = buffer_name(segment_info) if not name: return None match = SCHEME_RE.match(name) @@ -271,7 +269,7 @@ def file_directory(pl, segment_info, remove_scheme=True, shorten_user=True, shor return None if shorten_home and file_directory.startswith('/home/'): file_directory = b'~' + file_directory[6:] - file_directory = file_directory.decode('utf-8', 'powerline_vim_strtrans_error') + file_directory = file_directory.decode(segment_info['encoding'], 'powerline_vim_strtrans_error') return file_directory + os.sep @@ -286,7 +284,7 @@ def file_name(pl, segment_info, display_no_file=False, no_file_text='[No file]') Highlight groups used: ``file_name_no_file`` or ``file_name``, ``file_name``. ''' - name = buffer_name(segment_info['buffer']) + name = buffer_name(segment_info) if not name: if display_no_file: return [{ @@ -295,7 +293,7 @@ def file_name(pl, segment_info, display_no_file=False, no_file_text='[No file]') }] else: return None - return os.path.basename(name).decode('utf-8', 'powerline_vim_strtrans_error') + return os.path.basename(name).decode(segment_info['encoding'], 'powerline_vim_strtrans_error') @window_cached @@ -306,7 +304,7 @@ def file_size(pl, suffix='B', si_prefix=False): string appended to the file size :param bool si_prefix: use SI prefix, e.g. MB instead of MiB - :return: file size or None if the file isn't saved or if the size is too big to fit in a number + :return: file size or None if the file isn’t saved or if the size is too big to fit in a number ''' # Note: returns file size in &encoding, not in &fileencoding. But returned # size is updated immediately; and it is valid for any buffer @@ -470,10 +468,13 @@ def modified_buffers(pl, text='+ ', join_str=','): :param str join_str: string to use for joining the modified buffer list ''' - buffer_len = vim_funcs['bufnr']('$') - buffer_mod = [str(bufnr) for bufnr in range(1, buffer_len + 1) if int(getbufvar(bufnr, '&modified') or 0)] - if buffer_mod: - return text + join_str.join(buffer_mod) + buffer_mod_text = join_str.join(( + str(buffer.number) + for buffer in vim.buffers + if int(vim_getbufoption({'buffer': buffer, 'bufnr': buffer.number}, 'modified')) + )) + if buffer_mod_text: + return text + buffer_mod_text return None @@ -489,7 +490,7 @@ def branch(pl, segment_info, create_watcher, status_colors=False): Divider highlight group used: ``branch:divider``. ''' - name = segment_info['buffer'].name + name = buffer_name(segment_info) skip = not (name and (not vim_getbufoption(segment_info, 'buftype'))) if not skip: repo = guess(path=name, create_watcher=create_watcher) @@ -513,7 +514,7 @@ def file_vcs_status(pl, segment_info, create_watcher): Highlight groups used: ``file_vcs_status``. ''' - name = segment_info['buffer'].name + name = buffer_name(segment_info) skip = not (name and (not vim_getbufoption(segment_info, 'buftype'))) if not skip: repo = guess(path=name, create_watcher=create_watcher) diff --git a/powerline/segments/vim/plugin/tagbar.py b/powerline/segments/vim/plugin/tagbar.py index 7421208e..5daf5055 100644 --- a/powerline/segments/vim/plugin/tagbar.py +++ b/powerline/segments/vim/plugin/tagbar.py @@ -10,7 +10,21 @@ from powerline.segments.vim import window_cached @window_cached -def current_tag(pl): +def current_tag(pl, flags='s'): + '''Return tag that is near the cursor. + + :param str flags: + Specifies additional properties of the displayed tag. 
Supported values: + + * s - display complete signature + * f - display the full hierarchy of the tag + * p - display the raw prototype + + More info in the `official documentation`_ (search for + “tagbar#currenttag”). + + .. _`official documentation`: https://github.com/majutsushi/tagbar/blob/master/doc/tagbar.txt + ''' if not int(vim.eval('exists(":Tagbar")')): - return - return vim.eval('tagbar#currenttag("%s", "")') + return None + return vim.eval('tagbar#currenttag("%s", "", "{0}")'.format(flags)) diff --git a/powerline/theme.py b/powerline/theme.py index 6bfc9b45..53921e43 100644 --- a/powerline/theme.py +++ b/powerline/theme.py @@ -163,6 +163,6 @@ class Theme(object): elif segment['align'] == 'c': segment['contents'] = segment['contents'].center(segment['width']) # We need to yield a copy of the segment, or else mode-dependent - # segment contents can't be cached correctly e.g. when caching + # segment contents can’t be cached correctly e.g. when caching # non-current window contents for vim statuslines yield segment.copy() diff --git a/powerline/vim.py b/powerline/vim.py index d0123707..29c82bdf 100644 --- a/powerline/vim.py +++ b/powerline/vim.py @@ -2,6 +2,7 @@ from __future__ import (unicode_literals, division, absolute_import, print_function) import sys +import json from itertools import count @@ -11,9 +12,6 @@ from powerline.bindings.vim import vim_get_func, vim_getvar from powerline import Powerline, FailedUnicode from powerline.lib import mergedicts -if not hasattr(vim, 'bindeval'): - import json - def _override_from(config, override_varname): try: @@ -134,7 +132,7 @@ class VimPowerline(Powerline): set_pycmd(pycmd) # pyeval() and vim.bindeval were both introduced in one patch - if not hasattr(vim, 'bindeval') and can_replace_pyeval: + if (not hasattr(vim, 'bindeval') and can_replace_pyeval) or pyeval == 'PowerlinePyeval': vim.command((''' function! PowerlinePyeval(e) {pycmd} powerline.do_pyeval() @@ -148,23 +146,30 @@ class VimPowerline(Powerline): self.update_renderer() __main__.powerline = self - if ( - bool(int(vim.eval("has('gui_running') && argc() == 0"))) - and not vim.current.buffer.name - and len(vim.windows) == 1 - ): - # Hack to show startup screen. Problems in GUI: - # - Defining local value of &statusline option while computing global - # value purges startup screen. - # - Defining highlight group while computing statusline purges startup - # screen. - # This hack removes the “while computing statusline” part: both things - # are defined, but they are defined right now. - # - # The above condition disables this hack if no GUI is running, Vim did - # not open any files and there is only one window. Without GUI - # everything works, in other cases startup screen is not shown. - self.new_window() + try: + if ( + bool(int(vim.eval('has(\'gui_running\') && argc() == 0'))) + and not vim.current.buffer.name + and len(vim.windows) == 1 + ): + # Hack to show startup screen. Problems in GUI: + # - Defining local value of &statusline option while computing + # global value purges startup screen. + # - Defining highlight group while computing statusline purges + # startup screen. + # This hack removes the “while computing statusline” part: both + # things are defined, but they are defined right now. + # + # The above condition disables this hack if no GUI is running, + # Vim did not open any files and there is only one window. + # Without GUI everything works, in other cases startup screen is + # not shown. 
+ self.new_window() + except UnicodeDecodeError: + # vim.current.buffer.name may raise UnicodeDecodeError when using + # Python-3*. Fortunately, this means that current buffer is not + # empty buffer, so the above condition should be False. + pass # Cannot have this in one line due to weird newline handling (in :execute # context newline is considered part of the command in just the same cases @@ -245,14 +250,15 @@ class VimPowerline(Powerline): def new_window(self): return self.render(*self.win_idx(None)) - if not hasattr(vim, 'bindeval'): - # Method for PowerlinePyeval function. Is here to reduce the number of - # requirements to __main__ globals to just one powerline object - # (previously it required as well vim and json) - @staticmethod - def do_pyeval(): - import __main__ - vim.command('return ' + json.dumps(eval(vim.eval('a:e'), __main__.__dict__))) + @staticmethod + def do_pyeval(): + '''Evaluate python string passed to PowerlinePyeval + + Is here to reduce the number of requirements to __main__ globals to just + one powerline object (previously it required as well vim and json). + ''' + import __main__ + vim.command('return ' + json.dumps(eval(vim.eval('a:e'), __main__.__dict__))) def setup_components(self, components): if components is None: diff --git a/scripts/powerline-daemon b/scripts/powerline-daemon index 5ac069d8..04cd1365 100755 --- a/scripts/powerline-daemon +++ b/scripts/powerline-daemon @@ -12,11 +12,11 @@ from select import select from signal import signal, SIGTERM from time import sleep from functools import partial -from locale import getpreferredencoding from io import BytesIO from powerline.shell import get_argparser, finish_args, ShellPowerline, write_output from powerline.lib.monotonic import monotonic +from powerline.lib.encoding import get_preferred_output_encoding is_daemon = False @@ -74,6 +74,7 @@ def render(args, environ, cwd): args.renderer_module, tuple(args.config) if args.config else None, tuple(args.theme_option) if args.theme_option else None, + tuple(args.config_path) if args.config_path else None, ) finish_args(args) powerline = None @@ -143,7 +144,7 @@ def do_write(conn, result): pass -encoding = getpreferredencoding() or 'UTF-8' +encoding = get_preferred_output_encoding() def safe_bytes(o, encoding=encoding): @@ -397,7 +398,7 @@ def main(): daemonize() if use_filesystem: - # Create a locked pid file containing the daemon's PID + # Create a locked pid file containing the daemon’s PID if lockpidfile() is None: if not args.quiet: sys.stderr.write( diff --git a/scripts/powerline-render b/scripts/powerline-render index 048c34ce..4d4903b9 100755 --- a/scripts/powerline-render +++ b/scripts/powerline-render @@ -8,14 +8,14 @@ from __future__ import (unicode_literals, division, absolute_import, print_funct import sys import os -from locale import getpreferredencoding - try: from powerline.shell import ShellPowerline, get_argparser, finish_args, write_output except ImportError: sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__))))) from powerline.shell import ShellPowerline, get_argparser, finish_args, write_output +from powerline.lib.unicode import get_preferred_output_encoding + if sys.version_info < (3,): write = sys.stdout.write @@ -28,4 +28,4 @@ if __name__ == '__main__': finish_args(args) powerline = ShellPowerline(args, run_once=True) segment_info = {'args': args, 'environ': os.environ} - write_output(args, powerline, segment_info, write, getpreferredencoding()) + write_output(args, powerline, segment_info, 
write, get_preferred_output_encoding()) diff --git a/setup.py b/setup.py index efb70e38..dfe88387 100644 --- a/setup.py +++ b/setup.py @@ -54,7 +54,7 @@ else: setup( name='powerline-status', - version='1.0', + version='1.1', description='The ultimate statusline/prompt utility.', long_description=README, classifiers=[ @@ -79,7 +79,7 @@ setup( author_email='kim.silkebaekken+vim@gmail.com', url='https://github.com/Lokaltog/powerline', license='MIT', - # XXX Python 3 doesn't allow compiled C files to be included in the scripts + # XXX Python 3 doesn’t allow compiled C files to be included in the scripts # list below. This is because Python 3 distutils tries to decode the file to # ASCII, and fails when powerline-client is a binary. # diff --git a/tests/lib/__init__.py b/tests/lib/__init__.py index 80af8097..2b5af567 100644 --- a/tests/lib/__init__.py +++ b/tests/lib/__init__.py @@ -42,7 +42,10 @@ def urllib_read(query_url): elif query_url.startswith('http://freegeoip.net/json/'): return '{"city": "Meppen", "region_code": "06", "region_name": "Niedersachsen", "areacode": "", "ip": "82.145.55.16", "zipcode": "49716", "longitude": 7.3167, "country_name": "Germany", "country_code": "DE", "metrocode": "", "latitude": 52.6833}' elif query_url.startswith('http://query.yahooapis.com/v1/public/'): - return r'{"query":{"count":1,"created":"2013-03-02T13:20:22Z","lang":"en-US","results":{"weather":{"rss":{"version":"2.0","geo":"http://www.w3.org/2003/01/geo/wgs84_pos#","yweather":"http://xml.weather.yahoo.com/ns/rss/1.0","channel":{"title":"Yahoo! Weather - Russia, RU","link":"http://us.rd.yahoo.com/dailynews/rss/weather/Russia__RU/*http://weather.yahoo.com/forecast/RSXX1511_c.html","description":"Yahoo! Weather for Russia, RU","language":"en-us","lastBuildDate":"Sat, 02 Mar 2013 4:58 pm MSK","ttl":"60","location":{"city":"Russia","country":"Russia","region":""},"units":{"distance":"km","pressure":"mb","speed":"km/h","temperature":"C"},"wind":{"chill":"-9","direction":"0","speed":""},"atmosphere":{"humidity":"94","pressure":"1006.1","rising":"0","visibility":""},"astronomy":{"sunrise":"10:04 am","sunset":"7:57 pm"},"image":{"title":"Yahoo! Weather","width":"142","height":"18","link":"http://weather.yahoo.com","url":"http://l.yimg.com/a/i/brand/purplelogo//uh/us/news-wea.gif"},"item":{"title":"Conditions for Russia, RU at 4:58 pm MSK","lat":"59.45","long":"108.83","link":"http://us.rd.yahoo.com/dailynews/rss/weather/Russia__RU/*http://weather.yahoo.com/forecast/RSXX1511_c.html","pubDate":"Sat, 02 Mar 2013 4:58 pm MSK","condition":{"code":"30","date":"Sat, 02 Mar 2013 4:58 pm MSK","temp":"-9","text":"Partly Cloudy"},"description":"
\nCurrent Conditions:\nPartly Cloudy, -9 C\nForecast:\nSat - Partly Cloudy. High: -9 Low: -19\nSun - Partly Cloudy. High: -12 Low: -18\n\nFull Forecast at Yahoo! Weather\n(provided by The Weather Channel)
","forecast":[{"code":"29","date":"2 Mar 2013","day":"Sat","high":"-9","low":"-19","text":"Partly Cloudy"},{"code":"30","date":"3 Mar 2013","day":"Sun","high":"-12","low":"-18","text":"Partly Cloudy"}],"guid":{"isPermaLink":"false","content":"RSXX1511_2013_03_03_7_00_MSK"}}}}}}}}' + if 'Meppen' in query_url: + return r'{"query":{"count":1,"created":"2013-03-02T13:20:22Z","lang":"en-US","results":{"weather":{"rss":{"version":"2.0","geo":"http://www.w3.org/2003/01/geo/wgs84_pos#","yweather":"http://xml.weather.yahoo.com/ns/rss/1.0","channel":{"title":"Yahoo! Weather - Russia, RU","link":"http://us.rd.yahoo.com/dailynews/rss/weather/Russia__RU/*http://weather.yahoo.com/forecast/RSXX1511_c.html","description":"Yahoo! Weather for Russia, RU","language":"en-us","lastBuildDate":"Sat, 02 Mar 2013 4:58 pm MSK","ttl":"60","location":{"city":"Russia","country":"Russia","region":""},"units":{"distance":"km","pressure":"mb","speed":"km/h","temperature":"C"},"wind":{"chill":"-9","direction":"0","speed":""},"atmosphere":{"humidity":"94","pressure":"1006.1","rising":"0","visibility":""},"astronomy":{"sunrise":"10:04 am","sunset":"7:57 pm"},"image":{"title":"Yahoo! Weather","width":"142","height":"18","link":"http://weather.yahoo.com","url":"http://l.yimg.com/a/i/brand/purplelogo//uh/us/news-wea.gif"},"item":{"title":"Conditions for Russia, RU at 4:58 pm MSK","lat":"59.45","long":"108.83","link":"http://us.rd.yahoo.com/dailynews/rss/weather/Russia__RU/*http://weather.yahoo.com/forecast/RSXX1511_c.html","pubDate":"Sat, 02 Mar 2013 4:58 pm MSK","condition":{"code":"30","date":"Sat, 02 Mar 2013 4:58 pm MSK","temp":"-9","text":"Partly Cloudy"},"description":"
\nCurrent Conditions:\nPartly Cloudy, -9 C\nForecast:\nSat - Partly Cloudy. High: -9 Low: -19\nSun - Partly Cloudy. High: -12 Low: -18\n\nFull Forecast at Yahoo! Weather\n(provided by The Weather Channel)
","forecast":[{"code":"29","date":"2 Mar 2013","day":"Sat","high":"-9","low":"-19","text":"Partly Cloudy"},{"code":"30","date":"3 Mar 2013","day":"Sun","high":"-12","low":"-18","text":"Partly Cloudy"}],"guid":{"isPermaLink":"false","content":"RSXX1511_2013_03_03_7_00_MSK"}}}}}}}}' + elif 'Moscow' in query_url: + return r'{"query":{"count":1,"created":"2013-03-02T13:20:22Z","lang":"en-US","results":{"weather":{"rss":{"version":"2.0","geo":"http://www.w3.org/2003/01/geo/wgs84_pos#","yweather":"http://xml.weather.yahoo.com/ns/rss/1.0","channel":{"title":"Yahoo! Weather - Russia, RU","link":"http://us.rd.yahoo.com/dailynews/rss/weather/Russia__RU/*http://weather.yahoo.com/forecast/RSXX1511_c.html","description":"Yahoo! Weather for Russia, RU","language":"en-us","lastBuildDate":"Sat, 02 Mar 2013 4:58 pm MSK","ttl":"60","location":{"city":"Russia","country":"Russia","region":""},"units":{"distance":"km","pressure":"mb","speed":"km/h","temperature":"C"},"wind":{"chill":"-9","direction":"0","speed":""},"atmosphere":{"humidity":"94","pressure":"1006.1","rising":"0","visibility":""},"astronomy":{"sunrise":"10:04 am","sunset":"7:57 pm"},"image":{"title":"Yahoo! Weather","width":"142","height":"18","link":"http://weather.yahoo.com","url":"http://l.yimg.com/a/i/brand/purplelogo//uh/us/news-wea.gif"},"item":{"title":"Conditions for Russia, RU at 4:58 pm MSK","lat":"59.45","long":"108.83","link":"http://us.rd.yahoo.com/dailynews/rss/weather/Russia__RU/*http://weather.yahoo.com/forecast/RSXX1511_c.html","pubDate":"Sat, 02 Mar 2013 4:58 pm MSK","condition":{"code":"30","date":"Sat, 02 Mar 2013 4:58 pm MSK","temp":"19","text":"Partly Cloudy"},"description":"
\nCurrent Conditions:\nPartly Cloudy, -9 C\nForecast:\nSat - Partly Cloudy. High: -9 Low: -19\nSun - Partly Cloudy. High: -12 Low: -18\n\nFull Forecast at Yahoo! Weather\n(provided by The Weather Channel)
","forecast":[{"code":"29","date":"2 Mar 2013","day":"Sat","high":"-9","low":"-19","text":"Partly Cloudy"},{"code":"30","date":"3 Mar 2013","day":"Sun","high":"-12","low":"-18","text":"Partly Cloudy"}],"guid":{"isPermaLink":"false","content":"RSXX1511_2013_03_03_7_00_MSK"}}}}}}}}' else: raise NotImplementedError diff --git a/tests/run_daemon_tests.sh b/tests/run_daemon_tests.sh new file mode 100755 index 00000000..e42a3bc8 --- /dev/null +++ b/tests/run_daemon_tests.sh @@ -0,0 +1,41 @@ +#!/bin/sh +FAILED=0 +export ADDRESS="powerline-ipc-test-$$" +echo "Powerline address: $ADDRESS" +if $PYTHON scripts/powerline-daemon -s$ADDRESS ; then + sleep 1 + if ! ( \ + $PYTHON client/powerline.py --socket $ADDRESS -p/dev/null shell left | \ + grep 'file not found' + ) ; then + echo "-p/dev/null argument ignored or not treated properly" + FAILED=1 + fi + if ( \ + $PYTHON client/powerline.py --socket $ADDRESS \ + -p$PWD/powerline/config_files shell left | \ + grep 'file not found' + ) ; then + echo "-p/dev/null argument remembered while it should not" + FAILED=1 + fi + if ! ( \ + cd tests && \ + $PYTHON ../client/powerline.py --socket $ADDRESS \ + -p$PWD/../powerline/config_files shell left | \ + grep 'tests' + ) ; then + echo "Output lacks string “tests”" + FAILED=1 + fi +else + echo "Daemon exited with status $?" + FAILED=1 +fi +if $PYTHON scripts/powerline-daemon -s$ADDRESS -k ; then + : +else + echo "powerline-daemon -k failed with exit code $?" + FAILED=1 +fi +exit $FAILED diff --git a/tests/test_configuration.py b/tests/test_configuration.py index 5172fb42..8ab6064f 100644 --- a/tests/test_configuration.py +++ b/tests/test_configuration.py @@ -653,9 +653,9 @@ class TestVim(TestCase): window = vim_module.current.window window_id = 1 winnr = window.number - self.assertEqual(powerline.render(window, window_id, winnr), '%#Pl_3_8404992_4_192_underline#\xa0abc%#Pl_4_192_NONE_None_NONE#>>') + self.assertEqual(powerline.render(window, window_id, winnr), b'%#Pl_3_8404992_4_192_underline#\xc2\xa0abc%#Pl_4_192_NONE_None_NONE#>>') vim_module._environ['TEST'] = 'def' - self.assertEqual(powerline.render(window, window_id, winnr), '%#Pl_3_8404992_4_192_underline#\xa0def%#Pl_4_192_NONE_None_NONE#>>') + self.assertEqual(powerline.render(window, window_id, winnr), b'%#Pl_3_8404992_4_192_underline#\xc2\xa0def%#Pl_4_192_NONE_None_NONE#>>') def test_local_themes(self): # Regression test: VimPowerline.add_local_theme did not work properly. @@ -682,7 +682,7 @@ class TestVim(TestCase): window = vim_module.current.window window_id = 1 winnr = window.number - self.assertEqual(powerline.render(window, window_id, winnr), '%#Pl_5_12583104_6_32896_NONE#\xa0\u201cbar\u201d%#Pl_6_32896_NONE_None_NONE#>>') + self.assertEqual(powerline.render(window, window_id, winnr), b'%#Pl_5_12583104_6_32896_NONE#\xc2\xa0\xe2\x80\x9cbar\xe2\x80\x9d%#Pl_6_32896_NONE_None_NONE#>>') @classmethod def setUpClass(cls): diff --git a/tests/test_invalid_unicode.vim b/tests/test_invalid_unicode.vim new file mode 100644 index 00000000..d09e2bb2 --- /dev/null +++ b/tests/test_invalid_unicode.vim @@ -0,0 +1,19 @@ +set encoding=utf-8 +let g:powerline_config_paths = [expand(':p:h:h') . '/powerline/config_files'] +set laststatus=2 +set showtabline=2 +edit `="\xFF"` +redir => g:messages + try + source powerline/bindings/vim/plugin/powerline.vim + redrawstatus! 
+ catch + call writefile(['Unexpected exception', v:exception], 'message.fail') + cquit + endtry +redir END +if g:messages =~# '\v\S' + call writefile(['Unexpected messages'] + split(g:messages, "\n", 1), 'message.fail') + cquit +endif +qall! diff --git a/tests/test_lib.py b/tests/test_lib.py index ef8dc737..4af20fa1 100644 --- a/tests/test_lib.py +++ b/tests/test_lib.py @@ -445,7 +445,7 @@ class TestVCS(TestCase): call(['git', 'checkout', '-q', 'branch2'], cwd=GIT_REPO) self.do_branch_rename_test(repo, 'branch2') call(['git', 'checkout', '-q', '--detach', 'branch1'], cwd=GIT_REPO) - self.do_branch_rename_test(repo, lambda b: re.match(br'^[a-f0-9]+$', b)) + self.do_branch_rename_test(repo, lambda b: re.match(r'^[a-f0-9]+$', b)) finally: call(['git', 'checkout', '-q', 'master'], cwd=GIT_REPO) diff --git a/tests/test_provided_config_files.py b/tests/test_provided_config_files.py index 381cd842..ace5d14b 100644 --- a/tests/test_provided_config_files.py +++ b/tests/test_provided_config_files.py @@ -7,17 +7,42 @@ from __future__ import (unicode_literals, division, absolute_import, print_funct import sys import os import json +import logging import tests.vim as vim_module from tests.lib import Args, urllib_read, replace_attr from tests import TestCase +from powerline import NotInterceptedError +from powerline.segments.common import wthr + VBLOCK = chr(ord('V') - 0x40) SBLOCK = chr(ord('S') - 0x40) +class FailingLogger(logging.Logger): + def exception(self, *args, **kwargs): + super(FailingLogger, self).exception(*args, **kwargs) + raise NotInterceptedError('Unexpected exception occurred') + + +def get_logger(stream=None): + log_format = '%(asctime)s:%(levelname)s:%(message)s' + formatter = logging.Formatter(log_format) + + level = logging.WARNING + handler = logging.StreamHandler(stream) + handler.setLevel(level) + handler.setFormatter(formatter) + + logger = FailingLogger('powerline') + logger.setLevel(level) + logger.addHandler(handler) + return logger + + class TestVimConfig(TestCase): def test_vim(self): from powerline.vim import VimPowerline @@ -33,14 +58,14 @@ class TestVimConfig(TestCase): ) with open(os.path.join(cfg_path, 'config.json'), 'r') as f: local_themes_raw = json.load(f)['ext']['vim']['local_themes'] - # Don't run tests on external/plugin segments + # Don’t run tests on external/plugin segments local_themes = dict((k, v) for (k, v) in local_themes_raw.items()) self.assertEqual(len(buffers), len(local_themes) - 1) outputs = {} i = 0 with vim_module._with('split'): - with VimPowerline() as powerline: + with VimPowerline(logger=get_logger()) as powerline: def check_output(mode, args, kwargs): if mode == 'nc': window = vim_module.windows[0] @@ -92,33 +117,33 @@ class TestConfig(TestCase): reload(common) from powerline.shell import ShellPowerline with replace_attr(common, 'urllib_read', urllib_read): - with ShellPowerline(Args(ext=['tmux']), run_once=False) as powerline: + with ShellPowerline(Args(ext=['tmux']), logger=get_logger(), run_once=False) as powerline: powerline.render() - with ShellPowerline(Args(ext=['tmux']), run_once=False) as powerline: + with ShellPowerline(Args(ext=['tmux']), logger=get_logger(), run_once=False) as powerline: powerline.render() def test_zsh(self): from powerline.shell import ShellPowerline args = Args(last_pipe_status=[1, 0], jobnum=0, ext=['shell'], renderer_module='.zsh') segment_info = {'args': args} - with ShellPowerline(args, run_once=False) as powerline: + with ShellPowerline(args, logger=get_logger(), run_once=False) as powerline: 
powerline.render(segment_info=segment_info) - with ShellPowerline(args, run_once=False) as powerline: + with ShellPowerline(args, logger=get_logger(), run_once=False) as powerline: powerline.render(segment_info=segment_info) segment_info['local_theme'] = 'select' - with ShellPowerline(args, run_once=False) as powerline: + with ShellPowerline(args, logger=get_logger(), run_once=False) as powerline: powerline.render(segment_info=segment_info) segment_info['local_theme'] = 'continuation' segment_info['parser_state'] = 'if cmdsubst' - with ShellPowerline(args, run_once=False) as powerline: + with ShellPowerline(args, logger=get_logger(), run_once=False) as powerline: powerline.render(segment_info=segment_info) def test_bash(self): from powerline.shell import ShellPowerline args = Args(last_exit_code=1, jobnum=0, ext=['shell'], renderer_module='.bash', config={'ext': {'shell': {'theme': 'default_leftonly'}}}) - with ShellPowerline(args, run_once=False) as powerline: + with ShellPowerline(args, logger=get_logger(), run_once=False) as powerline: powerline.render(segment_info={'args': args}) - with ShellPowerline(args, run_once=False) as powerline: + with ShellPowerline(args, logger=get_logger(), run_once=False) as powerline: powerline.render(segment_info={'args': args}) def test_ipython(self): @@ -131,11 +156,11 @@ class TestConfig(TestCase): segment_info = Args(prompt_count=1) - with IpyPowerline() as powerline: + with IpyPowerline(logger=get_logger()) as powerline: for prompt_type in ['in', 'in2']: powerline.render(is_prompt=True, matcher_info=prompt_type, segment_info=segment_info) powerline.render(is_prompt=True, matcher_info=prompt_type, segment_info=segment_info) - with IpyPowerline() as powerline: + with IpyPowerline(logger=get_logger()) as powerline: for prompt_type in ['out', 'rewrite']: powerline.render(is_prompt=False, matcher_info=prompt_type, segment_info=segment_info) powerline.render(is_prompt=False, matcher_info=prompt_type, segment_info=segment_info) @@ -145,8 +170,8 @@ class TestConfig(TestCase): from imp import reload reload(common) from powerline import Powerline - with replace_attr(common, 'urllib_read', urllib_read): - Powerline(ext='wm', renderer_module='pango_markup', run_once=True).render() + with replace_attr(wthr, 'urllib_read', urllib_read): + Powerline(logger=get_logger(), ext='wm', renderer_module='pango_markup', run_once=True).render() reload(common) diff --git a/tests/test_segments.py b/tests/test_segments.py index 2cc6545b..57f23abb 100644 --- a/tests/test_segments.py +++ b/tests/test_segments.py @@ -5,9 +5,11 @@ import sys import os from functools import partial +from collections import namedtuple from powerline.segments import shell, tmux, common from powerline.lib.vcs import get_fallback_create_watcher +from powerline.lib.unicode import out_u import tests.vim as vim_module @@ -18,10 +20,10 @@ from tests import TestCase, SkipTest def get_dummy_guess(**kwargs): if 'directory' in kwargs: def guess(path, create_watcher): - return Args(branch=lambda: os.path.basename(path), **kwargs) + return Args(branch=lambda: out_u(os.path.basename(path)), **kwargs) else: def guess(path, create_watcher): - return Args(branch=lambda: os.path.basename(path), directory=path, **kwargs) + return Args(branch=lambda: out_u(os.path.basename(path)), directory=path, **kwargs) return guess @@ -272,12 +274,6 @@ class TestShell(TestCase): cwd[0] = ValueError() self.assertRaises(ValueError, shell.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2) - def 
test_date(self): - pl = Pl() - with replace_attr(common, 'datetime', Args(now=lambda: Args(strftime=lambda fmt: fmt))): - self.assertEqual(common.date(pl=pl), [{'contents': '%Y-%m-%d', 'highlight_group': ['date'], 'divider_highlight_group': None}]) - self.assertEqual(common.date(pl=pl, format='%H:%M', istime=True), [{'contents': '%H:%M', 'highlight_group': ['time', 'date'], 'divider_highlight_group': 'time:divider'}]) - class TestTmux(TestCase): def test_attached_clients(self): @@ -294,26 +290,155 @@ class TestTmux(TestCase): class TestCommon(TestCase): + @classmethod + def setUpClass(cls): + module = __import__(str('powerline.segments.common.{0}'.format(cls.module_name))) + cls.module = getattr(module.segments.common, str(cls.module_name)) + + +class TestNet(TestCommon): + module_name = 'net' + def test_hostname(self): pl = Pl() with replace_env('SSH_CLIENT', '192.168.0.12 40921 22') as segment_info: - with replace_module_module(common, 'socket', gethostname=lambda: 'abc'): + with replace_module_module(self.module, 'socket', gethostname=lambda: 'abc'): self.assertEqual(common.hostname(pl=pl, segment_info=segment_info), 'abc') self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), 'abc') - with replace_module_module(common, 'socket', gethostname=lambda: 'abc.mydomain'): + with replace_module_module(self.module, 'socket', gethostname=lambda: 'abc.mydomain'): self.assertEqual(common.hostname(pl=pl, segment_info=segment_info), 'abc.mydomain') self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, exclude_domain=True), 'abc') self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), 'abc.mydomain') self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True, exclude_domain=True), 'abc') segment_info['environ'].pop('SSH_CLIENT') - with replace_module_module(common, 'socket', gethostname=lambda: 'abc'): + with replace_module_module(self.module, 'socket', gethostname=lambda: 'abc'): self.assertEqual(common.hostname(pl=pl, segment_info=segment_info), 'abc') self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), None) - with replace_module_module(common, 'socket', gethostname=lambda: 'abc.mydomain'): + with replace_module_module(self.module, 'socket', gethostname=lambda: 'abc.mydomain'): self.assertEqual(common.hostname(pl=pl, segment_info=segment_info), 'abc.mydomain') self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, exclude_domain=True), 'abc') self.assertEqual(common.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True, exclude_domain=True), None) + def test_external_ip(self): + pl = Pl() + with replace_attr(self.module, 'urllib_read', urllib_read): + self.assertEqual(common.external_ip(pl=pl), [{'contents': '127.0.0.1', 'divider_highlight_group': 'background:divider'}]) + + def test_internal_ip(self): + try: + import netifaces + except ImportError: + raise SkipTest('netifaces module is not available') + pl = Pl() + addr = { + 'enp2s0': { + netifaces.AF_INET: [{'addr': '192.168.100.200'}], + netifaces.AF_INET6: [{'addr': 'feff::5446:5eff:fe5a:7777%enp2s0'}] + }, + 'lo': { + netifaces.AF_INET: [{'addr': '127.0.0.1'}], + netifaces.AF_INET6: [{'addr': '::1'}] + }, + 'teredo': { + netifaces.AF_INET6: [{'addr': 'feff::5446:5eff:fe5a:7777'}] + }, + } + interfaces = ['lo', 'enp2s0', 'teredo'] + with replace_module_module( + self.module, 'netifaces', + interfaces=(lambda: interfaces), + ifaddresses=(lambda interface: addr[interface]), + 
AF_INET=netifaces.AF_INET, + AF_INET6=netifaces.AF_INET6, + ): + self.assertEqual(common.internal_ip(pl=pl), '192.168.100.200') + self.assertEqual(common.internal_ip(pl=pl, interface='detect'), '192.168.100.200') + self.assertEqual(common.internal_ip(pl=pl, interface='lo'), '127.0.0.1') + self.assertEqual(common.internal_ip(pl=pl, interface='teredo'), None) + self.assertEqual(common.internal_ip(pl=pl, ipv=4), '192.168.100.200') + self.assertEqual(common.internal_ip(pl=pl, interface='detect', ipv=4), '192.168.100.200') + self.assertEqual(common.internal_ip(pl=pl, interface='lo', ipv=4), '127.0.0.1') + self.assertEqual(common.internal_ip(pl=pl, interface='teredo', ipv=4), None) + self.assertEqual(common.internal_ip(pl=pl, ipv=6), 'feff::5446:5eff:fe5a:7777%enp2s0') + self.assertEqual(common.internal_ip(pl=pl, interface='detect', ipv=6), 'feff::5446:5eff:fe5a:7777%enp2s0') + self.assertEqual(common.internal_ip(pl=pl, interface='lo', ipv=6), '::1') + self.assertEqual(common.internal_ip(pl=pl, interface='teredo', ipv=6), 'feff::5446:5eff:fe5a:7777') + interfaces[1:2] = () + self.assertEqual(common.internal_ip(pl=pl, ipv=6), 'feff::5446:5eff:fe5a:7777') + interfaces[1:2] = () + self.assertEqual(common.internal_ip(pl=pl, ipv=6), '::1') + interfaces[:] = () + self.assertEqual(common.internal_ip(pl=pl, ipv=6), None) + + def test_network_load(self): + from time import sleep + + def gb(interface): + return None + + f = [gb] + + def _get_bytes(interface): + return f[0](interface) + + pl = Pl() + + with replace_attr(self.module, '_get_bytes', _get_bytes): + common.network_load.startup(pl=pl) + try: + self.assertEqual(common.network_load(pl=pl, interface='eth0'), None) + sleep(common.network_load.interval) + self.assertEqual(common.network_load(pl=pl, interface='eth0'), None) + while 'prev' not in common.network_load.interfaces.get('eth0', {}): + sleep(0.1) + self.assertEqual(common.network_load(pl=pl, interface='eth0'), None) + + l = [0, 0] + + def gb2(interface): + l[0] += 1200 + l[1] += 2400 + return tuple(l) + f[0] = gb2 + + while not common.network_load.interfaces.get('eth0', {}).get('prev', (None, None))[1]: + sleep(0.1) + self.assertEqual(common.network_load(pl=pl, interface='eth0'), [ + {'divider_highlight_group': 'background:divider', 'contents': 'DL 1 KiB/s', 'highlight_group': ['network_load_recv', 'network_load']}, + {'divider_highlight_group': 'background:divider', 'contents': 'UL 2 KiB/s', 'highlight_group': ['network_load_sent', 'network_load']}, + ]) + self.assertEqual(common.network_load(pl=pl, interface='eth0', recv_format='r {value}', sent_format='s {value}'), [ + {'divider_highlight_group': 'background:divider', 'contents': 'r 1 KiB/s', 'highlight_group': ['network_load_recv', 'network_load']}, + {'divider_highlight_group': 'background:divider', 'contents': 's 2 KiB/s', 'highlight_group': ['network_load_sent', 'network_load']}, + ]) + self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', suffix='bps', interface='eth0'), [ + {'divider_highlight_group': 'background:divider', 'contents': 'r 1 Kibps', 'highlight_group': ['network_load_recv', 'network_load']}, + {'divider_highlight_group': 'background:divider', 'contents': 's 2 Kibps', 'highlight_group': ['network_load_sent', 'network_load']}, + ]) + self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', si_prefix=True, interface='eth0'), [ + {'divider_highlight_group': 'background:divider', 'contents': 'r 1 kB/s', 'highlight_group': ['network_load_recv', 
'network_load']}, + {'divider_highlight_group': 'background:divider', 'contents': 's 2 kB/s', 'highlight_group': ['network_load_sent', 'network_load']}, + ]) + self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', recv_max=0, interface='eth0'), [ + {'divider_highlight_group': 'background:divider', 'contents': 'r 1 KiB/s', 'highlight_group': ['network_load_recv_gradient', 'network_load_gradient', 'network_load_recv', 'network_load'], 'gradient_level': 100}, + {'divider_highlight_group': 'background:divider', 'contents': 's 2 KiB/s', 'highlight_group': ['network_load_sent', 'network_load']}, + ]) + + class ApproxEqual(object): + def __eq__(self, i): + return abs(i - 50.0) < 1 + + self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', sent_max=4800, interface='eth0'), [ + {'divider_highlight_group': 'background:divider', 'contents': 'r 1 KiB/s', 'highlight_group': ['network_load_recv', 'network_load']}, + {'divider_highlight_group': 'background:divider', 'contents': 's 2 KiB/s', 'highlight_group': ['network_load_sent_gradient', 'network_load_gradient', 'network_load_sent', 'network_load'], 'gradient_level': ApproxEqual()}, + ]) + finally: + common.network_load.shutdown() + + +class TestEnv(TestCommon): + module_name = 'env' + def test_user(self): new_os = new_module('os', getpid=lambda: 1) @@ -324,54 +449,30 @@ class TestCommon(TestCase): def username(self): return 'def' - if hasattr(common, 'psutil') and not callable(common.psutil.Process.username): + if hasattr(self.module, 'psutil') and not callable(self.module.psutil.Process.username): username = property(username) + struct_passwd = namedtuple('struct_passwd', ('pw_name',)) new_psutil = new_module('psutil', Process=Process) + new_pwd = new_module('pwd', getpwuid=lambda uid: struct_passwd(pw_name='def')) + new_getpass = new_module('getpass', getuser=lambda: 'def') pl = Pl() - with replace_env('USER', 'def') as segment_info: - common.username = False - with replace_attr(common, 'os', new_os): - with replace_attr(common, 'psutil', new_psutil): - with replace_attr(common, '_geteuid', lambda: 5): - self.assertEqual(common.user(pl=pl, segment_info=segment_info), [ - {'contents': 'def', 'highlight_group': ['user']} - ]) - self.assertEqual(common.user(pl=pl, segment_info=segment_info, hide_user='abc'), [ - {'contents': 'def', 'highlight_group': ['user']} - ]) - self.assertEqual(common.user(pl=pl, segment_info=segment_info, hide_user='def'), None) - with replace_attr(common, '_geteuid', lambda: 0): - self.assertEqual(common.user(pl=pl, segment_info=segment_info), [ - {'contents': 'def', 'highlight_group': ['superuser', 'user']} - ]) - - def test_branch(self): - pl = Pl() - create_watcher = get_fallback_create_watcher() - segment_info = {'getcwd': os.getcwd} - branch = partial(common.branch, pl=pl, create_watcher=create_watcher) - with replace_attr(common, 'guess', get_dummy_guess(status=lambda: None, directory='/tmp/tests')): - with replace_attr(common, 'tree_status', lambda repo, pl: None): - self.assertEqual(branch(segment_info=segment_info, status_colors=False), [ - {'highlight_group': ['branch'], 'contents': 'tests'} - ]) - self.assertEqual(branch(segment_info=segment_info, status_colors=True), [ - {'contents': 'tests', 'highlight_group': ['branch_clean', 'branch']} - ]) - with replace_attr(common, 'guess', get_dummy_guess(status=lambda: 'D ', directory='/tmp/tests')): - with replace_attr(common, 'tree_status', lambda repo, pl: 'D '): - 
self.assertEqual(branch(segment_info=segment_info, status_colors=False), [ - {'highlight_group': ['branch'], 'contents': 'tests'} - ]) - self.assertEqual(branch(segment_info=segment_info, status_colors=True), [ - {'contents': 'tests', 'highlight_group': ['branch_dirty', 'branch']} - ]) - self.assertEqual(branch(segment_info=segment_info, status_colors=False), [ - {'highlight_group': ['branch'], 'contents': 'tests'} - ]) - with replace_attr(common, 'guess', lambda path, create_watcher: None): - self.assertEqual(branch(segment_info=segment_info, status_colors=False), None) + with replace_attr(self.module, 'pwd', new_pwd): + with replace_attr(self.module, 'getpass', new_getpass): + with replace_attr(self.module, 'os', new_os): + with replace_attr(self.module, 'psutil', new_psutil): + with replace_attr(self.module, '_geteuid', lambda: 5): + self.assertEqual(common.user(pl=pl), [ + {'contents': 'def', 'highlight_group': ['user']} + ]) + self.assertEqual(common.user(pl=pl, hide_user='abc'), [ + {'contents': 'def', 'highlight_group': ['user']} + ]) + self.assertEqual(common.user(pl=pl, hide_user='def'), None) + with replace_attr(self.module, '_geteuid', lambda: 0): + self.assertEqual(common.user(pl=pl), [ + {'contents': 'def', 'highlight_group': ['superuser', 'user']} + ]) def test_cwd(self): new_os = new_module('os', path=os.path, sep='/') @@ -386,7 +487,7 @@ class TestCommon(TestCase): return wd segment_info = {'getcwd': getcwd, 'home': None} - with replace_attr(common, 'os', new_os): + with replace_attr(self.module, 'os', new_os): cwd[0] = '/abc/def/ghi/foo/bar' self.assertEqual(common.cwd(pl=pl, segment_info=segment_info), [ {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True}, @@ -472,16 +573,67 @@ class TestCommon(TestCase): cwd[0] = ValueError() self.assertRaises(ValueError, common.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2) + def test_virtualenv(self): + pl = Pl() + with replace_env('VIRTUAL_ENV', '/abc/def/ghi') as segment_info: + self.assertEqual(common.virtualenv(pl=pl, segment_info=segment_info), 'ghi') + segment_info['environ'].pop('VIRTUAL_ENV') + self.assertEqual(common.virtualenv(pl=pl, segment_info=segment_info), None) + + def test_environment(self): + pl = Pl() + variable = 'FOO' + value = 'bar' + with replace_env(variable, value) as segment_info: + self.assertEqual(common.environment(pl=pl, segment_info=segment_info, variable=variable), value) + segment_info['environ'].pop(variable) + self.assertEqual(common.environment(pl=pl, segment_info=segment_info, variable=variable), None) + + +class TestVcs(TestCommon): + module_name = 'vcs' + + def test_branch(self): + pl = Pl() + create_watcher = get_fallback_create_watcher() + segment_info = {'getcwd': os.getcwd} + branch = partial(common.branch, pl=pl, create_watcher=create_watcher) + with replace_attr(self.module, 'guess', get_dummy_guess(status=lambda: None, directory='/tmp/tests')): + with replace_attr(self.module, 'tree_status', lambda repo, pl: None): + self.assertEqual(branch(segment_info=segment_info, status_colors=False), [ + {'highlight_group': ['branch'], 'contents': 'tests'} + ]) + self.assertEqual(branch(segment_info=segment_info, status_colors=True), [ + {'contents': 'tests', 'highlight_group': ['branch_clean', 'branch']} + ]) + with replace_attr(self.module, 'guess', get_dummy_guess(status=lambda: 'D ', directory='/tmp/tests')): + with replace_attr(self.module, 'tree_status', lambda repo, pl: 'D '): + self.assertEqual(branch(segment_info=segment_info, 
status_colors=False), [ + {'highlight_group': ['branch'], 'contents': 'tests'} + ]) + self.assertEqual(branch(segment_info=segment_info, status_colors=True), [ + {'contents': 'tests', 'highlight_group': ['branch_dirty', 'branch']} + ]) + self.assertEqual(branch(segment_info=segment_info, status_colors=False), [ + {'highlight_group': ['branch'], 'contents': 'tests'} + ]) + with replace_attr(self.module, 'guess', lambda path, create_watcher: None): + self.assertEqual(branch(segment_info=segment_info, status_colors=False), None) + + +class TestTime(TestCommon): + module_name = 'time' + def test_date(self): pl = Pl() - with replace_attr(common, 'datetime', Args(now=lambda: Args(strftime=lambda fmt: fmt))): + with replace_attr(self.module, 'datetime', Args(now=lambda: Args(strftime=lambda fmt: fmt))): self.assertEqual(common.date(pl=pl), [{'contents': '%Y-%m-%d', 'highlight_group': ['date'], 'divider_highlight_group': None}]) self.assertEqual(common.date(pl=pl, format='%H:%M', istime=True), [{'contents': '%H:%M', 'highlight_group': ['time', 'date'], 'divider_highlight_group': 'time:divider'}]) def test_fuzzy_time(self): time = Args(hour=0, minute=45) pl = Pl() - with replace_attr(common, 'datetime', Args(now=lambda: time)): + with replace_attr(self.module, 'datetime', Args(now=lambda: time)): self.assertEqual(common.fuzzy_time(pl=pl), 'quarter to one') time.hour = 23 time.minute = 59 @@ -499,19 +651,18 @@ class TestCommon(TestCase): time.minute = 60 self.assertEqual(common.fuzzy_time(pl=pl, unicode_text=True), 'twelve o’clock') - def test_external_ip(self): - pl = Pl() - with replace_attr(common, 'urllib_read', urllib_read): - self.assertEqual(common.external_ip(pl=pl), [{'contents': '127.0.0.1', 'divider_highlight_group': 'background:divider'}]) + +class TestSys(TestCommon): + module_name = 'sys' def test_uptime(self): pl = Pl() - with replace_attr(common, '_get_uptime', lambda: 259200): + with replace_attr(self.module, '_get_uptime', lambda: 259200): self.assertEqual(common.uptime(pl=pl), [{'contents': '3d', 'divider_highlight_group': 'background:divider'}]) - with replace_attr(common, '_get_uptime', lambda: 93784): + with replace_attr(self.module, '_get_uptime', lambda: 93784): self.assertEqual(common.uptime(pl=pl), [{'contents': '1d 2h 3m', 'divider_highlight_group': 'background:divider'}]) self.assertEqual(common.uptime(pl=pl, shorten_len=4), [{'contents': '1d 2h 3m 4s', 'divider_highlight_group': 'background:divider'}]) - with replace_attr(common, '_get_uptime', lambda: 65536): + with replace_attr(self.module, '_get_uptime', lambda: 65536): self.assertEqual(common.uptime(pl=pl), [{'contents': '18h 12m 16s', 'divider_highlight_group': 'background:divider'}]) self.assertEqual(common.uptime(pl=pl, shorten_len=2), [{'contents': '18h 12m', 'divider_highlight_group': 'background:divider'}]) self.assertEqual(common.uptime(pl=pl, shorten_len=1), [{'contents': '18h', 'divider_highlight_group': 'background:divider'}]) @@ -519,12 +670,45 @@ class TestCommon(TestCase): def _get_uptime(): raise NotImplementedError - with replace_attr(common, '_get_uptime', _get_uptime): + with replace_attr(self.module, '_get_uptime', _get_uptime): self.assertEqual(common.uptime(pl=pl), None) + def test_system_load(self): + pl = Pl() + with replace_module_module(self.module, 'os', getloadavg=lambda: (7.5, 3.5, 1.5)): + with replace_attr(self.module, '_cpu_count', lambda: 2): + self.assertEqual(common.system_load(pl=pl), [ + {'contents': '7.5 ', 'highlight_group': ['system_load_gradient', 'system_load'], 
'divider_highlight_group': 'background:divider', 'gradient_level': 100},
+					{'contents': '3.5 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 75.0},
+					{'contents': '1.5', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 0}
+				])
+				self.assertEqual(common.system_load(pl=pl, format='{avg:.0f}', threshold_good=0, threshold_bad=1), [
+					{'contents': '8 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
+					{'contents': '4 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
+					{'contents': '2', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 75.0}
+				])
+
+	def test_cpu_load_percent(self):
+		pl = Pl()
+		with replace_module_module(self.module, 'psutil', cpu_percent=lambda **kwargs: 52.3):
+			self.assertEqual(common.cpu_load_percent(pl=pl), [{
+				'contents': '52%',
+				'gradient_level': 52.3,
+				'highlight_group': ['cpu_load_percent_gradient', 'cpu_load_percent'],
+			}])
+			self.assertEqual(common.cpu_load_percent(pl=pl, format='{0:.1f}%'), [{
+				'contents': '52.3%',
+				'gradient_level': 52.3,
+				'highlight_group': ['cpu_load_percent_gradient', 'cpu_load_percent'],
+			}])
+
+
+class TestWthr(TestCommon):
+	module_name = 'wthr'
+
 	def test_weather(self):
 		pl = Pl()
-		with replace_attr(common, 'urllib_read', urllib_read):
+		with replace_attr(self.module, 'urllib_read', urllib_read):
 			self.assertEqual(common.weather(pl=pl), [
 				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': 'CLOUDS '},
 				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9°C', 'gradient_level': 30.0}
@@ -557,132 +741,45 @@ class TestCommon(TestCase):
 				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': 'CLOUDS '},
 				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9.0e+00C', 'gradient_level': 30.0}
 			])
+		with replace_attr(self.module, 'urllib_read', urllib_read):
+			common.weather.startup(pl=pl, location_query='Meppen,06,DE')
+			self.assertEqual(common.weather(pl=pl), [
+				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': 'CLOUDS '},
+				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '-9°C', 'gradient_level': 30.0}
+			])
+			self.assertEqual(common.weather(pl=pl, location_query='Moscow,RU'), [
+				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_condition_partly_cloudy_day', 'weather_condition_cloudy', 'weather_conditions', 'weather'], 'contents': 'CLOUDS '},
+				{'divider_highlight_group': 'background:divider', 'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '19°C', 'gradient_level': 70.0}
+			])
+			common.weather.shutdown()
 
-	def test_system_load(self):
-		pl = Pl()
-		with replace_module_module(common, 'os', getloadavg=lambda: (7.5, 3.5, 1.5)):
-			with replace_attr(common, '_cpu_count', lambda: 2):
-				self.assertEqual(common.system_load(pl=pl), [
-					{'contents': '7.5 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
-					{'contents': '3.5 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 75.0},
-					{'contents': '1.5', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 0}
-				])
-				self.assertEqual(common.system_load(pl=pl, format='{avg:.0f}', threshold_good=0, threshold_bad=1), [
-					{'contents': '8 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
-					{'contents': '4 ', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
-					{'contents': '2', 'highlight_group': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 75.0}
-				])
-
-	def test_cpu_load_percent(self):
-		pl = Pl()
-		with replace_module_module(common, 'psutil', cpu_percent=lambda **kwargs: 52.3):
-			self.assertEqual(common.cpu_load_percent(pl=pl), [{
-				'contents': '52%',
-				'gradient_level': 52.3,
-				'highlight_group': ['cpu_load_percent_gradient', 'cpu_load_percent'],
-			}])
-			self.assertEqual(common.cpu_load_percent(pl=pl, format='{0:.1f}%'), [{
-				'contents': '52.3%',
-				'gradient_level': 52.3,
-				'highlight_group': ['cpu_load_percent_gradient', 'cpu_load_percent'],
-			}])
-
-	def test_network_load(self):
-		from time import sleep
-
-		def gb(interface):
-			return None
-
-		f = [gb]
-
-		def _get_bytes(interface):
-			return f[0](interface)
-
-		pl = Pl()
-
-		with replace_attr(common, '_get_bytes', _get_bytes):
-			common.network_load.startup(pl=pl)
-			try:
-				self.assertEqual(common.network_load(pl=pl, interface='eth0'), None)
-				sleep(common.network_load.interval)
-				self.assertEqual(common.network_load(pl=pl, interface='eth0'), None)
-				while 'prev' not in common.network_load.interfaces.get('eth0', {}):
-					sleep(0.1)
-				self.assertEqual(common.network_load(pl=pl, interface='eth0'), None)
-
-				l = [0, 0]
-
-				def gb2(interface):
-					l[0] += 1200
-					l[1] += 2400
-					return tuple(l)
-				f[0] = gb2
-
-				while not common.network_load.interfaces.get('eth0', {}).get('prev', (None, None))[1]:
-					sleep(0.1)
-				self.assertEqual(common.network_load(pl=pl, interface='eth0'), [
-					{'divider_highlight_group': 'background:divider', 'contents': 'DL 1 KiB/s', 'highlight_group': ['network_load_recv', 'network_load']},
-					{'divider_highlight_group': 'background:divider', 'contents': 'UL 2 KiB/s', 'highlight_group': ['network_load_sent', 'network_load']},
-				])
-				self.assertEqual(common.network_load(pl=pl, interface='eth0', recv_format='r {value}', sent_format='s {value}'), [
-					{'divider_highlight_group': 'background:divider', 'contents': 'r 1 KiB/s', 'highlight_group': ['network_load_recv', 'network_load']},
-					{'divider_highlight_group': 'background:divider', 'contents': 's 2 KiB/s', 'highlight_group': ['network_load_sent', 'network_load']},
-				])
-				self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', suffix='bps', interface='eth0'), [
-					{'divider_highlight_group': 'background:divider', 'contents': 'r 1 Kibps', 'highlight_group': ['network_load_recv', 'network_load']},
-					{'divider_highlight_group': 'background:divider', 'contents': 's 2 Kibps', 'highlight_group': ['network_load_sent', 'network_load']},
-				])
-				self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', si_prefix=True, interface='eth0'), [
-					{'divider_highlight_group': 'background:divider', 'contents': 'r 1 kB/s', 'highlight_group': ['network_load_recv', 'network_load']},
-					{'divider_highlight_group': 'background:divider', 'contents': 's 2 kB/s', 'highlight_group': ['network_load_sent', 'network_load']},
-				])
-				self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', recv_max=0, interface='eth0'), [
-					{'divider_highlight_group': 'background:divider', 'contents': 'r 1 KiB/s', 'highlight_group': ['network_load_recv_gradient', 'network_load_gradient', 'network_load_recv', 'network_load'], 'gradient_level': 100},
-					{'divider_highlight_group': 'background:divider', 'contents': 's 2 KiB/s', 'highlight_group': ['network_load_sent', 'network_load']},
-				])
-
-				class ApproxEqual(object):
-					def __eq__(self, i):
-						return abs(i - 50.0) < 1
-
-				self.assertEqual(common.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', sent_max=4800, interface='eth0'), [
-					{'divider_highlight_group': 'background:divider', 'contents': 'r 1 KiB/s', 'highlight_group': ['network_load_recv', 'network_load']},
-					{'divider_highlight_group': 'background:divider', 'contents': 's 2 KiB/s', 'highlight_group': ['network_load_sent_gradient', 'network_load_gradient', 'network_load_sent', 'network_load'], 'gradient_level': ApproxEqual()},
-				])
-			finally:
-				common.network_load.shutdown()
-
-	def test_virtualenv(self):
-		pl = Pl()
-		with replace_env('VIRTUAL_ENV', '/abc/def/ghi') as segment_info:
-			self.assertEqual(common.virtualenv(pl=pl, segment_info=segment_info), 'ghi')
-			segment_info['environ'].pop('VIRTUAL_ENV')
-			self.assertEqual(common.virtualenv(pl=pl, segment_info=segment_info), None)
-
-	def test_environment(self):
-		pl = Pl()
-		variable = 'FOO'
-		value = 'bar'
-		with replace_env(variable, value) as segment_info:
-			self.assertEqual(common.environment(pl=pl, segment_info=segment_info, variable=variable), value)
-			segment_info['environ'].pop(variable)
-			self.assertEqual(common.environment(pl=pl, segment_info=segment_info, variable=variable), None)
 
+class TestMail(TestCommon):
+	module_name = 'mail'
 
 	def test_email_imap_alert(self):
 		# TODO
 		pass
 
+
+class TestPlayers(TestCommon):
+	module_name = 'players'
+
 	def test_now_playing(self):
 		# TODO
 		pass
 
+
+class TestBat(TestCommon):
+	module_name = 'bat'
+
 	def test_battery(self):
 		pl = Pl()
 
 		def _get_capacity(pl):
 			return 86
 
-		with replace_attr(common, '_get_capacity', _get_capacity):
+		with replace_attr(self.module, '_get_capacity', _get_capacity):
 			self.assertEqual(common.battery(pl=pl), [{
 				'contents': '86%',
 				'highlight_group': ['battery_gradient', 'battery'],
@@ -727,52 +824,6 @@ class TestCommon(TestCase):
 			}
 		])
 
-	def test_internal_ip(self):
-		try:
-			import netifaces
-		except ImportError:
-			raise SkipTest()
-		pl = Pl()
-		addr = {
-			'enp2s0': {
-				netifaces.AF_INET: [{'addr': '192.168.100.200'}],
-				netifaces.AF_INET6: [{'addr': 'feff::5446:5eff:fe5a:7777%enp2s0'}]
-			},
-			'lo': {
-				netifaces.AF_INET: [{'addr': '127.0.0.1'}],
-				netifaces.AF_INET6: [{'addr': '::1'}]
-			},
-			'teredo': {
-				netifaces.AF_INET6: [{'addr': 'feff::5446:5eff:fe5a:7777'}]
-			},
-		}
-		interfaces = ['lo', 'enp2s0', 'teredo']
-		with replace_module_module(
-			common, 'netifaces',
-			interfaces=(lambda: interfaces),
-			ifaddresses=(lambda interface: addr[interface]),
-			AF_INET=netifaces.AF_INET,
-			AF_INET6=netifaces.AF_INET6,
-		):
-			self.assertEqual(common.internal_ip(pl=pl), '192.168.100.200')
-			self.assertEqual(common.internal_ip(pl=pl, interface='detect'), '192.168.100.200')
-			self.assertEqual(common.internal_ip(pl=pl, interface='lo'), '127.0.0.1')
-			self.assertEqual(common.internal_ip(pl=pl, interface='teredo'), None)
-			self.assertEqual(common.internal_ip(pl=pl, ipv=4), '192.168.100.200')
-			self.assertEqual(common.internal_ip(pl=pl, interface='detect', ipv=4), '192.168.100.200')
-			self.assertEqual(common.internal_ip(pl=pl, interface='lo', ipv=4), '127.0.0.1')
-			self.assertEqual(common.internal_ip(pl=pl, interface='teredo', ipv=4), None)
-			self.assertEqual(common.internal_ip(pl=pl, ipv=6), 'feff::5446:5eff:fe5a:7777%enp2s0')
-			self.assertEqual(common.internal_ip(pl=pl, interface='detect', ipv=6), 'feff::5446:5eff:fe5a:7777%enp2s0')
-			self.assertEqual(common.internal_ip(pl=pl, interface='lo', ipv=6), '::1')
-			self.assertEqual(common.internal_ip(pl=pl, interface='teredo', ipv=6), 'feff::5446:5eff:fe5a:7777')
-			interfaces[1:2] = ()
-			self.assertEqual(common.internal_ip(pl=pl, ipv=6), 'feff::5446:5eff:fe5a:7777')
-			interfaces[1:2] = ()
-			self.assertEqual(common.internal_ip(pl=pl, ipv=6), '::1')
-			interfaces[:] = ()
-			self.assertEqual(common.internal_ip(pl=pl, ipv=6), None)
-
 
 class TestVim(TestCase):
 	def test_mode(self):
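The hunks above assume a TestCommon base class (defined earlier in tests/test_segments.py and not shown in this patch) that maps the per-class module_name attribute onto the segment module under test, exposed as self.module, so that replace_attr()/replace_module_module() can patch the right module for each group of tests. A minimal sketch of what such a base class could look like, for orientation only — the import path and the lazy property are assumptions, not taken from this patch:

    import importlib
    from unittest import TestCase  # the real suite ships its own TestCase helper


    class TestCommon(TestCase):
        # Subclasses such as TestWthr, TestMail, TestPlayers and TestBat only
        # override this attribute.
        module_name = ''

        @property
        def module(self):
            # Assumed layout: module_name = 'wthr' resolves to
            # powerline.segments.common.wthr; the real resolution may differ.
            return importlib.import_module('powerline.segments.common.' + self.module_name)

With something of that shape in place, each subclass patches its own submodule while the assertions keep calling the familiar common.* entry points, which is exactly how the hunks above read.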
diff --git a/tests/test_shells/input.zsh b/tests/test_shells/input.zsh
index b4fa6487..4aa6789d 100644
--- a/tests/test_shells/input.zsh
+++ b/tests/test_shells/input.zsh
@@ -1,5 +1,7 @@
+unset HOME
 unsetopt promptsp notransientrprompt
 setopt interactivecomments
+setopt autonamedirs
 # POWERLINE_CONFIG_PATH=$PWD/powerline/config_files
 # POWERLINE_THEME_CONFIG=( default_leftonly.segment_data.hostname.args.only_if_ssh=false )
 # POWERLINE_CONFIG=( ext.shell.theme=default_leftonly )
@@ -11,7 +13,7 @@ export VIRTUAL_ENV=
 cd tests/shell/3rd
 cd .git
 cd ..
-VIRTUAL_ENV="$HOME/.virtenvs/some-virtual-environment"
+VIRTUAL_ENV="/home/USER/.virtenvs/some-virtual-environment"
 VIRTUAL_ENV=
 bgscript.sh & waitpid.sh
 false
@@ -38,6 +40,8 @@ do
 break
 done
 1
+cd .
+cd .
 hash -d foo=$PWD:h ; cd .
 POWERLINE_COMMAND="$POWERLINE_COMMAND -t default.dividers.left.hard=\$ABC"
 true
diff --git a/tests/test_shells/test.sh b/tests/test_shells/test.sh
index 216ed4fc..fb4c4984 100755
--- a/tests/test_shells/test.sh
+++ b/tests/test_shells/test.sh
@@ -183,6 +183,7 @@ mkdir tests/shell/path
 ln -s "$(which "${PYTHON}")" tests/shell/path/python
 ln -s "$(which screen)" tests/shell/path
 ln -s "$(which env)" tests/shell/path
+ln -s "$(which git)" tests/shell/path
 ln -s "$(which sleep)" tests/shell/path
 ln -s "$(which cat)" tests/shell/path
 ln -s "$(which false)" tests/shell/path
@@ -332,12 +333,12 @@ if test -z "${ONLY_SHELL}" || test "x${ONLY_SHELL%sh}" != "x${ONLY_SHELL}" || te
 	done
 fi
 
-if ! $PYTHON scripts/powerline-daemon -s$ADDRESS > tests/shell/daemon_log_2 2>&1 ; then
-	echo "Daemon exited with status $?"
-	FAILED=1
-else
+if $PYTHON scripts/powerline-daemon -s$ADDRESS > tests/shell/daemon_log_2 2>&1 ; then
 	sleep 1
 	$PYTHON scripts/powerline-daemon -s$ADDRESS -k
+else
+	echo "Daemon exited with status $?"
+	FAILED=1
 fi
 
 if ! test -z "$(cat tests/shell/daemon_log_2)" ; then
diff --git a/tests/test_shells/zsh.daemon.ok b/tests/test_shells/zsh.daemon.ok
index d0259c4a..2a431ec0 100644
--- a/tests/test_shells/zsh.daemon.ok
+++ b/tests/test_shells/zsh.daemon.ok
@@ -1,7 +1,7 @@
   HOSTNAME  USER   BRANCH  ⋯  tests  shell  3rd  cd .git
   HOSTNAME  USER   BRANCH  ⋯  shell  3rd  .git  cd ..
-  HOSTNAME  USER   BRANCH  ⋯  tests  shell  3rd  VIRTUAL_ENV="$HOME/.virtenvs/some-virtual-environment"
+  HOSTNAME  USER   BRANCH  ⋯  tests  shell  3rd  VIRTUAL_ENV="/home/USER/.virtenvs/some-virtual-environment"
   HOSTNAME  USER  ⓔ  some-virtual-environment   BRANCH  ⋯  tests  shell  3rd  VIRTUAL_ENV=
   HOSTNAME  USER   BRANCH  ⋯  tests  shell  3rd  bgscript.sh & waitpid.sh
 [1] PID
@@ -33,6 +33,8 @@ abc
 1) def 2) ghi 3) jkl
                    Select variant  1
 def
+ INSERT  ⋯  tests  shell  3rd  cd .
+ INSERT  ⋯  tests  shell  3rd  cd .
  INSERT  ⋯  tests  shell  3rd  hash -d foo=$PWD:h ; cd .
  INSERT  ~foo  3rd  POWERLINE_COMMAND="$POWERLINE_COMMAND -t default.dividers.left.hard=\$ABC"
  INSERT $ABC~foo  3rd $ABCtrue
diff --git a/tests/test_shells/zsh.nodaemon.ok b/tests/test_shells/zsh.nodaemon.ok
index f3edbcdc..5a4c75d2 100644
--- a/tests/test_shells/zsh.nodaemon.ok
+++ b/tests/test_shells/zsh.nodaemon.ok
@@ -1,7 +1,7 @@
   HOSTNAME  USER   BRANCH  ⋯  tests  shell  3rd  cd .git
   HOSTNAME  USER   BRANCH  ⋯  shell  3rd  .git  cd ..
-  HOSTNAME  USER   BRANCH  ⋯  tests  shell  3rd  VIRTUAL_ENV="$HOME/.virtenvs/some-virtual-environment"
+  HOSTNAME  USER   BRANCH  ⋯  tests  shell  3rd  VIRTUAL_ENV="/home/USER/.virtenvs/some-virtual-environment"
   HOSTNAME  USER  ⓔ  some-virtual-environment   BRANCH  ⋯  tests  shell  3rd  VIRTUAL_ENV=
   HOSTNAME  USER   BRANCH  ⋯  tests  shell  3rd  bgscript.sh & waitpid.sh
 [1] PID
@@ -33,6 +33,8 @@ abc
 1) def 2) ghi 3) jkl
 Select variant  1
 def
+ INSERT  ⋯  tests  shell  3rd  cd .
+ INSERT  ⋯  tests  shell  3rd  cd .
  INSERT  ⋯  tests  shell  3rd  hash -d foo=$PWD:h ; cd .
  INSERT  ~foo  3rd  POWERLINE_COMMAND="$POWERLINE_COMMAND -t default.dividers.left.hard=\$ABC"
  INSERT $ABC~foo  3rd $ABCtrue
diff --git a/tests/test_watcher.py b/tests/test_watcher.py
index 956faa7a..dce37bae 100644
--- a/tests/test_watcher.py
+++ b/tests/test_watcher.py
@@ -6,6 +6,7 @@ import os
 
 from time import sleep
 from functools import partial
+from errno import ENOENT
 
 from powerline.lib.watcher import create_file_watcher, create_tree_watcher, INotifyError
 from powerline.lib.watcher.uv import UvNotFound
@@ -26,6 +27,67 @@ def clear_dir(dir):
 			os.rmdir(os.path.join(root, d))
 
 
+def set_watcher_tests(l):
+	byte_tests = (('bytes', True), ('unicode', False))
+
+	for btn, use_bytes in byte_tests:
+		def test_inotify_file_watcher(self, use_bytes=use_bytes):
+			try:
+				w = create_file_watcher(pl=get_fallback_logger(), watcher_type='inotify')
+			except INotifyError:
+				raise SkipTest('This test is not suitable for a stat based file watcher')
+			self.do_test_file_watcher(w, use_bytes)
+
+		def test_uv_file_watcher(self, use_bytes=use_bytes):
+			raise SkipTest('Uv watcher tests are not stable')
+			try:
+				w = create_file_watcher(pl=get_fallback_logger(), watcher_type='uv')
+			except UvNotFound:
+				raise SkipTest('Pyuv is not available')
+			self.do_test_file_watcher(w, use_bytes)
+
+		def test_inotify_tree_watcher(self, use_bytes=use_bytes):
+			try:
+				tw = create_tree_watcher(get_fallback_logger(), watcher_type='inotify')
+			except INotifyError:
+				raise SkipTest('INotify is not available')
+			self.do_test_tree_watcher(tw, use_bytes)
+
+		def test_uv_tree_watcher(self, use_bytes=use_bytes):
+			raise SkipTest('Uv watcher tests are not stable')
+			try:
+				tw = create_tree_watcher(get_fallback_logger(), 'uv')
+			except UvNotFound:
+				raise SkipTest('Pyuv is not available')
+			self.do_test_tree_watcher(tw, use_bytes)
+
+		def test_inotify_file_watcher_is_watching(self, use_bytes=use_bytes):
+			try:
+				w = create_file_watcher(pl=get_fallback_logger(), watcher_type='inotify')
+			except INotifyError:
+				raise SkipTest('INotify is not available')
+			self.do_test_file_watcher_is_watching(w, use_bytes)
+
+		def test_stat_file_watcher_is_watching(self, use_bytes=use_bytes):
+			w = create_file_watcher(pl=get_fallback_logger(), watcher_type='stat')
+			self.do_test_file_watcher_is_watching(w, use_bytes)
+
+		def test_uv_file_watcher_is_watching(self, use_bytes=use_bytes):
+			try:
+				w = create_file_watcher(pl=get_fallback_logger(), watcher_type='uv')
+			except UvNotFound:
+				raise SkipTest('Pyuv is not available')
+			self.do_test_file_watcher_is_watching(w, use_bytes)
+
+		for wt in ('uv', 'inotify'):
+			l['test_{0}_file_watcher_{1}'.format(wt, btn)] = locals()['test_{0}_file_watcher'.format(wt)]
+			l['test_{0}_tree_watcher_{1}'.format(wt, btn)] = locals()['test_{0}_tree_watcher'.format(wt)]
+			l['test_{0}_file_watcher_is_watching_{1}'.format(wt, btn)] = (
+				locals()['test_{0}_file_watcher_is_watching'.format(wt)])
+		l['test_{0}_file_watcher_is_watching_{1}'.format('stat', btn)] = (
+			locals()['test_{0}_file_watcher_is_watching'.format('stat')])
+
+
 class TestFilesystemWatchers(TestCase):
 	def do_test_for_change(self, watcher, path):
 		st = monotonic()
@@ -35,21 +97,19 @@ class TestFilesystemWatchers(TestCase):
 				sleep(0.1)
 		self.fail('The change to {0} was not detected'.format(path))
 
-	def test_file_watcher(self):
-		try:
-			w = create_file_watcher(pl=get_fallback_logger(), watcher_type='inotify')
-		except INotifyError:
-			raise SkipTest('This test is not suitable for a stat based file watcher')
-		return self.do_test_file_watcher(w)
-
-	def do_test_file_watcher(self, w):
+	def do_test_file_watcher(self, w, use_bytes=False):
 		try:
 			f1, f2, f3 = map(lambda x: os.path.join(INOTIFY_DIR, 'file%d' % x), (1, 2, 3))
+			ne = os.path.join(INOTIFY_DIR, 'notexists')
+			if use_bytes:
+				f1 = f1.encode('utf-8')
+				f2 = f2.encode('utf-8')
+				f3 = f3.encode('utf-8')
+				ne = ne.encode('utf-8')
 			with open(f1, 'wb'):
 				with open(f2, 'wb'):
 					with open(f3, 'wb'):
 						pass
-			ne = os.path.join(INOTIFY_DIR, 'notexists')
 			self.assertRaises(OSError, w, ne)
 			self.assertTrue(w(f1))
 			self.assertTrue(w(f2))
@@ -86,88 +146,75 @@ class TestFilesystemWatchers(TestCase):
 		finally:
 			clear_dir(INOTIFY_DIR)
 
-	def test_uv_file_watcher(self):
-		raise SkipTest('Uv watcher tests are not stable')
+	def do_test_tree_watcher(self, tw, use_bytes=False):
 		try:
-			w = create_file_watcher(pl=get_fallback_logger(), watcher_type='uv')
-		except UvNotFound:
-			raise SkipTest('Pyuv is not available')
-		return self.do_test_file_watcher(w)
-
-	def test_tree_watcher(self):
-		tw = create_tree_watcher(get_fallback_logger())
-		return self.do_test_tree_watcher(tw)
-
-	def do_test_tree_watcher(self, tw):
-		try:
-			subdir = os.path.join(INOTIFY_DIR, 'subdir')
+			inotify_dir = INOTIFY_DIR
+			subdir = os.path.join(inotify_dir, 'subdir')
+			t1 = os.path.join(inotify_dir, 'tree1')
+			ts1 = os.path.join(subdir, 'tree1')
+			suffix = '1'
+			f = os.path.join(subdir, 'f')
+			if use_bytes:
+				inotify_dir = inotify_dir.encode('utf-8')
+				subdir = subdir.encode('utf-8')
+				t1 = t1.encode('utf-8')
+				ts1 = ts1.encode('utf-8')
+				suffix = suffix.encode('utf-8')
+				f = f.encode('utf-8')
 			os.mkdir(subdir)
 			try:
-				if tw.watch(INOTIFY_DIR).is_dummy:
+				if tw.watch(inotify_dir).is_dummy:
 					raise SkipTest('No tree watcher available')
 			except UvNotFound:
 				raise SkipTest('Pyuv is not available')
-			self.assertTrue(tw(INOTIFY_DIR))
-			self.assertFalse(tw(INOTIFY_DIR))
-			changed = partial(self.do_test_for_change, tw, INOTIFY_DIR)
-			open(os.path.join(INOTIFY_DIR, 'tree1'), 'w').close()
+			except INotifyError:
+				raise SkipTest('INotify is not available')
+			self.assertTrue(tw(inotify_dir))
+			self.assertFalse(tw(inotify_dir))
+			changed = partial(self.do_test_for_change, tw, inotify_dir)
+			open(t1, 'w').close()
 			changed()
-			open(os.path.join(subdir, 'tree1'), 'w').close()
+			open(ts1, 'w').close()
 			changed()
-			os.unlink(os.path.join(subdir, 'tree1'))
+			os.unlink(ts1)
 			changed()
 			os.rmdir(subdir)
 			changed()
 			os.mkdir(subdir)
 			changed()
-			os.rename(subdir, subdir + '1')
+			os.rename(subdir, subdir + suffix)
 			changed()
-			shutil.rmtree(subdir + '1')
+			shutil.rmtree(subdir + suffix)
 			changed()
 			os.mkdir(subdir)
-			f = os.path.join(subdir, 'f')
 			open(f, 'w').close()
 			changed()
 			with open(f, 'a') as s:
 				s.write(' ')
 			changed()
-			os.rename(f, f + '1')
+			os.rename(f, f + suffix)
 			changed()
 		finally:
-			clear_dir(INOTIFY_DIR)
+			clear_dir(inotify_dir)
 
-	def test_uv_tree_watcher(self):
-		raise SkipTest('Uv watcher tests are not stable')
-		tw = create_tree_watcher(get_fallback_logger(), 'uv')
-		return self.do_test_tree_watcher(tw)
-
-	def test_inotify_file_watcher_is_watching(self):
-		try:
-			w = create_file_watcher(pl=get_fallback_logger(), watcher_type='inotify')
-		except INotifyError:
-			raise SkipTest('INotify is not available')
-		return self.do_test_file_watcher_is_watching(w)
-
-	def test_stat_file_watcher_is_watching(self):
-		w = create_file_watcher(pl=get_fallback_logger(), watcher_type='stat')
-		return self.do_test_file_watcher_is_watching(w)
-
-	def test_uv_file_watcher_is_watching(self):
-		try:
-			w = create_file_watcher(pl=get_fallback_logger(), watcher_type='uv')
-		except UvNotFound:
-			raise SkipTest('Pyuv is not available')
-		return self.do_test_file_watcher_is_watching(w)
-
-	def do_test_file_watcher_is_watching(self, w):
+	def do_test_file_watcher_is_watching(self, w, use_bytes=False):
 		try:
 			f1, f2, f3 = map(lambda x: os.path.join(INOTIFY_DIR, 'file%d' % x), (1, 2, 3))
+			ne = os.path.join(INOTIFY_DIR, 'notexists')
+			if use_bytes:
+				f1 = f1.encode('utf-8')
+				f2 = f2.encode('utf-8')
+				f3 = f3.encode('utf-8')
+				ne = ne.encode('utf-8')
 			with open(f1, 'wb'):
 				with open(f2, 'wb'):
 					with open(f3, 'wb'):
 						pass
-			ne = os.path.join(INOTIFY_DIR, 'notexists')
 			self.assertRaises(OSError, w, ne)
+			try:
+				w(ne)
+			except OSError as e:
+				self.assertEqual(e.errno, ENOENT)
 			self.assertTrue(w(f1))
 			self.assertFalse(w.is_watching(ne))
 			self.assertTrue(w.is_watching(f1))
@@ -175,6 +222,8 @@ class TestFilesystemWatchers(TestCase):
 		finally:
 			clear_dir(INOTIFY_DIR)
 
+	set_watcher_tests(locals())
+
 
 old_cwd = None
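set_watcher_tests() above relies on a small metaprogramming idiom: the functions created inside the loop are written into the dictionary that was passed in, and because TestFilesystemWatchers calls it as set_watcher_tests(locals()) at class-body level, every generated function becomes an ordinary test method (one per watcher type and per bytes/unicode mode). A reduced, self-contained illustration of the same idiom — the names below are illustrative, not part of the patch:

    import unittest


    def add_byte_mode_tests(namespace):
        # One generated test per (label, use_bytes) pair. The default argument
        # freezes the loop variable, so each generated method keeps its own
        # value instead of sharing the last one (closures bind late in Python).
        for label, use_bytes in (('bytes', True), ('unicode', False)):
            def test(self, use_bytes=use_bytes):
                self.assertIsInstance(use_bytes, bool)
            namespace['test_paths_' + label] = test


    class ExampleWatcherTests(unittest.TestCase):
        # Inside a class body, locals() is the namespace that later becomes the
        # class __dict__, so the injected functions turn into test methods.
        add_byte_mode_tests(locals())

The use_bytes=use_bytes defaults in the generated signatures of the patch serve the same purpose: without them, every test created in one pass of the loop would see only the final value of use_bytes.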
diff --git a/tests/vim.py b/tests/vim.py
index d8ac109f..b25759ce 100644
--- a/tests/vim.py
+++ b/tests/vim.py
@@ -651,6 +651,7 @@ def _get_segment_info():
 		'tabnr': tabpage.number,
 		'window_id': window._window_id,
 		'mode': mode,
+		'encoding': options['encoding'],
 	}
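The tests/vim.py hunk adds the mock's 'encoding' option to the segment_info dictionary built by _get_segment_info(). A hypothetical consumer of that key, shown only to illustrate how a segment reads values out of segment_info — the function name and fallback behaviour are assumptions, not part of the patch:

    def buffer_encoding(pl, segment_info):
        '''Return the encoding recorded in segment_info.

        Assumes segment_info carries the 'encoding' key added above; returns
        None (which hides the segment) when the key is missing or empty.
        '''
        return segment_info.get('encoding') or None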