| id (string, 12-102 chars) | prompt (string, 242-11.5M chars) | relative_path (string, 12-89 chars) |
|---|---|---|
benedict.utils.type_util.is_json_serializable
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
Based on the information above, please complete the function:
#CURRENT_FILE: python-benedict/benedict/utils/type_util.py
import pathlib
import re
from datetime import datetime
from decimal import Decimal
def is_json_serializable(val):
|
python-benedict/benedict/utils/type_util.py
|
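A minimal sketch of one way `is_json_serializable` could be completed: attempt a `json.dumps` call and report whether it succeeds. The real python-benedict helper may instead check the value's type against a whitelist; this version is illustrative only.

```python
import json

def is_json_serializable(val):
    # Sketch: a value counts as JSON-serializable if json.dumps accepts it.
    try:
        json.dumps(val)
        return True
    except (TypeError, ValueError):
        return False
```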
feedparser.urls.convert_to_idn
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
Based on the information above, please complete the function:
#CURRENT_FILE: feedparser/feedparser/urls.py
import re
import urllib.parse
from .html import _BaseHTMLProcessor
def convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
|
feedparser/feedparser/urls.py
|
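A sketch that follows the strategy spelled out in the stub's comments for `convert_to_idn`: leave the URL alone when the host is plain ASCII, otherwise re-encode the hostname with the IDNA codec. The netloc handling here is a simplification and not necessarily what feedparser actually does.

```python
import urllib.parse

def convert_to_idn(url):
    """Convert a URL to IDN notation (sketch)."""
    parts = urllib.parse.urlsplit(url)
    try:
        parts.netloc.encode("ascii")
        return url  # host is already ASCII, nothing to do
    except UnicodeEncodeError:
        host = parts.hostname.encode("idna").decode("ascii")
        if parts.port:
            host = "%s:%s" % (host, parts.port)
        return urllib.parse.urlunsplit(
            (parts.scheme, host, parts.path, parts.query, parts.fragment)
        )
```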
mistune.toc.add_toc_hook
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE mistune/src/mistune/toc.py
def render_toc_ul(toc):
"""Render a <ul> table of content HTML. The param "toc" should
be formatted into this structure::
[
(level, id, text),
]
For example::
[
(1, 'toc-intro', 'Introduction'),
(2, 'toc-install', 'Install'),
(2, 'toc-upgrade', 'Upgrade'),
(1, 'toc-license', 'License'),
]
"""
if not toc:
return ''
s = '<ul>\n'
levels = []
for level, k, text in toc:
item = '<a href="#{}">{}</a>'.format(k, text)
if not levels:
s += '<li>' + item
levels.append(level)
elif level == levels[-1]:
s += '</li>\n<li>' + item
elif level > levels[-1]:
s += '\n<ul>\n<li>' + item
levels.append(level)
else:
levels.pop()
while levels:
last_level = levels.pop()
if level == last_level:
s += '</li>\n</ul>\n</li>\n<li>' + item
levels.append(level)
break
elif level > last_level:
s += '</li>\n<li>' + item
levels.append(last_level)
levels.append(level)
break
else:
s += '</li>\n</ul>\n'
else:
levels.append(level)
s += '</li>\n<li>' + item
while len(levels) > 1:
s += '</li>\n</ul>\n'
levels.pop()
return s + '</li>\n</ul>\n'
# FILE mistune/src/mistune/toc.py
def normalize_toc_item(md, token):
text = token['text']
tokens = md.inline(text, {})
html = md.renderer(tokens, {})
text = striptags(html)
attrs = token['attrs']
return attrs['level'], attrs['id'], text
# FILE mistune/src/mistune/util.py
def striptags(s: str):
return _striptags_re.sub('', s)
Based on the information above, please complete the function:
#CURRENT_FILE: mistune/src/mistune/toc.py
from .util import striptags
def add_toc_hook(md, min_level=1, max_level=3, heading_id=None):
"""Add a hook to save toc items into ``state.env``. This is
usually helpful for doc generator::
import mistune
from mistune.toc import add_toc_hook, render_toc_ul
md = mistune.create_markdown(...)
add_toc_hook(md)
html, state = md.parse(text)
toc_items = state.env['toc_items']
toc_html = render_toc_ul(toc_items)
:param md: Markdown instance
:param min_level: min heading level
:param max_level: max heading level
:param heading_id: a function to generate heading_id
"""
|
mistune/src/mistune/toc.py
|
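A sketch of a possible `add_toc_hook` completion, assuming mistune v3's hook API (`md.before_render_hooks`, `state.tokens`, heading tokens carrying `attrs['level']`); it reuses the `normalize_toc_item` helper shown in the context above. The default id scheme (`toc_1`, `toc_2`, ...) is an assumption.

```python
def add_toc_hook(md, min_level=1, max_level=3, heading_id=None):
    if heading_id is None:
        # assumed default id generator: toc_1, toc_2, ...
        def heading_id(token, index):
            return "toc_" + str(index + 1)

    def toc_hook(md, state):
        headings = []
        for tok in state.tokens:
            if tok["type"] == "heading":
                level = tok["attrs"]["level"]
                if min_level <= level <= max_level:
                    headings.append(tok)

        toc_items = []
        for i, tok in enumerate(headings):
            tok["attrs"]["id"] = heading_id(tok, i)
            toc_items.append(normalize_toc_item(md, tok))

        # expose the items so render_toc_ul(state.env['toc_items']) works
        state.env["toc_items"] = toc_items

    md.before_render_hooks.append(toc_hook)
```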
mistune.plugins.table.table_in_quote
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE mistune/src/mistune/plugins/table.py
def table_in_list(md):
"""Enable table plugin in list."""
md.block.insert_rule(md.block.list_rules, 'table', before='paragraph')
md.block.insert_rule(md.block.list_rules, 'nptable', before='paragraph')
# FILE mistune/src/mistune/plugins/table.py
def table(md):
"""A mistune plugin to support table, spec defined at
https://michelf.ca/projects/php-markdown/extra/#table
Here is an example:
.. code-block:: text
First Header | Second Header
------------- | -------------
Content Cell | Content Cell
Content Cell | Content Cell
:param md: Markdown instance
"""
md.block.register('table', TABLE_PATTERN, parse_table, before='paragraph')
md.block.register('nptable', NP_TABLE_PATTERN, parse_nptable, before='paragraph')
if md.renderer and md.renderer.NAME == 'html':
md.renderer.register('table', render_table)
md.renderer.register('table_head', render_table_head)
md.renderer.register('table_body', render_table_body)
md.renderer.register('table_row', render_table_row)
md.renderer.register('table_cell', render_table_cell)
# FILE mistune/src/mistune/plugins/table.py
def render_table_head(renderer, text):
return '<thead>\n<tr>\n' + text + '</tr>\n</thead>\n'
# FILE mistune/src/mistune/plugins/table.py
def render_table_body(renderer, text):
return '<tbody>\n' + text + '</tbody>\n'
# FILE mistune/src/mistune/plugins/table.py
def render_table(renderer, text):
return '<table>\n' + text + '</table>\n'
# FILE mistune/src/mistune/plugins/table.py
def render_table_row(renderer, text):
return '<tr>\n' + text + '</tr>\n'
# FILE mistune/src/mistune/plugins/table.py
def render_table_cell(renderer, text, align=None, head=False):
if head:
tag = 'th'
else:
tag = 'td'
html = ' <' + tag
if align:
html += ' style="text-align:' + align + '"'
return html + '>' + text + '</' + tag + '>\n'
Based on the information above, please complete the function:
#CURRENT_FILE: mistune/src/mistune/plugins/table.py
import re
from ..helpers import PREVENT_BACKSLASH
def table_in_quote(md):
"""Enable table plugin in block quotes."""
|
mistune/src/mistune/plugins/table.py
|
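The expected completion for `table_in_quote` actually appears verbatim in the context of the next row: it mirrors `table_in_list`, but targets the block-quote rule list.

```python
def table_in_quote(md):
    """Enable table plugin in block quotes."""
    md.block.insert_rule(md.block.block_quote_rules, 'table', before='paragraph')
    md.block.insert_rule(md.block.block_quote_rules, 'nptable', before='paragraph')
```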
mistune.plugins.table.table_in_list
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE mistune/src/mistune/plugins/table.py
def table_in_quote(md):
"""Enable table plugin in block quotes."""
md.block.insert_rule(md.block.block_quote_rules, 'table', before='paragraph')
md.block.insert_rule(md.block.block_quote_rules, 'nptable', before='paragraph')
# FILE mistune/src/mistune/plugins/table.py
def table(md):
"""A mistune plugin to support table, spec defined at
https://michelf.ca/projects/php-markdown/extra/#table
Here is an example:
.. code-block:: text
First Header | Second Header
------------- | -------------
Content Cell | Content Cell
Content Cell | Content Cell
:param md: Markdown instance
"""
md.block.register('table', TABLE_PATTERN, parse_table, before='paragraph')
md.block.register('nptable', NP_TABLE_PATTERN, parse_nptable, before='paragraph')
if md.renderer and md.renderer.NAME == 'html':
md.renderer.register('table', render_table)
md.renderer.register('table_head', render_table_head)
md.renderer.register('table_body', render_table_body)
md.renderer.register('table_row', render_table_row)
md.renderer.register('table_cell', render_table_cell)
Based on the information above, please complete the function:
#CURRENT_FILE: mistune/src/mistune/plugins/table.py
import re
from ..helpers import PREVENT_BACKSLASH
def table_in_list(md):
"""Enable table plugin in list."""
|
mistune/src/mistune/plugins/table.py
|
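Conversely, the expected completion for `table_in_list` appears verbatim in the context of the previous row, with `list_rules` in place of `block_quote_rules`.

```python
def table_in_list(md):
    """Enable table plugin in list."""
    md.block.insert_rule(md.block.list_rules, 'table', before='paragraph')
    md.block.insert_rule(md.block.list_rules, 'nptable', before='paragraph')
```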
xmnlp.utils.parallel_handler
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
Based on the information above, please complete the function:
#CURRENT_FILE: xmnlp/xmnlp/utils/__init__.py
import os
import re
import concurrent.futures as futures
from functools import partial
from typing import Any, Callable, List, Generator
import numpy as np
def parallel_handler(callback: Callable, texts: List[str], n_jobs: int = 2, **kwargs) -> Generator[
List[Any], None, None
]:
"""parallel handler
Args:
callback: callback function
texts: List[str]
n_jobs: int, pool size of threads
Return:
Generator[List[str]]
"""
|
xmnlp/xmnlp/utils/__init__.py
|
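A sketch for `parallel_handler` using only the imports the stub already pulls in (`concurrent.futures`, `functools.partial`): bind the keyword arguments, map the callback over the texts with a thread pool, and yield results in order. The real xmnlp helper may batch or collect differently.

```python
import concurrent.futures as futures
from functools import partial
from typing import Any, Callable, Generator, List

def parallel_handler(callback: Callable, texts: List[str], n_jobs: int = 2, **kwargs) -> Generator[
    List[Any], None, None
]:
    """Run callback over texts with a small thread pool (sketch)."""
    fn = partial(callback, **kwargs)
    with futures.ThreadPoolExecutor(max_workers=n_jobs) as executor:
        # executor.map preserves input order and yields results lazily
        yield from executor.map(fn, texts)
```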
parsel.utils.shorten
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
Based on the information above, please complete the function:
#CURRENT_FILE: parsel/parsel/utils.py
import re
from typing import Any, Iterable, Iterator, List, Match, Pattern, Union, cast
from w3lib.html import replace_entities as w3lib_replace_entities
def shorten(text: str, width: int, suffix: str = "...") -> str:
"""Truncate the given text to fit in the given width."""
|
parsel/parsel/utils.py
|
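One straightforward completion for `shorten`, consistent with the docstring: return the text unchanged when it fits, append the suffix when there is room for it, and fall back to a clipped suffix for very small widths.

```python
def shorten(text: str, width: int, suffix: str = "...") -> str:
    """Truncate the given text to fit in the given width (sketch)."""
    if len(text) <= width:
        return text
    if width > len(suffix):
        return text[: width - len(suffix)] + suffix
    if width >= 0:
        return suffix[:width]
    raise ValueError("width must be equal or greater than 0")
```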
parsel.xpathfuncs.set_xpathfunc
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE parsel/parsel/xpathfuncs.py
def setup() -> None:
set_xpathfunc("has-class", has_class)
Based on the information above, please complete the function:
#CURRENT_FILE: parsel/parsel/xpathfuncs.py
import re
from typing import Any, Callable, Optional
from lxml import etree
from w3lib.html import HTML5_WHITESPACE
def set_xpathfunc(fname: str, func: Optional[Callable]) -> None: # type: ignore[type-arg]
"""Register a custom extension function to use in XPath expressions.
The function ``func`` registered under ``fname`` identifier will be called
for every matching node, being passed a ``context`` parameter as well as
any parameters passed from the corresponding XPath expression.
If ``func`` is ``None``, the extension function will be removed.
See more `in lxml documentation`_.
.. _`in lxml documentation`: https://lxml.de/extensions.html#xpath-extension-functions
"""
|
parsel/parsel/xpathfuncs.py
|
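A sketch for `set_xpathfunc` relying on lxml's `etree.FunctionNamespace` extension registry, which the linked lxml documentation describes: register the callable under the default (un-prefixed) namespace, and delete the entry when `func` is None.

```python
from typing import Callable, Optional

from lxml import etree

def set_xpathfunc(fname: str, func: Optional[Callable]) -> None:
    # Sketch: use lxml's default (None) extension function namespace.
    ns_fns = etree.FunctionNamespace(None)
    if func is not None:
        ns_fns[fname] = func
    else:
        del ns_fns[fname]
```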
dominate.dom_tag._get_thread_context
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE dominate/dominate/dom_tag.py
def attr(*args, **kwargs):
'''
Set attributes on the current active tag context
'''
c = get_current()
dicts = args + (kwargs,)
for d in dicts:
for attr, value in d.items():
c.set_attribute(*dom_tag.clean_pair(attr, value))
Based on the information above, please complete the function:
#CURRENT_FILE: dominate/dominate/dom_tag.py
import copy
import numbers
from collections import defaultdict, namedtuple
from functools import wraps
import threading
from collections.abc import Callable
from collections import Callable
import greenlet
from . import util
def _get_thread_context():
|
dominate/dominate/dom_tag.py
|
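Given the imports shown in the stub (threading plus greenlet), a plausible sketch of `_get_thread_context` builds a hashable key from the current thread and, when available, the current greenlet. dominate's real implementation may guard the greenlet import; this version assumes it imported successfully.

```python
import threading

import greenlet  # assumed importable, as in the stub above

def _get_thread_context():
    # Combine thread identity and greenlet identity into one hashable key.
    context = [threading.current_thread()]
    if greenlet:
        context.append(greenlet.getcurrent())
    return hash(tuple(context))
```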
dominate.util.system
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
Based on the information above, please complete the function:
#CURRENT_FILE: dominate/dominate/util.py
import re
from .dom_tag import dom_tag
import subprocess
def system(cmd, data=None):
'''
pipes the output of a program
'''
|
dominate/dominate/util.py
|
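A minimal sketch for `system` ("pipes the output of a program"): run the command with subprocess, feed optional bytes to stdin, and return the decoded stdout. Whether dominate wraps the result in a dom_tag is not shown in the context, so this stays deliberately plain.

```python
import subprocess

def system(cmd, data=None):
    '''
    pipes the output of a program (sketch)
    '''
    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _ = p.communicate(data)
    return out.decode('utf8')
```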
dominate.util.url_unescape
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE dominate/dominate/util.py
def unescape(data):
'''
unescapes html entities. the opposite of escape.
'''
cc = re.compile(r'&(?:(?:#(\d+))|([^;]+));')
result = []
m = cc.search(data)
while m:
result.append(data[0:m.start()])
d = m.group(1)
if d:
d = int(d)
result.append(unichr(d))
else:
d = _unescape.get(m.group(2), ord('?'))
result.append(unichr(d))
data = data[m.end():]
m = cc.search(data)
result.append(data)
return ''.join(result)
# FILE dominate/dominate/util.py
def escape(data, quote=True): # stolen from std lib cgi
'''
Escapes special characters into their html entities
Replace special characters "&", "<" and ">" to HTML-safe sequences.
If the optional flag quote is true, the quotation mark character (")
is also translated.
This is used to escape content that appears in the body of an HTML document
'''
data = data.replace("&", "&") # Must be done first!
data = data.replace("<", "<")
data = data.replace(">", ">")
if quote:
data = data.replace('"', """)
return data
Based on the information above, please complete the function:
#CURRENT_FILE: dominate/dominate/util.py
import re
from .dom_tag import dom_tag
import subprocess
def url_unescape(data):
|
dominate/dominate/util.py
|
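A sketch for `url_unescape` that decodes %XX escapes with a regular expression, matching the regex-based style of the `unescape` helper above; `urllib.parse.unquote` would be an equally valid choice.

```python
import re

def url_unescape(data):
    # Replace each %XX escape with the character it encodes.
    return re.sub(r'%([0-9a-fA-F]{2})',
                  lambda m: chr(int(m.group(1), 16)),
                  data)
```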
rows.fields.DatetimeField.serialize
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE rows/rows/fields.py
def fields(self):
possible, skip = self._possible_types, self._skip
if possible:
# Create a header with placeholder values for each detected column
# and then join this placeholders with original header - the
# original header may have less columns then the detected ones, so
# we end with a full header having a name for every possible
# column.
placeholders = make_header(range(max(possible.keys()) + 1))
header = [a or b for a, b in zip_longest(self.field_names, placeholders)]
else:
header = self.field_names
return OrderedDict(
[
(
field_name,
self.priority(*(possible[index] if index in possible else [])),
)
for index, field_name in enumerate(header)
if index not in skip
]
)
# LIB six.py
def b(s):
return s
# LIB six.py
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
Based on the information above, please complete the function:
#CURRENT_FILE: rows/rows/fields.py
from __future__ import unicode_literals
import binascii
import datetime
import json
import locale
import re
from base64 import b64decode, b64encode
from collections import OrderedDict, defaultdict
from decimal import Decimal, InvalidOperation
from unicodedata import normalize
import six
from itertools import izip_longest as zip_longest
from itertools import zip_longest
class DatetimeField(Field):
"""Field class to represent date-time
Is not locale-aware (does not need to be)
"""
TYPE = (datetime.datetime,)
DATETIME_REGEXP = re.compile(
"^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]" "([0-9]{2}):([0-9]{2}):([0-9]{2})$"
)
@classmethod
def serialize(cls, value, *args, **kwargs):
|
rows/rows/fields.py
|
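The expected body for `DatetimeField.serialize` appears verbatim in the context of the next row: None serializes to an empty string, anything else to its ISO-8601 text.

```python
# inside class DatetimeField(Field):
@classmethod
def serialize(cls, value, *args, **kwargs):
    if value is None:
        return ""
    return six.text_type(value.isoformat())
```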
rows.fields.Field.serialize
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE rows/rows/fields.py
def is_null(value):
if value is None:
return True
elif type(value) is six.binary_type:
value = value.strip().lower()
return not value or value in NULL_BYTES
else:
value_str = as_string(value).strip().lower()
return not value_str or value_str in NULL
# FILE rows/rows/fields.py
def detect_types(
field_names,
field_values,
field_types=DEFAULT_TYPES,
skip_indexes=None,
type_detector=TypeDetector,
fallback_type=TextField,
*args,
**kwargs
):
"""Detect column types (or "where the magic happens")"""
# TODO: look strategy of csv.Sniffer.has_header
# TODO: may receive 'type hints'
detector = type_detector(
field_names,
field_types=field_types,
fallback_type=fallback_type,
skip_indexes=skip_indexes,
)
detector.feed(field_values)
return detector.fields
# FILE rows/rows/fields.py
def fields(self):
possible, skip = self._possible_types, self._skip
if possible:
# Create a header with placeholder values for each detected column
# and then join this placeholders with original header - the
# original header may have less columns then the detected ones, so
# we end with a full header having a name for every possible
# column.
placeholders = make_header(range(max(possible.keys()) + 1))
header = [a or b for a, b in zip_longest(self.field_names, placeholders)]
else:
header = self.field_names
return OrderedDict(
[
(
field_name,
self.priority(*(possible[index] if index in possible else [])),
)
for index, field_name in enumerate(header)
if index not in skip
]
)
# FILE rows/rows/fields.py
class DatetimeField(Field):
"""Field class to represent date-time
Is not locale-aware (does not need to be)
"""
TYPE = (datetime.datetime,)
DATETIME_REGEXP = re.compile(
"^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]" "([0-9]{2}):([0-9]{2}):([0-9]{2})$"
)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value.isoformat())
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DatetimeField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
# TODO: may use iso8601
groups = cls.DATETIME_REGEXP.findall(value)
if not groups:
value_error(value, cls)
else:
return datetime.datetime(*[int(x) for x in groups[0]])
# LIB six.py
def ensure_binary(s, encoding='utf-8', errors='strict'):
"""Coerce **s** to six.binary_type.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> encoded to `bytes`
- `bytes` -> `bytes`
"""
if isinstance(s, binary_type):
return s
if isinstance(s, text_type):
return s.encode(encoding, errors)
raise TypeError("not expecting type '%s'" % type(s))
# FILE rows/rows/fields.py
class DatetimeField(Field):
"""Field class to represent date-time
Is not locale-aware (does not need to be)
"""
def serialize(cls, value, *args, **kwargs):
...
def deserialize(cls, value, *args, **kwargs):
value = super(DatetimeField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
# TODO: may use iso8601
groups = cls.DATETIME_REGEXP.findall(value)
if not groups:
value_error(value, cls)
else:
return datetime.datetime(*[int(x) for x in groups[0]])
# FILE rows/rows/fields.py
class IntegerField(Field):
"""Field class to represent integer
Is locale-aware
"""
def serialize(cls, value, *args, **kwargs):
...
def deserialize(cls, value, *args, **kwargs):
value = super(IntegerField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
elif isinstance(value, float):
new_value = int(value)
if new_value != value:
raise ValueError("It's float, not integer")
else:
value = new_value
value = as_string(value)
if value != "0" and value.startswith("0"):
raise ValueError("It's string, not integer")
return int(value) if SHOULD_NOT_USE_LOCALE else locale.atoi(value)
# LIB six.py
def ensure_text(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to six.text_type.
For Python 2:
- `unicode` -> `unicode`
- `str` -> `unicode`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if isinstance(s, binary_type):
return s.decode(encoding, errors)
elif isinstance(s, text_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
# FILE rows/rows/fields.py
class IntegerField(Field):
"""Field class to represent integer
Is locale-aware
"""
TYPE = (int,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
if SHOULD_NOT_USE_LOCALE:
return six.text_type(value)
else:
grouping = kwargs.get("grouping", None)
return locale.format("%d", value, grouping=grouping)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(IntegerField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
elif isinstance(value, float):
new_value = int(value)
if new_value != value:
raise ValueError("It's float, not integer")
else:
value = new_value
value = as_string(value)
if value != "0" and value.startswith("0"):
raise ValueError("It's string, not integer")
return int(value) if SHOULD_NOT_USE_LOCALE else locale.atoi(value)
# FILE rows/rows/fields.py
class DateField(Field):
"""Field class to represent date
Is not locale-aware (does not need to be)
"""
TYPE = (datetime.date,)
INPUT_FORMAT = "%Y-%m-%d"
OUTPUT_FORMAT = "%Y-%m-%d"
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value.strftime(cls.OUTPUT_FORMAT))
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DateField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
dt_object = datetime.datetime.strptime(value, cls.INPUT_FORMAT)
return datetime.date(dt_object.year, dt_object.month, dt_object.day)
# FILE rows/rows/fields.py
class JSONField(Field):
"""Field class to represent JSON-encoded strings
Is not locale-aware (does not need to be)
"""
def serialize(cls, value, *args, **kwargs):
...
def deserialize(cls, value, *args, **kwargs):
value = super(JSONField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
else:
return json.loads(value)
# FILE rows/rows/fields.py
class JSONField(Field):
"""Field class to represent JSON-encoded strings
Is not locale-aware (does not need to be)
"""
TYPE = (list, dict)
@classmethod
def serialize(cls, value, *args, **kwargs):
return json.dumps(value)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(JSONField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
else:
return json.loads(value)
# FILE rows/rows/fields.py
class FloatField(Field):
"""Field class to represent float
Is locale-aware
"""
def serialize(cls, value, *args, **kwargs):
...
def deserialize(cls, value, *args, **kwargs):
value = super(FloatField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
if SHOULD_NOT_USE_LOCALE:
return float(value)
else:
return locale.atof(value)
# FILE rows/rows/fields.py
class FloatField(Field):
"""Field class to represent float
Is locale-aware
"""
TYPE = (float,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
if SHOULD_NOT_USE_LOCALE:
return six.text_type(value)
else:
grouping = kwargs.get("grouping", None)
return locale.format("%f", value, grouping=grouping)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(FloatField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
if SHOULD_NOT_USE_LOCALE:
return float(value)
else:
return locale.atof(value)
# FILE rows/rows/fields.py
class BinaryField(Field):
"""Field class to represent byte arrays
Is not locale-aware (does not need to be)
"""
def serialize(cls, value, *args, **kwargs):
...
def deserialize(cls, value, *args, **kwargs):
if value is not None:
if isinstance(value, six.binary_type):
return value
elif isinstance(value, six.text_type):
try:
return b64decode(value)
except (TypeError, ValueError, binascii.Error):
raise ValueError("Can't decode base64")
else:
value_error(value, cls)
else:
return b""
# FILE rows/rows/fields.py
class TextField(Field):
"""Field class to represent unicode strings
Is not locale-aware (does not need to be)
"""
def deserialize(cls, value, *args, **kwargs):
if value is None or isinstance(value, cls.TYPE):
return value
else:
return as_string(value)
# FILE rows/rows/fields.py
class BoolField(Field):
"""Base class to representing boolean
Is not locale-aware (if you need to, please customize by changing its
attributes like `TRUE_VALUES` and `FALSE_VALUES`)
"""
TYPE = (bool,)
SERIALIZED_VALUES = {True: "true", False: "false", None: ""}
TRUE_VALUES = ("true", "yes")
FALSE_VALUES = ("false", "no")
@classmethod
def serialize(cls, value, *args, **kwargs):
# TODO: should we serialize `None` as well or give it to the plugin?
return cls.SERIALIZED_VALUES[value]
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(BoolField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value).lower()
if value in cls.TRUE_VALUES:
return True
elif value in cls.FALSE_VALUES:
return False
else:
raise ValueError("Value is not boolean")
# FILE rows/rows/fields.py
def as_string(value):
if isinstance(value, six.binary_type):
raise ValueError("Binary is not supported")
elif isinstance(value, six.text_type):
return value
else:
return six.text_type(value)
# FILE rows/rows/fields.py
class BinaryField(Field):
"""Field class to represent byte arrays
Is not locale-aware (does not need to be)
"""
TYPE = (six.binary_type,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is not None:
if not isinstance(value, six.binary_type):
value_error(value, cls)
else:
try:
return b64encode(value).decode("ascii")
except (TypeError, binascii.Error):
return value
else:
return ""
@classmethod
def deserialize(cls, value, *args, **kwargs):
if value is not None:
if isinstance(value, six.binary_type):
return value
elif isinstance(value, six.text_type):
try:
return b64decode(value)
except (TypeError, ValueError, binascii.Error):
raise ValueError("Can't decode base64")
else:
value_error(value, cls)
else:
return b""
# FILE rows/rows/fields.py
class TextField(Field):
"""Field class to represent unicode strings
Is not locale-aware (does not need to be)
"""
TYPE = (six.text_type,)
@classmethod
def deserialize(cls, value, *args, **kwargs):
if value is None or isinstance(value, cls.TYPE):
return value
else:
return as_string(value)
# LIB six.py
def b(s):
return s
# LIB six.py
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
# FILE rows/rows/fields.py
class DateField(Field):
"""Field class to represent date
Is not locale-aware (does not need to be)
"""
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value.strftime(cls.OUTPUT_FORMAT))
def deserialize(cls, value, *args, **kwargs):
...
# FILE rows/rows/fields.py
class PercentField(DecimalField):
"""Field class to represent percent values
Is locale-aware (inherit this behaviour from `rows.DecimalField`)
"""
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
elif value == Decimal("0"):
return "0.00%"
value = Decimal(six.text_type(value * 100)[:-2])
value = super(PercentField, cls).serialize(value, *args, **kwargs)
return "{}%".format(value)
def deserialize(cls, value, *args, **kwargs):
...
# FILE rows/rows/fields.py
class BoolField(Field):
"""Base class to representing boolean
Is not locale-aware (if you need to, please customize by changing its
attributes like `TRUE_VALUES` and `FALSE_VALUES`)
"""
def serialize(cls, value, *args, **kwargs):
# TODO: should we serialize `None` as well or give it to the plugin?
return cls.SERIALIZED_VALUES[value]
def deserialize(cls, value, *args, **kwargs):
...
# FILE rows/rows/fields.py
class IntegerField(Field):
"""Field class to represent integer
Is locale-aware
"""
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
if SHOULD_NOT_USE_LOCALE:
return six.text_type(value)
else:
grouping = kwargs.get("grouping", None)
return locale.format("%d", value, grouping=grouping)
def deserialize(cls, value, *args, **kwargs):
...
# FILE rows/rows/fields.py
class EmailField(TextField):
"""Field class to represent e-mail addresses
Is not locale-aware (does not need to be)
"""
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value)
def deserialize(cls, value, *args, **kwargs):
...
# FILE rows/rows/fields.py
class DecimalField(Field):
"""Field class to represent decimal data (as Python's decimal.Decimal)
Is locale-aware
"""
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
value_as_string = six.text_type(value)
if SHOULD_NOT_USE_LOCALE:
return value_as_string
else:
grouping = kwargs.get("grouping", None)
has_decimal_places = value_as_string.find(".") != -1
if not has_decimal_places:
string_format = "%d"
else:
decimal_places = len(value_as_string.split(".")[1])
string_format = "%.{}f".format(decimal_places)
return locale.format(string_format, value, grouping=grouping)
def deserialize(cls, value, *args, **kwargs):
...
# FILE rows/rows/fields.py
class BinaryField(Field):
"""Field class to represent byte arrays
Is not locale-aware (does not need to be)
"""
def serialize(cls, value, *args, **kwargs):
if value is not None:
if not isinstance(value, six.binary_type):
value_error(value, cls)
else:
try:
return b64encode(value).decode("ascii")
except (TypeError, binascii.Error):
return value
else:
return ""
def deserialize(cls, value, *args, **kwargs):
...
# FILE rows/rows/fields.py
class FloatField(Field):
"""Field class to represent float
Is locale-aware
"""
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
if SHOULD_NOT_USE_LOCALE:
return six.text_type(value)
else:
grouping = kwargs.get("grouping", None)
return locale.format("%f", value, grouping=grouping)
def deserialize(cls, value, *args, **kwargs):
...
# FILE rows/rows/fields.py
class JSONField(Field):
"""Field class to represent JSON-encoded strings
Is not locale-aware (does not need to be)
"""
def serialize(cls, value, *args, **kwargs):
return json.dumps(value)
def deserialize(cls, value, *args, **kwargs):
...
# FILE rows/rows/fields.py
class DatetimeField(Field):
"""Field class to represent date-time
Is not locale-aware (does not need to be)
"""
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value.isoformat())
def deserialize(cls, value, *args, **kwargs):
...
Based on the information above, please complete the function:
#CURRENT_FILE: rows/rows/fields.py
from __future__ import unicode_literals
import binascii
import datetime
import json
import locale
import re
from base64 import b64decode, b64encode
from collections import OrderedDict, defaultdict
from decimal import Decimal, InvalidOperation
from unicodedata import normalize
import six
from itertools import izip_longest as zip_longest
from itertools import zip_longest
class Field(object):
"""Base Field class - all fields should inherit from this
As the fallback for all other field types are the BinaryField, this Field
actually implements what is expected in the BinaryField
"""
TYPE = (type(None),)
@classmethod
def serialize(cls, value, *args, **kwargs):
"""Serialize a value to be exported
`cls.serialize` should always return an unicode value, except for
BinaryField
"""
|
rows/rows/fields.py
|
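A minimal sketch for the base `Field.serialize`, assuming the base class only normalizes None to an empty string and otherwise returns the value untouched, consistent with the docstring and with the subclass overrides shown above. This is an inference, not a quote from the library.

```python
# inside class Field(object):
@classmethod
def serialize(cls, value, *args, **kwargs):
    """Serialize a value to be exported (sketch)."""
    if value is None:
        value = ""
    return value
```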
rows.fields.EmailField.serialize
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE rows/rows/fields.py
def unique_values(values):
result = []
for value in values:
if not is_null(value) and value not in result:
result.append(value)
return result
# FILE rows/rows/fields.py
def detect_types(
field_names,
field_values,
field_types=DEFAULT_TYPES,
skip_indexes=None,
type_detector=TypeDetector,
fallback_type=TextField,
*args,
**kwargs
):
"""Detect column types (or "where the magic happens")"""
# TODO: look strategy of csv.Sniffer.has_header
# TODO: may receive 'type hints'
detector = type_detector(
field_names,
field_types=field_types,
fallback_type=fallback_type,
skip_indexes=skip_indexes,
)
detector.feed(field_values)
return detector.fields
# FILE rows/rows/fields.py
def is_null(value):
if value is None:
return True
elif type(value) is six.binary_type:
value = value.strip().lower()
return not value or value in NULL_BYTES
else:
value_str = as_string(value).strip().lower()
return not value_str or value_str in NULL
# FILE rows/rows/fields.py
class TextField(Field):
"""Field class to represent unicode strings
Is not locale-aware (does not need to be)
"""
TYPE = (six.text_type,)
@classmethod
def deserialize(cls, value, *args, **kwargs):
if value is None or isinstance(value, cls.TYPE):
return value
else:
return as_string(value)
# FILE rows/rows/fields.py
def as_string(value):
if isinstance(value, six.binary_type):
raise ValueError("Binary is not supported")
elif isinstance(value, six.text_type):
return value
else:
return six.text_type(value)
# LIB six.py
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
Based on the information above, please complete the function:
#CURRENT_FILE: rows/rows/fields.py
from __future__ import unicode_literals
import binascii
import datetime
import json
import locale
import re
from base64 import b64decode, b64encode
from collections import OrderedDict, defaultdict
from decimal import Decimal, InvalidOperation
from unicodedata import normalize
import six
from itertools import izip_longest as zip_longest
from itertools import zip_longest
class EmailField(TextField):
"""Field class to represent e-mail addresses
Is not locale-aware (does not need to be)
"""
EMAIL_REGEXP = re.compile(
r"^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]+$", flags=re.IGNORECASE
)
@classmethod
def serialize(cls, value, *args, **kwargs):
|
rows/rows/fields.py
|
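The expected body for `EmailField.serialize` is shown verbatim in the next row's context: None becomes an empty string, anything else is coerced to text.

```python
# inside class EmailField(TextField):
@classmethod
def serialize(cls, value, *args, **kwargs):
    if value is None:
        return ""
    return six.text_type(value)
```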
rows.fields.as_string
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE rows/rows/fields.py
def fields(self):
possible, skip = self._possible_types, self._skip
if possible:
# Create a header with placeholder values for each detected column
# and then join this placeholders with original header - the
# original header may have less columns then the detected ones, so
# we end with a full header having a name for every possible
# column.
placeholders = make_header(range(max(possible.keys()) + 1))
header = [a or b for a, b in zip_longest(self.field_names, placeholders)]
else:
header = self.field_names
return OrderedDict(
[
(
field_name,
self.priority(*(possible[index] if index in possible else [])),
)
for index, field_name in enumerate(header)
if index not in skip
]
)
# FILE rows/rows/fields.py
class EmailField(TextField):
"""Field class to represent e-mail addresses
Is not locale-aware (does not need to be)
"""
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value)
def deserialize(cls, value, *args, **kwargs):
...
# LIB six.py
def b(s):
return s
# LIB six.py
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
Based on the information above, please complete the function:
#CURRENT_FILE: rows/rows/fields.py
from __future__ import unicode_literals
import binascii
import datetime
import json
import locale
import re
from base64 import b64decode, b64encode
from collections import OrderedDict, defaultdict
from decimal import Decimal, InvalidOperation
from unicodedata import normalize
import six
from itertools import izip_longest as zip_longest
from itertools import zip_longest
def as_string(value):
|
rows/rows/fields.py
|
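`as_string` itself appears verbatim in the supporting context of several earlier rows, so the completion is simply that function: reject bytes, pass text through, coerce everything else.

```python
def as_string(value):
    if isinstance(value, six.binary_type):
        raise ValueError("Binary is not supported")
    elif isinstance(value, six.text_type):
        return value
    else:
        return six.text_type(value)
```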
rows.fields.get_items
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE rows/rows/fields.py
def fields(self):
possible, skip = self._possible_types, self._skip
if possible:
# Create a header with placeholder values for each detected column
# and then join this placeholders with original header - the
# original header may have less columns then the detected ones, so
# we end with a full header having a name for every possible
# column.
placeholders = make_header(range(max(possible.keys()) + 1))
header = [a or b for a, b in zip_longest(self.field_names, placeholders)]
else:
header = self.field_names
return OrderedDict(
[
(
field_name,
self.priority(*(possible[index] if index in possible else [])),
)
for index, field_name in enumerate(header)
if index not in skip
]
)
# LIB six.py
def b(s):
return s
# LIB six.py
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
# LIB six.py
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
Based on the information above, please complete the function:
#CURRENT_FILE: rows/rows/fields.py
from __future__ import unicode_literals
import binascii
import datetime
import json
import locale
import re
from base64 import b64decode, b64encode
from collections import OrderedDict, defaultdict
from decimal import Decimal, InvalidOperation
from unicodedata import normalize
import six
from itertools import izip_longest as zip_longest
from itertools import zip_longest
def get_items(*indexes):
"""Return a callable that fetches the given indexes of an object
Always return a tuple even when len(indexes) == 1.
Similar to `operator.itemgetter`, but will insert `None` when the object
does not have the desired index (instead of raising IndexError).
"""
|
rows/rows/fields.py
|
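A sketch for `get_items` matching its docstring: like `operator.itemgetter`, but always returning a tuple and substituting None for indexes the object does not have.

```python
def get_items(*indexes):
    """Return a callable that fetches the given indexes of an object (sketch)."""
    return lambda obj: tuple(
        obj[index] if len(obj) > index else None
        for index in indexes
    )
```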
pycorrector.proper_corrector.load_dict_file
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
Based on the information above, please complete the function:
#CURRENT_FILE: pycorrector/pycorrector/proper_corrector.py
import os
from codecs import open
import pypinyin
from loguru import logger
from pycorrector import config
from pycorrector.utils.math_utils import edit_distance
from pycorrector.utils.ngram_util import NgramUtil
from pycorrector.utils.text_utils import is_chinese
from pycorrector.utils.tokenizer import segment, split_2_short_text
def load_dict_file(path):
"""
加载词典
:param path:
:return:
"""
|
pycorrector/pycorrector/proper_corrector.py
|
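A hedged sketch of `load_dict_file`: read the file line by line, skip comments, and collect the first whitespace-separated token of each line into a set. The file layout and the set return type are assumptions; pycorrector's actual dictionary format may differ.

```python
import os

from loguru import logger

def load_dict_file(path):
    """
    加载词典 (sketch): one entry per line, '#' starts a comment.
    """
    result = set()
    if not path:
        return result
    if not os.path.exists(path):
        logger.warning('file not found: %s' % path)
        return result
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            # keep only the first column; extra columns (e.g. frequency) are ignored
            result.add(line.split()[0])
    return result
```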
natasha.span.envelop_spans
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE natasha/natasha/span.py
def adapt_spans(spans):
for span in spans:
yield Span(span.start, span.stop, span.type)
# FILE natasha/natasha/span.py
class Span(Record):
__attributes__ = ['start', 'stop', 'type']
Based on the information above, please complete the function:
#CURRENT_FILE: natasha/natasha/span.py
from .record import Record
def envelop_spans(spans, envelopes):
|
natasha/natasha/span.py
|
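A sketch for `envelop_spans`, under the assumption that both `spans` and `envelopes` are sorted by offset and that every span lies inside some envelope: for each envelope, collect the spans that fall completely within it and yield them as one group.

```python
def envelop_spans(spans, envelopes):
    # Assumes both sequences are sorted by start offset.
    spans = iter(spans)
    span = next(spans, None)
    for envelope in envelopes:
        chunk = []
        while span and envelope.start <= span.start and span.stop <= envelope.stop:
            chunk.append(span)
            span = next(spans, None)
        yield chunk
```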
googleapiclient._helpers.parse_unique_urlencoded
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE google-api-python-client/googleapiclient/_helpers.py
def _add_query_parameter(url, name, value):
"""Adds a query parameter to a url.
Replaces the current value if it already exists in the URL.
Args:
url: string, url to add the query parameter to.
name: string, query parameter name.
value: string, query parameter value.
Returns:
Updated query parameter. Does not update the url if value is None.
"""
if value is None:
return url
else:
return update_query_params(url, {name: value})
# FILE google-api-python-client/googleapiclient/_helpers.py
def update_query_params(uri, params):
"""Updates a URI with new query parameters.
If a given key from ``params`` is repeated in the ``uri``, then
the URI will be considered invalid and an error will occur.
If the URI is valid, then each value from ``params`` will
replace the corresponding value in the query parameters (if
it exists).
Args:
uri: string, A valid URI, with potential existing query parameters.
params: dict, A dictionary of query parameters.
Returns:
The same URI but with the new query parameters added.
"""
parts = urllib.parse.urlparse(uri)
query_params = parse_unique_urlencoded(parts.query)
query_params.update(params)
new_query = urllib.parse.urlencode(query_params)
new_parts = parts._replace(query=new_query)
return urllib.parse.urlunparse(new_parts)
Based on the information above, please complete the function:
#CURRENT_FILE: google-api-python-client/googleapiclient/_helpers.py
import functools
import inspect
import logging
import urllib
def parse_unique_urlencoded(content):
"""Parses unique key-value parameters from urlencoded content.
Args:
content: string, URL-encoded key-value pairs.
Returns:
dict, The key-value pairs from ``content``.
Raises:
ValueError: if one of the keys is repeated.
"""
|
google-api-python-client/googleapiclient/_helpers.py
|
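A sketch for `parse_unique_urlencoded` built on `urllib.parse.parse_qs`, raising ValueError when any key maps to more than one value, as the docstring requires.

```python
import urllib.parse

def parse_unique_urlencoded(content):
    """Parse URL-encoded key-value pairs, rejecting repeated keys (sketch)."""
    urlencoded_params = urllib.parse.parse_qs(content)
    params = {}
    for key, value in urlencoded_params.items():
        if len(value) != 1:
            raise ValueError(
                "URL-encoded content contains a repeated value: "
                "%s -> %s" % (key, ", ".join(value))
            )
        params[key] = value[0]
    return params
```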
jinja2.async_utils.auto_aiter
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE Jinja2/src/jinja2/async_utils.py
async def auto_to_list(
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
) -> t.List["V"]:
return [x async for x in auto_aiter(value)]
# FILE Jinja2/src/jinja2/async_utils.py
async def auto_await(value: t.Union[t.Awaitable["V"], "V"]) -> "V":
# Avoid a costly call to isawaitable
if type(value) in _common_primitives:
return t.cast("V", value)
if inspect.isawaitable(value):
return await t.cast("t.Awaitable[V]", value)
return t.cast("V", value)
Based on the information above, please complete the function:
#CURRENT_FILE: Jinja2/src/jinja2/async_utils.py
import inspect
import typing as t
from functools import WRAPPER_ASSIGNMENTS
from functools import wraps
from .utils import _PassArg
from .utils import pass_eval_context
async def auto_aiter(
iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
) -> "t.AsyncIterator[V]":
|
Jinja2/src/jinja2/async_utils.py
|
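A sketch for `auto_aiter` that yields from either kind of iterable: use `async for` when the object exposes `__aiter__`, otherwise fall back to a plain loop. This keeps the `auto_to_list` helper shown above working for both sync and async inputs.

```python
import typing as t

V = t.TypeVar("V")

async def auto_aiter(
    iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
) -> "t.AsyncIterator[V]":
    # Async iterables are consumed natively; plain iterables are re-yielded.
    if hasattr(iterable, "__aiter__"):
        async for item in t.cast("t.AsyncIterable[V]", iterable):
            yield item
    else:
        for item in t.cast("t.Iterable[V]", iterable):
            yield item
```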
jinja2.utils.consume
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE Jinja2/src/jinja2/utils.py
class LRUCache:
"""A simple LRU Cache implementation."""
def __init__(self, capacity: int) -> None:
self.capacity = capacity
self._mapping: t.Dict[t.Any, t.Any] = {}
self._queue: "te.Deque[t.Any]" = deque()
self._postinit()
def _postinit(self) -> None:
...
def __getstate__(self) -> t.Mapping[str, t.Any]:
...
def __setstate__(self, d: t.Mapping[str, t.Any]) -> None:
...
def __getnewargs__(self) -> t.Tuple:
...
def copy(self) -> "LRUCache":
"""Return a shallow copy of the instance."""
...
def get(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Return an item from the cache dict or `default`"""
...
def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
...
def clear(self) -> None:
"""Clear the cache."""
...
def __contains__(self, key: t.Any) -> bool:
"""Check if a key exists in this cache."""
...
def __len__(self) -> int:
"""Return the current size of the cache."""
...
def __repr__(self) -> str:
...
def __getitem__(self, key: t.Any) -> t.Any:
"""Get an item from the cache. Moves the item up so that it has the
highest priority then.
Raise a `KeyError` if it does not exist.
"""
...
def __setitem__(self, key: t.Any, value: t.Any) -> None:
"""Sets the value for an item. Moves the item up so that it
has the highest priority then.
"""
...
def __delitem__(self, key: t.Any) -> None:
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
...
def items(self) -> t.Iterable[t.Tuple[t.Any, t.Any]]:
"""Return a list of items."""
...
def values(self) -> t.Iterable[t.Any]:
"""Return a list of all values."""
return [x[1] for x in self.items()]
def keys(self) -> t.Iterable[t.Any]:
"""Return a list of all keys ordered by most recent usage."""
...
def __iter__(self) -> t.Iterator[t.Any]:
...
def __reversed__(self) -> t.Iterator[t.Any]:
"""Iterate over the keys in the cache dict, oldest items
coming first.
"""
...
# FILE Jinja2/src/jinja2/runtime.py
class Undefined:
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
def __init__(
self,
hint: t.Optional[str] = None,
obj: t.Any = missing,
name: t.Optional[str] = None,
exc: t.Type[TemplateRuntimeError] = UndefinedError,
) -> None:
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
def _undefined_message(self) -> str:
"""Build a message about the undefined value based on how it was
accessed.
"""
...
def _fail_with_undefined_error(self, *args: t.Any, **kwargs: t.Any) -> "te.NoReturn":
"""Raise an :exc:`UndefinedError` when operations are performed
on the undefined value.
"""
...
def __getattr__(self, name: str) -> t.Any:
...
def __eq__(self, other: t.Any) -> bool:
...
def __ne__(self, other: t.Any) -> bool:
...
def __hash__(self) -> int:
...
def __str__(self) -> str:
...
def __len__(self) -> int:
...
def __iter__(self) -> t.Iterator[t.Any]:
yield from ()
def __bool__(self) -> bool:
...
def __repr__(self) -> str:
...
# FILE Jinja2/src/jinja2/utils.py
class LRUCache:
"""A simple LRU Cache implementation."""
def __init__(self, capacity: int) -> None:
self.capacity = capacity
self._mapping: t.Dict[t.Any, t.Any] = {}
self._queue: "te.Deque[t.Any]" = deque()
self._postinit()
def _postinit(self) -> None:
...
def __getstate__(self) -> t.Mapping[str, t.Any]:
...
def __setstate__(self, d: t.Mapping[str, t.Any]) -> None:
...
def __getnewargs__(self) -> t.Tuple:
...
def copy(self) -> "LRUCache":
"""Return a shallow copy of the instance."""
...
def get(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Return an item from the cache dict or `default`"""
...
def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
...
def clear(self) -> None:
"""Clear the cache."""
...
def __contains__(self, key: t.Any) -> bool:
"""Check if a key exists in this cache."""
...
def __len__(self) -> int:
"""Return the current size of the cache."""
...
def __repr__(self) -> str:
...
def __getitem__(self, key: t.Any) -> t.Any:
"""Get an item from the cache. Moves the item up so that it has the
highest priority then.
Raise a `KeyError` if it does not exist.
"""
...
def __setitem__(self, key: t.Any, value: t.Any) -> None:
"""Sets the value for an item. Moves the item up so that it
has the highest priority then.
"""
...
def __delitem__(self, key: t.Any) -> None:
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
...
def items(self) -> t.Iterable[t.Tuple[t.Any, t.Any]]:
"""Return a list of items."""
result = [(key, self._mapping[key]) for key in list(self._queue)]
result.reverse()
return result
def values(self) -> t.Iterable[t.Any]:
"""Return a list of all values."""
...
def keys(self) -> t.Iterable[t.Any]:
"""Return a list of all keys ordered by most recent usage."""
...
def __iter__(self) -> t.Iterator[t.Any]:
...
def __reversed__(self) -> t.Iterator[t.Any]:
"""Iterate over the keys in the cache dict, oldest items
coming first.
"""
...
# FILE Jinja2/src/jinja2/utils.py
class LRUCache:
"""A simple LRU Cache implementation."""
def __init__(self, capacity: int) -> None:
self.capacity = capacity
self._mapping: t.Dict[t.Any, t.Any] = {}
self._queue: "te.Deque[t.Any]" = deque()
self._postinit()
def _postinit(self) -> None:
...
def __getstate__(self) -> t.Mapping[str, t.Any]:
...
def __setstate__(self, d: t.Mapping[str, t.Any]) -> None:
...
def __getnewargs__(self) -> t.Tuple:
...
def copy(self) -> "LRUCache":
"""Return a shallow copy of the instance."""
...
def get(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Return an item from the cache dict or `default`"""
...
def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
...
def clear(self) -> None:
"""Clear the cache."""
...
def __contains__(self, key: t.Any) -> bool:
"""Check if a key exists in this cache."""
...
def __len__(self) -> int:
"""Return the current size of the cache."""
...
def __repr__(self) -> str:
...
def __getitem__(self, key: t.Any) -> t.Any:
"""Get an item from the cache. Moves the item up so that it has the
highest priority then.
Raise a `KeyError` if it does not exist.
"""
...
def __setitem__(self, key: t.Any, value: t.Any) -> None:
"""Sets the value for an item. Moves the item up so that it
has the highest priority then.
"""
...
def __delitem__(self, key: t.Any) -> None:
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
...
def items(self) -> t.Iterable[t.Tuple[t.Any, t.Any]]:
"""Return a list of items."""
...
def values(self) -> t.Iterable[t.Any]:
"""Return a list of all values."""
...
def keys(self) -> t.Iterable[t.Any]:
"""Return a list of all keys ordered by most recent usage."""
...
def __iter__(self) -> t.Iterator[t.Any]:
return reversed(tuple(self._queue))
def __reversed__(self) -> t.Iterator[t.Any]:
"""Iterate over the keys in the cache dict, oldest items
coming first.
"""
...
# FILE Jinja2/src/jinja2/utils.py
class Cycler:
"""Cycle through values by yield them one at a time, then restarting
once the end is reached. Available as ``cycler`` in templates.
Similar to ``loop.cycle``, but can be used outside loops or across
multiple loops. For example, render a list of folders and files in a
list, alternating giving them "odd" and "even" classes.
.. code-block:: html+jinja
{% set row_class = cycler("odd", "even") %}
<ul class="browser">
{% for folder in folders %}
<li class="folder {{ row_class.next() }}">{{ folder }}
{% endfor %}
{% for file in files %}
<li class="file {{ row_class.next() }}">{{ file }}
{% endfor %}
</ul>
:param items: Each positional argument will be yielded in the order
given for each cycle.
.. versionadded:: 2.1
"""
def __init__(self, *items: t.Any) -> None:
if not items:
raise RuntimeError("at least one item has to be provided")
self.items = items
self.pos = 0
def reset(self) -> None:
"""Resets the current item to the first item."""
...
def current(self) -> t.Any:
"""Return the current item. Equivalent to the item that will be
returned next time :meth:`next` is called.
"""
...
def next(self) -> t.Any:
"""Return the current item, then advance :attr:`current` to the
next item.
"""
rv = self.current
self.pos = (self.pos + 1) % len(self.items)
return rv
Based on the information above, please complete the function:
#CURRENT_FILE: Jinja2/src/jinja2/utils.py
import enum
import json
import os
import re
import typing as t
from collections import abc
from collections import deque
from random import choice
from random import randrange
from threading import Lock
from types import CodeType
from urllib.parse import quote_from_bytes
import markupsafe
import typing_extensions as te
from .runtime import Undefined
from .environment import get_spontaneous_environment
from .lexer import _lexer_cache
from pprint import pformat
from .constants import LOREM_IPSUM_WORDS
def consume(iterable: t.Iterable[t.Any]) -> None:
"""Consumes an iterable without doing anything with it."""
|
Jinja2/src/jinja2/utils.py
|
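The docstring of `consume` leaves little room for interpretation: loop over the iterable and discard every item.

```python
import typing as t

def consume(iterable: t.Iterable[t.Any]) -> None:
    """Consumes an iterable without doing anything with it."""
    for _ in iterable:
        pass
```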
pycorrector.utils.tokenizer.segment
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE pycorrector/pycorrector/utils/tokenizer.py
def split_text_by_maxlen(text, maxlen=512):
"""
文本切分为句子,以句子maxlen切分
:param text: str
:param maxlen: int, 最大长度
:return: list, (sentence, idx)
"""
result = []
for i in range(0, len(text), maxlen):
result.append((text[i:i + maxlen], i))
return result
# FILE pycorrector/pycorrector/utils/tokenizer.py
class Tokenizer(object):
def __init__(self, dict_path='', custom_word_freq_dict=None, custom_confusion_dict=None):
self.model = jieba
jieba.setLogLevel("ERROR")
# 初始化大词典
if os.path.exists(dict_path):
self.model.set_dictionary(dict_path)
# 加载用户自定义词典
if custom_word_freq_dict:
for w, f in custom_word_freq_dict.items():
self.model.add_word(w, freq=f)
# 加载混淆集词典
if custom_confusion_dict:
for k, word in custom_confusion_dict.items():
# 添加到分词器的自定义词典中
self.model.add_word(k)
self.model.add_word(word)
def tokenize(self, unicode_sentence, mode="search"):
"""
切词并返回切词位置, search mode用于错误扩召回
:param unicode_sentence: query
:param mode: search, default, ngram
:param HMM: enable HMM
:return: (w, start, start + width) model='default'
"""
if mode == 'ngram':
n = 2
result_set = set()
tokens = self.model.lcut(unicode_sentence)
tokens_len = len(tokens)
start = 0
for i in range(0, tokens_len):
w = tokens[i]
width = len(w)
result_set.add((w, start, start + width))
for j in range(i, i + n):
gram = "".join(tokens[i:j + 1])
gram_width = len(gram)
if i + j > tokens_len:
break
result_set.add((gram, start, start + gram_width))
start += width
results = list(result_set)
result = sorted(results, key=lambda x: x[-1])
else:
result = list(self.model.tokenize(unicode_sentence, mode=mode))
return result
# FILE pycorrector/pycorrector/utils/tokenizer.py
def tokenize_words(text):
"""Word segmentation"""
output = []
sentences = split_2_short_text(text, include_symbol=True)
for sentence, idx in sentences:
if is_chinese_string(sentence):
import jieba
output.extend(jieba.lcut(sentence))
else:
output.extend(whitespace_tokenize(sentence))
return output
# FILE pycorrector/pycorrector/utils/tokenizer.py
def split_2_short_text(text, include_symbol=True):
"""
文本切分为句子,以标点符号切分
:param text: str
:param include_symbol: bool
:return: (sentence, idx)
"""
result = []
sentences = re_han.split(text)
start_idx = 0
for sentence in sentences:
if not sentence:
continue
if include_symbol:
result.append((sentence, start_idx))
else:
if re_han.match(sentence):
result.append((sentence, start_idx))
start_idx += len(sentence)
return result
# FILE pycorrector/pycorrector/utils/text_utils.py
def is_chinese_string(string):
"""判断是否全为汉字"""
return all(is_chinese(c) for c in string)
# FILE pycorrector/pycorrector/utils/tokenizer.py
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a peice of text."""
tokens = []
if not text:
return tokens
sents = split_2_short_text(text, include_symbol=True)
for sent, idx in sents:
tokens.extend(sent.split())
return tokens
# FILE pycorrector/pycorrector/utils/tokenizer.py
class FullTokenizer(object):
"""Given Full tokenization."""
def __init__(self, lower=True):
self.lower = lower
def tokenize(self, text):
"""Tokenizes a piece of text."""
res = []
if len(text) == 0:
return res
if self.lower:
text = text.lower()
# for the multilingual and Chinese
res = tokenize_words(text)
return res
# FILE pycorrector/pycorrector/utils/tokenizer.py
class Tokenizer(object):
def __init__(self, dict_path='', custom_word_freq_dict=None, custom_confusion_dict=None):
self.model = jieba
jieba.setLogLevel("ERROR")
# 初始化大词典
if os.path.exists(dict_path):
self.model.set_dictionary(dict_path)
# 加载用户自定义词典
if custom_word_freq_dict:
for w, f in custom_word_freq_dict.items():
self.model.add_word(w, freq=f)
# 加载混淆集词典
if custom_confusion_dict:
for k, word in custom_confusion_dict.items():
# 添加到分词器的自定义词典中
self.model.add_word(k)
self.model.add_word(word)
def tokenize(self, unicode_sentence, mode="search"):
"""
切词并返回切词位置, search mode用于错误扩召回
:param unicode_sentence: query
:param mode: search, default, ngram
:param HMM: enable HMM
:return: (w, start, start + width) model='default'
"""
if mode == 'ngram':
n = 2
result_set = set()
tokens = self.model.lcut(unicode_sentence)
tokens_len = len(tokens)
start = 0
for i in range(0, tokens_len):
w = tokens[i]
width = len(w)
result_set.add((w, start, start + width))
for j in range(i, i + n):
gram = "".join(tokens[i:j + 1])
gram_width = len(gram)
if i + j > tokens_len:
break
result_set.add((gram, start, start + gram_width))
start += width
results = list(result_set)
result = sorted(results, key=lambda x: x[-1])
else:
result = list(self.model.tokenize(unicode_sentence, mode=mode))
return result
Based on the information above, please complete the function:
#CURRENT_FILE: pycorrector/pycorrector/utils/tokenizer.py
import os
import re
import jieba
from jieba import posseg
from pycorrector.utils.text_utils import is_chinese_string
import jieba
def segment(sentence, cut_type='word', pos=False):
"""
切词
:param sentence:
:param cut_type: 'word' use jieba.lcut; 'char' use list(sentence)
:param pos: enable POS
:return: list
"""
|
pycorrector/pycorrector/utils/tokenizer.py
|
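A sketch for `segment` that follows the docstring's two axes (word vs. char cut, with or without POS), using the `jieba`/`posseg` imports already present in the stub. The char+POS branch tags each character on its own, which is one plausible reading rather than necessarily pycorrector's exact behaviour.

```python
import jieba
from jieba import posseg

def segment(sentence, cut_type='word', pos=False):
    """
    切词 (sketch)
    :param cut_type: 'word' use jieba.lcut; 'char' use list(sentence)
    :param pos: enable POS
    :return: list, or (words, pos_tags) when pos is True
    """
    if pos:
        if cut_type == 'word':
            word_pos_seq = posseg.lcut(sentence)
            word_seq, pos_seq = [], []
            for w, p in word_pos_seq:
                word_seq.append(w)
                pos_seq.append(p)
            return word_seq, pos_seq
        # cut_type == 'char': tag each character separately
        word_seq = list(sentence)
        pos_seq = [posseg.lcut(w)[0].flag for w in word_seq]
        return word_seq, pos_seq
    if cut_type == 'word':
        return jieba.lcut(sentence)
    return list(sentence)
```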
jinja2.utils.object_type_repr
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# LIB typing_extensions.py
def final(f):
"""This decorator can be used to indicate to type checkers that
the decorated method cannot be overridden, and decorated class
cannot be subclassed. For example:
class Base:
@final
def done(self) -> None:
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
...
@final
class Leaf:
...
class Other(Leaf): # Error reported by type checker
...
There is no runtime checking of these properties. The decorator
sets the ``__final__`` attribute to ``True`` on the decorated object
to allow runtime introspection.
"""
try:
f.__final__ = True
except (AttributeError, TypeError):
# Skip the attribute silently if it is not writable.
# AttributeError happens if the object has __slots__ or a
# read-only property, TypeError if it's a builtin class.
pass
return f
# FILE Jinja2/src/jinja2/utils.py
class LRUCache:
"""A simple LRU Cache implementation."""
def __init__(self, capacity: int) -> None:
self.capacity = capacity
self._mapping: t.Dict[t.Any, t.Any] = {}
self._queue: "te.Deque[t.Any]" = deque()
self._postinit()
def _postinit(self) -> None:
...
def __getstate__(self) -> t.Mapping[str, t.Any]:
...
def __setstate__(self, d: t.Mapping[str, t.Any]) -> None:
...
def __getnewargs__(self) -> t.Tuple:
...
def copy(self) -> "LRUCache":
"""Return a shallow copy of the instance."""
...
def get(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Return an item from the cache dict or `default`"""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
...
def clear(self) -> None:
"""Clear the cache."""
...
def __contains__(self, key: t.Any) -> bool:
"""Check if a key exists in this cache."""
...
def __len__(self) -> int:
"""Return the current size of the cache."""
...
def __repr__(self) -> str:
...
def __getitem__(self, key: t.Any) -> t.Any:
"""Get an item from the cache. Moves the item up so that it has the
highest priority then.
Raise a `KeyError` if it does not exist.
"""
...
def __setitem__(self, key: t.Any, value: t.Any) -> None:
"""Sets the value for an item. Moves the item up so that it
has the highest priority then.
"""
...
def __delitem__(self, key: t.Any) -> None:
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
...
def items(self) -> t.Iterable[t.Tuple[t.Any, t.Any]]:
"""Return a list of items."""
...
def values(self) -> t.Iterable[t.Any]:
"""Return a list of all values."""
...
def keys(self) -> t.Iterable[t.Any]:
"""Return a list of all keys ordered by most recent usage."""
...
def __iter__(self) -> t.Iterator[t.Any]:
...
def __reversed__(self) -> t.Iterator[t.Any]:
"""Iterate over the keys in the cache dict, oldest items
coming first.
"""
...
Based on the information above, please complete the function:
#CURRENT_FILE: Jinja2/src/jinja2/utils.py
import enum
import json
import os
import re
import typing as t
from collections import abc
from collections import deque
from random import choice
from random import randrange
from threading import Lock
from types import CodeType
from urllib.parse import quote_from_bytes
import markupsafe
import typing_extensions as te
from .runtime import Undefined
from .environment import get_spontaneous_environment
from .lexer import _lexer_cache
from pprint import pformat
from .constants import LOREM_IPSUM_WORDS
def object_type_repr(obj: t.Any) -> str:
"""Returns the name of the object's type. For some recognized
singletons the name of the object is returned instead. (For
example for `None` and `Ellipsis`).
"""
|
Jinja2/src/jinja2/utils.py
|
jinja2.utils.LRUCache.setdefault
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE Jinja2/src/jinja2/utils.py
class Namespace:
"""A namespace object that can hold arbitrary attributes. It may be
initialized from a dictionary or with keyword arguments."""
def __init__(*args: t.Any, **kwargs: t.Any) -> None: # noqa: B902
self, args = args[0], args[1:]
self.__attrs = dict(*args, **kwargs)
def __getattribute__(self, name: str) -> t.Any:
...
def __setitem__(self, name: str, value: t.Any) -> None:
self.__attrs[name] = value
def __repr__(self) -> str:
...
# FILE Jinja2/src/jinja2/utils.py
class Namespace:
"""A namespace object that can hold arbitrary attributes. It may be
initialized from a dictionary or with keyword arguments."""
def __init__(*args: t.Any, **kwargs: t.Any) -> None: # noqa: B902
self, args = args[0], args[1:]
self.__attrs = dict(*args, **kwargs)
def __getattribute__(self, name: str) -> t.Any:
...
def __setitem__(self, name: str, value: t.Any) -> None:
...
def __repr__(self) -> str:
return f"<Namespace {self.__attrs!r}>"
# FILE Jinja2/src/jinja2/runtime.py
class Undefined:
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
def __init__(
self,
hint: t.Optional[str] = None,
obj: t.Any = missing,
name: t.Optional[str] = None,
exc: t.Type[TemplateRuntimeError] = UndefinedError,
) -> None:
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
def _undefined_message(self) -> str:
"""Build a message about the undefined value based on how it was
accessed.
"""
...
def _fail_with_undefined_error(
"""Raise an :exc:`UndefinedError` when operations are performed
on the undefined value.
"""
...
def __getattr__(self, name: str) -> t.Any:
...
def __eq__(self, other: t.Any) -> bool:
return type(self) is type(other)
def __ne__(self, other: t.Any) -> bool:
...
def __hash__(self) -> int:
...
def __str__(self) -> str:
...
def __len__(self) -> int:
...
def __iter__(self) -> t.Iterator[t.Any]:
...
def __bool__(self) -> bool:
...
def __repr__(self) -> str:
...
# FILE Jinja2/src/jinja2/runtime.py
class Undefined:
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
def __init__(
self,
hint: t.Optional[str] = None,
obj: t.Any = missing,
name: t.Optional[str] = None,
exc: t.Type[TemplateRuntimeError] = UndefinedError,
) -> None:
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
def _undefined_message(self) -> str:
"""Build a message about the undefined value based on how it was
accessed.
"""
...
def _fail_with_undefined_error(
"""Raise an :exc:`UndefinedError` when operations are performed
on the undefined value.
"""
...
def __getattr__(self, name: str) -> t.Any:
...
def __eq__(self, other: t.Any) -> bool:
...
def __ne__(self, other: t.Any) -> bool:
return not self.__eq__(other)
def __hash__(self) -> int:
...
def __str__(self) -> str:
...
def __len__(self) -> int:
...
def __iter__(self) -> t.Iterator[t.Any]:
...
def __bool__(self) -> bool:
...
def __repr__(self) -> str:
...
# FILE Jinja2/src/jinja2/runtime.py
class Undefined:
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
def __init__(
self,
hint: t.Optional[str] = None,
obj: t.Any = missing,
name: t.Optional[str] = None,
exc: t.Type[TemplateRuntimeError] = UndefinedError,
) -> None:
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
def _undefined_message(self) -> str:
"""Build a message about the undefined value based on how it was
accessed.
"""
...
def _fail_with_undefined_error(
"""Raise an :exc:`UndefinedError` when operations are performed
on the undefined value.
"""
...
def __getattr__(self, name: str) -> t.Any:
...
def __eq__(self, other: t.Any) -> bool:
...
def __ne__(self, other: t.Any) -> bool:
...
def __hash__(self) -> int:
return id(type(self))
def __str__(self) -> str:
...
def __len__(self) -> int:
...
def __iter__(self) -> t.Iterator[t.Any]:
...
def __bool__(self) -> bool:
...
def __repr__(self) -> str:
...
# FILE Jinja2/src/jinja2/runtime.py
class Undefined:
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
def __init__(
self,
hint: t.Optional[str] = None,
obj: t.Any = missing,
name: t.Optional[str] = None,
exc: t.Type[TemplateRuntimeError] = UndefinedError,
) -> None:
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
def _undefined_message(self) -> str:
"""Build a message about the undefined value based on how it was
accessed.
"""
...
def _fail_with_undefined_error(
"""Raise an :exc:`UndefinedError` when operations are performed
on the undefined value.
"""
...
def __getattr__(self, name: str) -> t.Any:
...
def __eq__(self, other: t.Any) -> bool:
...
def __ne__(self, other: t.Any) -> bool:
...
def __hash__(self) -> int:
...
def __str__(self) -> str:
return ""
def __len__(self) -> int:
...
def __iter__(self) -> t.Iterator[t.Any]:
...
def __bool__(self) -> bool:
...
def __repr__(self) -> str:
...
# FILE Jinja2/src/jinja2/runtime.py
class Undefined:
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
def __init__(
self,
hint: t.Optional[str] = None,
obj: t.Any = missing,
name: t.Optional[str] = None,
exc: t.Type[TemplateRuntimeError] = UndefinedError,
) -> None:
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
def _undefined_message(self) -> str:
"""Build a message about the undefined value based on how it was
accessed.
"""
...
def _fail_with_undefined_error(
"""Raise an :exc:`UndefinedError` when operations are performed
on the undefined value.
"""
...
def __getattr__(self, name: str) -> t.Any:
...
def __eq__(self, other: t.Any) -> bool:
...
def __ne__(self, other: t.Any) -> bool:
...
def __hash__(self) -> int:
...
def __str__(self) -> str:
...
def __len__(self) -> int:
return 0
def __iter__(self) -> t.Iterator[t.Any]:
...
def __bool__(self) -> bool:
...
def __repr__(self) -> str:
...
# FILE Jinja2/src/jinja2/runtime.py
class Undefined:
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
def __init__(
self,
hint: t.Optional[str] = None,
obj: t.Any = missing,
name: t.Optional[str] = None,
exc: t.Type[TemplateRuntimeError] = UndefinedError,
) -> None:
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
def _undefined_message(self) -> str:
"""Build a message about the undefined value based on how it was
accessed.
"""
...
def _fail_with_undefined_error(
"""Raise an :exc:`UndefinedError` when operations are performed
on the undefined value.
"""
...
def __getattr__(self, name: str) -> t.Any:
...
def __eq__(self, other: t.Any) -> bool:
...
def __ne__(self, other: t.Any) -> bool:
...
def __hash__(self) -> int:
...
def __str__(self) -> str:
...
def __len__(self) -> int:
...
def __iter__(self) -> t.Iterator[t.Any]:
yield from ()
def __bool__(self) -> bool:
...
def __repr__(self) -> str:
...
# FILE Jinja2/src/jinja2/runtime.py
class Undefined:
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
def __init__(
self,
hint: t.Optional[str] = None,
obj: t.Any = missing,
name: t.Optional[str] = None,
exc: t.Type[TemplateRuntimeError] = UndefinedError,
) -> None:
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
def _undefined_message(self) -> str:
"""Build a message about the undefined value based on how it was
accessed.
"""
...
def _fail_with_undefined_error(
"""Raise an :exc:`UndefinedError` when operations are performed
on the undefined value.
"""
...
def __getattr__(self, name: str) -> t.Any:
...
def __eq__(self, other: t.Any) -> bool:
...
def __ne__(self, other: t.Any) -> bool:
...
def __hash__(self) -> int:
...
def __str__(self) -> str:
...
def __len__(self) -> int:
...
def __iter__(self) -> t.Iterator[t.Any]:
...
def __bool__(self) -> bool:
return False
def __repr__(self) -> str:
...
# FILE Jinja2/src/jinja2/runtime.py
class Undefined:
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
def __init__(
self,
hint: t.Optional[str] = None,
obj: t.Any = missing,
name: t.Optional[str] = None,
exc: t.Type[TemplateRuntimeError] = UndefinedError,
) -> None:
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
def _undefined_message(self) -> str:
"""Build a message about the undefined value based on how it was
accessed.
"""
...
def _fail_with_undefined_error(
"""Raise an :exc:`UndefinedError` when operations are performed
on the undefined value.
"""
...
def __getattr__(self, name: str) -> t.Any:
...
def __eq__(self, other: t.Any) -> bool:
...
def __ne__(self, other: t.Any) -> bool:
...
def __hash__(self) -> int:
...
def __str__(self) -> str:
...
def __len__(self) -> int:
...
def __iter__(self) -> t.Iterator[t.Any]:
...
def __bool__(self) -> bool:
...
def __repr__(self) -> str:
return "Undefined"
# FILE Jinja2/src/jinja2/utils.py
class Cycler:
"""Cycle through values by yield them one at a time, then restarting
once the end is reached. Available as ``cycler`` in templates.
Similar to ``loop.cycle``, but can be used outside loops or across
multiple loops. For example, render a list of folders and files in a
list, alternating giving them "odd" and "even" classes.
.. code-block:: html+jinja
{% set row_class = cycler("odd", "even") %}
<ul class="browser">
{% for folder in folders %}
<li class="folder {{ row_class.next() }}">{{ folder }}
{% endfor %}
{% for file in files %}
<li class="file {{ row_class.next() }}">{{ file }}
{% endfor %}
</ul>
:param items: Each positional argument will be yielded in the order
given for each cycle.
.. versionadded:: 2.1
"""
def __init__(self, *items: t.Any) -> None:
if not items:
raise RuntimeError("at least one item has to be provided")
self.items = items
self.pos = 0
def reset(self) -> None:
"""Resets the current item to the first item."""
...
def current(self) -> t.Any:
"""Return the current item. Equivalent to the item that will be
returned next time :meth:`next` is called.
"""
return self.items[self.pos]
def next(self) -> t.Any:
"""Return the current item, then advance :attr:`current` to the
next item.
"""
...
Based on the information above, please complete the function:
#CURRENT_FILE: Jinja2/src/jinja2/utils.py
import enum
import json
import os
import re
import typing as t
from collections import abc
from collections import deque
from random import choice
from random import randrange
from threading import Lock
from types import CodeType
from urllib.parse import quote_from_bytes
import markupsafe
import typing_extensions as te
from .runtime import Undefined
from .environment import get_spontaneous_environment
from .lexer import _lexer_cache
from pprint import pformat
from .constants import LOREM_IPSUM_WORDS
class LRUCache:
"""A simple LRU Cache implementation."""
# this is fast for small capacities (something below 1000) but doesn't
# scale. But as long as it's only used as storage for templates this
# won't do any harm.
def __init__(self, capacity: int) -> None:
self.capacity = capacity
self._mapping: t.Dict[t.Any, t.Any] = {}
self._queue: "te.Deque[t.Any]" = deque()
self._postinit()
def _postinit(self) -> None:
# alias all queue methods for faster lookup
self._popleft = self._queue.popleft
self._pop = self._queue.pop
self._remove = self._queue.remove
self._wlock = Lock()
self._append = self._queue.append
def __getstate__(self) -> t.Mapping[str, t.Any]:
return {
"capacity": self.capacity,
"_mapping": self._mapping,
"_queue": self._queue,
}
def __setstate__(self, d: t.Mapping[str, t.Any]) -> None:
self.__dict__.update(d)
self._postinit()
def __getnewargs__(self) -> t.Tuple:
return (self.capacity,)
def copy(self) -> "LRUCache":
"""Return a shallow copy of the instance."""
rv = self.__class__(self.capacity)
rv._mapping.update(self._mapping)
rv._queue.extend(self._queue)
return rv
def get(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Return an item from the cache dict or `default`"""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
|
Jinja2/src/jinja2/utils.py
|
sumy.summarizers.sum_basic.SumBasicSummarizer._compute_word_freq
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
...
def stem_word(self, word):
return self._stemmer(self.normalize_word(word))
def normalize_word(word):
...
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
...
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
...
def stem_word(self, word):
...
def normalize_word(word):
...
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
rate = rating
if isinstance(rating, dict):
assert not args and not kwargs
rate = lambda s: rating[s]
infos = (SentenceInfo(s, o, rate(s, *args, **kwargs))
for o, s in enumerate(sentences))
# sort sentences by rating in descending order
infos = sorted(infos, key=attrgetter("rating"), reverse=True)
# get `count` first best rated sentences
if not callable(count):
count = ItemsCount(count)
infos = count(infos)
# sort sentences by their order in document
infos = sorted(infos, key=attrgetter("order"))
return tuple(i.sentence for i in infos)
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
raise NotImplementedError("This method should be overriden in subclass")
def stem_word(self, word):
return self._stemmer(self.normalize_word(word))
@staticmethod
def normalize_word(word):
return to_unicode(word).lower()
@staticmethod
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
rate = rating
if isinstance(rating, dict):
assert not args and not kwargs
rate = lambda s: rating[s]
infos = (SentenceInfo(s, o, rate(s, *args, **kwargs))
for o, s in enumerate(sentences))
# sort sentences by rating in descending order
infos = sorted(infos, key=attrgetter("rating"), reverse=True)
# get `count` first best rated sentences
if not callable(count):
count = ItemsCount(count)
infos = count(infos)
# sort sentences by their order in document
infos = sorted(infos, key=attrgetter("order"))
return tuple(i.sentence for i in infos)
Based on the information above, please complete the function:
#CURRENT_FILE: sumy/sumy/summarizers/sum_basic.py
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from ._summarizer import AbstractSummarizer
class SumBasicSummarizer(AbstractSummarizer):
"""
SumBasic: a frequency-based summarization system that adjusts word frequencies as
sentences are extracted.
Source: http://www.cis.upenn.edu/~nenkova/papers/ipm.pdf
"""
_stop_words = frozenset()
@property
def stop_words(self):
return self._stop_words
@stop_words.setter
def stop_words(self, words):
self._stop_words = frozenset(map(self.normalize_word, words))
def __call__(self, document, sentences_count):
sentences = document.sentences
ratings = self._compute_ratings(sentences)
return self._get_best_sentences(document.sentences, sentences_count, ratings)
def _get_all_words_in_doc(self, sentences):
return self._stem_words([w for s in sentences for w in s.words])
def _get_content_words_in_sentence(self, sentence):
normalized_words = self._normalize_words(sentence.words)
normalized_content_words = self._filter_out_stop_words(normalized_words)
stemmed_normalized_content_words = self._stem_words(normalized_content_words)
return stemmed_normalized_content_words
def _stem_words(self, words):
return [self.stem_word(w) for w in words]
def _normalize_words(self, words):
return [self.normalize_word(w) for w in words]
def _filter_out_stop_words(self, words):
return [w for w in words if w not in self.stop_words]
@staticmethod
def _compute_word_freq(list_of_words):
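A plain frequency count; this mirrors the completed version of the same method quoted in the following SumBasic entry.
    @staticmethod
    def _compute_word_freq(list_of_words):
        word_freq = {}
        for w in list_of_words:
            word_freq[w] = word_freq.get(w, 0) + 1
        return word_freq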
|
sumy/sumy/summarizers/sum_basic.py
|
sumy.summarizers.sum_basic.SumBasicSummarizer._compute_average_probability_of_words
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
...
def stem_word(self, word):
return self._stemmer(self.normalize_word(word))
def normalize_word(word):
...
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
...
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
...
def stem_word(self, word):
...
def normalize_word(word):
...
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
rate = rating
if isinstance(rating, dict):
assert not args and not kwargs
rate = lambda s: rating[s]
infos = (SentenceInfo(s, o, rate(s, *args, **kwargs))
for o, s in enumerate(sentences))
# sort sentences by rating in descending order
infos = sorted(infos, key=attrgetter("rating"), reverse=True)
# get `count` first best rated sentences
if not callable(count):
count = ItemsCount(count)
infos = count(infos)
# sort sentences by their order in document
infos = sorted(infos, key=attrgetter("order"))
return tuple(i.sentence for i in infos)
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
raise NotImplementedError("This method should be overriden in subclass")
def stem_word(self, word):
return self._stemmer(self.normalize_word(word))
@staticmethod
def normalize_word(word):
return to_unicode(word).lower()
@staticmethod
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
rate = rating
if isinstance(rating, dict):
assert not args and not kwargs
rate = lambda s: rating[s]
infos = (SentenceInfo(s, o, rate(s, *args, **kwargs))
for o, s in enumerate(sentences))
# sort sentences by rating in descending order
infos = sorted(infos, key=attrgetter("rating"), reverse=True)
# get `count` first best rated sentences
if not callable(count):
count = ItemsCount(count)
infos = count(infos)
# sort sentences by their order in document
infos = sorted(infos, key=attrgetter("order"))
return tuple(i.sentence for i in infos)
Based on the information above, please complete the function:
#CURRENT_FILE: sumy/sumy/summarizers/sum_basic.py
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from ._summarizer import AbstractSummarizer
class SumBasicSummarizer(AbstractSummarizer):
"""
SumBasic: a frequency-based summarization system that adjusts word frequencies as
sentences are extracted.
Source: http://www.cis.upenn.edu/~nenkova/papers/ipm.pdf
"""
_stop_words = frozenset()
@property
def stop_words(self):
return self._stop_words
@stop_words.setter
def stop_words(self, words):
self._stop_words = frozenset(map(self.normalize_word, words))
def __call__(self, document, sentences_count):
sentences = document.sentences
ratings = self._compute_ratings(sentences)
return self._get_best_sentences(document.sentences, sentences_count, ratings)
def _get_all_words_in_doc(self, sentences):
return self._stem_words([w for s in sentences for w in s.words])
def _get_content_words_in_sentence(self, sentence):
normalized_words = self._normalize_words(sentence.words)
normalized_content_words = self._filter_out_stop_words(normalized_words)
stemmed_normalized_content_words = self._stem_words(normalized_content_words)
return stemmed_normalized_content_words
def _stem_words(self, words):
return [self.stem_word(w) for w in words]
def _normalize_words(self, words):
return [self.normalize_word(w) for w in words]
def _filter_out_stop_words(self, words):
return [w for w in words if w not in self.stop_words]
@staticmethod
def _compute_word_freq(list_of_words):
word_freq = {}
for w in list_of_words:
word_freq[w] = word_freq.get(w, 0) + 1
return word_freq
def _get_all_content_words_in_doc(self, sentences):
all_words = self._get_all_words_in_doc(sentences)
content_words = self._filter_out_stop_words(all_words)
normalized_content_words = self._normalize_words(content_words)
return normalized_content_words
def _compute_tf(self, sentences):
"""
Computes the normalized term frequency as explained in http://www.tfidf.com/
"""
content_words = self._get_all_content_words_in_doc(sentences)
content_words_count = len(content_words)
content_words_freq = self._compute_word_freq(content_words)
content_word_tf = dict((k, v / content_words_count) for (k, v) in content_words_freq.items())
return content_word_tf
@staticmethod
def _compute_average_probability_of_words(word_freq_in_doc, content_words_in_sentence):
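A sketch following the SumBasic idea: average the document-level frequencies (probabilities) of the sentence's content words. The zero fallback for sentences without content words is an assumption.
    @staticmethod
    def _compute_average_probability_of_words(word_freq_in_doc, content_words_in_sentence):
        content_words_count = len(content_words_in_sentence)
        if content_words_count > 0:
            word_freq_sum = sum(word_freq_in_doc[w] for w in content_words_in_sentence)
            return word_freq_sum / content_words_count
        return 0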
|
sumy/sumy/summarizers/sum_basic.py
|
sumy.summarizers.lex_rank.LexRankSummarizer._compute_idf
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
...
def stem_word(self, word):
return self._stemmer(self.normalize_word(word))
def normalize_word(word):
...
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
...
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
raise NotImplementedError("This method should be overriden in subclass")
def stem_word(self, word):
return self._stemmer(self.normalize_word(word))
@staticmethod
def normalize_word(word):
return to_unicode(word).lower()
@staticmethod
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
rate = rating
if isinstance(rating, dict):
assert not args and not kwargs
rate = lambda s: rating[s]
infos = (SentenceInfo(s, o, rate(s, *args, **kwargs))
for o, s in enumerate(sentences))
# sort sentences by rating in descending order
infos = sorted(infos, key=attrgetter("rating"), reverse=True)
# get `count` first best rated sentences
if not callable(count):
count = ItemsCount(count)
infos = count(infos)
# sort sentences by their order in document
infos = sorted(infos, key=attrgetter("order"))
return tuple(i.sentence for i in infos)
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
raise NotImplementedError("This method should be overriden in subclass")
def stem_word(self, word):
...
def normalize_word(word):
...
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
...
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
...
def stem_word(self, word):
...
def normalize_word(word):
...
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
rate = rating
if isinstance(rating, dict):
assert not args and not kwargs
rate = lambda s: rating[s]
infos = (SentenceInfo(s, o, rate(s, *args, **kwargs))
for o, s in enumerate(sentences))
# sort sentences by rating in descending order
infos = sorted(infos, key=attrgetter("rating"), reverse=True)
# get `count` first best rated sentences
if not callable(count):
count = ItemsCount(count)
infos = count(infos)
# sort sentences by their order in document
infos = sorted(infos, key=attrgetter("order"))
return tuple(i.sentence for i in infos)
Based on the information above, please complete the function:
#CURRENT_FILE: sumy/sumy/summarizers/lex_rank.py
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import math
import numpy
from collections import Counter
from ._summarizer import AbstractSummarizer
class LexRankSummarizer(AbstractSummarizer):
"""
LexRank: Graph-based Centrality as Salience in Text Summarization
Source: http://tangra.si.umich.edu/~radev/lexrank/lexrank.pdf
"""
threshold = 0.1
epsilon = 0.1
_stop_words = frozenset()
@property
def stop_words(self):
return self._stop_words
@stop_words.setter
def stop_words(self, words):
self._stop_words = frozenset(map(self.normalize_word, words))
def __call__(self, document, sentences_count):
self._ensure_dependencies_installed()
sentences_words = [self._to_words_set(s) for s in document.sentences]
if not sentences_words:
return tuple()
tf_metrics = self._compute_tf(sentences_words)
idf_metrics = self._compute_idf(sentences_words)
matrix = self._create_matrix(sentences_words, self.threshold, tf_metrics, idf_metrics)
scores = self.power_method(matrix, self.epsilon)
ratings = dict(zip(document.sentences, scores))
return self._get_best_sentences(document.sentences, sentences_count, ratings)
@staticmethod
def _ensure_dependencies_installed():
if numpy is None:
raise ValueError("LexRank summarizer requires NumPy. Please, install it by command 'pip install numpy'.")
def _to_words_set(self, sentence):
words = map(self.normalize_word, sentence.words)
return [self.stem_word(w) for w in words if w not in self._stop_words]
def _compute_tf(self, sentences):
tf_values = map(Counter, sentences)
tf_metrics = []
for sentence in tf_values:
metrics = {}
max_tf = self._find_tf_max(sentence)
for term, tf in sentence.items():
metrics[term] = tf / max_tf
tf_metrics.append(metrics)
return tf_metrics
@staticmethod
def _find_tf_max(terms):
return max(terms.values()) if terms else 1
@staticmethod
def _compute_idf(sentences):
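A smoothed IDF over sentences treated as documents; this mirrors the completed version of the same method quoted in the later cosine_similarity entry and relies on the math import shown above.
    @staticmethod
    def _compute_idf(sentences):
        idf_metrics = {}
        sentences_count = len(sentences)
        for sentence in sentences:
            for term in sentence:
                if term not in idf_metrics:
                    # number of sentences containing the term, plus-one smoothed
                    n_j = sum(1 for s in sentences if term in s)
                    idf_metrics[term] = math.log(sentences_count / (1 + n_j))
        return idf_metrics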
|
sumy/sumy/summarizers/lex_rank.py
|
sumy.summarizers.lex_rank.LexRankSummarizer.cosine_similarity
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
...
def stem_word(self, word):
...
def normalize_word(word):
...
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
...
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
...
def stem_word(self, word):
return self._stemmer(self.normalize_word(word))
def normalize_word(word):
...
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
...
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
raise NotImplementedError("This method should be overriden in subclass")
def stem_word(self, word):
return self._stemmer(self.normalize_word(word))
@staticmethod
def normalize_word(word):
return to_unicode(word).lower()
@staticmethod
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
rate = rating
if isinstance(rating, dict):
assert not args and not kwargs
rate = lambda s: rating[s]
infos = (SentenceInfo(s, o, rate(s, *args, **kwargs))
for o, s in enumerate(sentences))
# sort sentences by rating in descending order
infos = sorted(infos, key=attrgetter("rating"), reverse=True)
# get `count` first best rated sentences
if not callable(count):
count = ItemsCount(count)
infos = count(infos)
# sort sentences by their order in document
infos = sorted(infos, key=attrgetter("order"))
return tuple(i.sentence for i in infos)
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
raise NotImplementedError("This method should be overriden in subclass")
def stem_word(self, word):
...
def normalize_word(word):
...
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
...
# FILE sumy/sumy/summarizers/_summarizer.py
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
...
def stem_word(self, word):
...
def normalize_word(word):
...
def _get_best_sentences(sentences, count, rating, *args, **kwargs):
rate = rating
if isinstance(rating, dict):
assert not args and not kwargs
rate = lambda s: rating[s]
infos = (SentenceInfo(s, o, rate(s, *args, **kwargs))
for o, s in enumerate(sentences))
# sort sentences by rating in descending order
infos = sorted(infos, key=attrgetter("rating"), reverse=True)
# get `count` first best rated sentences
if not callable(count):
count = ItemsCount(count)
infos = count(infos)
# sort sentences by their order in document
infos = sorted(infos, key=attrgetter("order"))
return tuple(i.sentence for i in infos)
Based on the information above, please complete the function:
#CURRENT_FILE: sumy/sumy/summarizers/lex_rank.py
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import math
import numpy
from collections import Counter
from ._summarizer import AbstractSummarizer
class LexRankSummarizer(AbstractSummarizer):
"""
LexRank: Graph-based Centrality as Salience in Text Summarization
Source: http://tangra.si.umich.edu/~radev/lexrank/lexrank.pdf
"""
threshold = 0.1
epsilon = 0.1
_stop_words = frozenset()
@property
def stop_words(self):
return self._stop_words
@stop_words.setter
def stop_words(self, words):
self._stop_words = frozenset(map(self.normalize_word, words))
def __call__(self, document, sentences_count):
self._ensure_dependencies_installed()
sentences_words = [self._to_words_set(s) for s in document.sentences]
if not sentences_words:
return tuple()
tf_metrics = self._compute_tf(sentences_words)
idf_metrics = self._compute_idf(sentences_words)
matrix = self._create_matrix(sentences_words, self.threshold, tf_metrics, idf_metrics)
scores = self.power_method(matrix, self.epsilon)
ratings = dict(zip(document.sentences, scores))
return self._get_best_sentences(document.sentences, sentences_count, ratings)
@staticmethod
def _ensure_dependencies_installed():
if numpy is None:
raise ValueError("LexRank summarizer requires NumPy. Please, install it by command 'pip install numpy'.")
def _to_words_set(self, sentence):
words = map(self.normalize_word, sentence.words)
return [self.stem_word(w) for w in words if w not in self._stop_words]
def _compute_tf(self, sentences):
tf_values = map(Counter, sentences)
tf_metrics = []
for sentence in tf_values:
metrics = {}
max_tf = self._find_tf_max(sentence)
for term, tf in sentence.items():
metrics[term] = tf / max_tf
tf_metrics.append(metrics)
return tf_metrics
@staticmethod
def _find_tf_max(terms):
return max(terms.values()) if terms else 1
@staticmethod
def _compute_idf(sentences):
idf_metrics = {}
sentences_count = len(sentences)
for sentence in sentences:
for term in sentence:
if term not in idf_metrics:
n_j = sum(1 for s in sentences if term in s)
idf_metrics[term] = math.log(sentences_count / (1 + n_j))
return idf_metrics
def _create_matrix(self, sentences, threshold, tf_metrics, idf_metrics):
"""
Creates matrix of shape |sentences|×|sentences|.
"""
# create matrix |sentences|×|sentences| filled with zeroes
sentences_count = len(sentences)
matrix = numpy.zeros((sentences_count, sentences_count))
degrees = numpy.zeros((sentences_count, ))
for row, (sentence1, tf1) in enumerate(zip(sentences, tf_metrics)):
for col, (sentence2, tf2) in enumerate(zip(sentences, tf_metrics)):
matrix[row, col] = self.cosine_similarity(sentence1, sentence2, tf1, tf2, idf_metrics)
if matrix[row, col] > threshold:
matrix[row, col] = 1.0
degrees[row] += 1
else:
matrix[row, col] = 0
for row in range(sentences_count):
for col in range(sentences_count):
if degrees[row] == 0:
degrees[row] = 1
matrix[row][col] = matrix[row][col] / degrees[row]
return matrix
@staticmethod
def cosine_similarity(sentence1, sentence2, tf1, tf2, idf_metrics):
"""
We compute idf-modified-cosine(sentence1, sentence2) here.
It's cosine similarity of these two sentences (vectors) A, B computed as cos(x, y) = A . B / (|A| . |B|)
Sentences are represented as vector TF*IDF metrics.
:param sentence1:
Iterable object where every item represents word of 1st sentence.
:param sentence2:
Iterable object where every item represents word of 2nd sentence.
:type tf1: dict
:param tf1:
Term frequencies of words from 1st sentence.
:type tf2: dict
:param tf2:
Term frequencies of words from 2nd sentence
:type idf_metrics: dict
:param idf_metrics:
Inverted document metrics of the sentences. Every sentence is treated as document for this algorithm.
:rtype: float
:return:
Returns -1.0 for opposite similarity, 1.0 for the same sentence and zero for no similarity between sentences.
"""
|
sumy/sumy/summarizers/lex_rank.py
|
sumy.evaluation.rouge._get_ngrams
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE sumy/sumy/evaluation/rouge.py
def rouge_n(evaluated_sentences, reference_sentences, n=2):
"""
Computes ROUGE-N of two text collections of sentences.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/
papers/rouge-working-note-v1.3.1.pdf
:param evaluated_sentences:
The sentences that have been picked by the summarizer
:param reference_sentences:
The sentences from the reference set
:param n: Size of ngram. Defaults to 2.
:returns:
float 0 <= ROUGE-N <= 1, where 0 means no overlap and 1 means
exactly the same.
:raises ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise (ValueError("Collections must contain at least 1 sentence."))
evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)
reference_ngrams = _get_word_ngrams(n, reference_sentences)
reference_count = len(reference_ngrams)
# Gets the overlapping ngrams between evaluated and reference
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_count = len(overlapping_ngrams)
return overlapping_count / reference_count
# FILE sumy/sumy/evaluation/rouge.py
def _get_word_ngrams(n, sentences):
assert (len(sentences) > 0)
assert (n > 0)
words = set()
for sentence in sentences:
words.update(_get_ngrams(n, _split_into_words([sentence])))
return words
# FILE sumy/sumy/evaluation/rouge.py
def _split_into_words(sentences):
full_text_words = []
for s in sentences:
if not isinstance(s, Sentence):
raise (ValueError("Object in collection must be of type Sentence"))
full_text_words.extend(s.words)
return full_text_words
Based on the information above, please complete the function:
#CURRENT_FILE: sumy/sumy/evaluation/rouge.py
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from ..models.dom import Sentence
def _get_ngrams(n, text):
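A sketch that returns a set of n-gram tuples, so the set operations used by the callers above (update, intersection) work as shown; treating `text` as a flat list of words is inferred from _get_word_ngrams.
def _get_ngrams(n, text):
    # text is a flat list of words; collect every window of length n
    ngram_set = set()
    max_start = len(text) - n
    for i in range(max_start + 1):
        ngram_set.add(tuple(text[i:i + n]))
    return ngram_set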
|
sumy/sumy/evaluation/rouge.py
|
sumy.evaluation.rouge._split_into_words
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE sumy/sumy/evaluation/rouge.py
def rouge_n(evaluated_sentences, reference_sentences, n=2):
"""
Computes ROUGE-N of two text collections of sentences.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/
papers/rouge-working-note-v1.3.1.pdf
:param evaluated_sentences:
The sentences that have been picked by the summarizer
:param reference_sentences:
The sentences from the reference set
:param n: Size of ngram. Defaults to 2.
:returns:
float 0 <= ROUGE-N <= 1, where 0 means no overlap and 1 means
exactly the same.
:raises ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise (ValueError("Collections must contain at least 1 sentence."))
evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)
reference_ngrams = _get_word_ngrams(n, reference_sentences)
reference_count = len(reference_ngrams)
# Gets the overlapping ngrams between evaluated and reference
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_count = len(overlapping_ngrams)
return overlapping_count / reference_count
# FILE sumy/sumy/evaluation/rouge.py
def _get_word_ngrams(n, sentences):
assert (len(sentences) > 0)
assert (n > 0)
words = set()
for sentence in sentences:
words.update(_get_ngrams(n, _split_into_words([sentence])))
return words
Based on the information above, please complete the function:
#CURRENT_FILE: sumy/sumy/evaluation/rouge.py
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from ..models.dom import Sentence
def _split_into_words(sentences):
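This mirrors the completed version of the same helper quoted in the previous _get_ngrams entry: flatten the sentences' words into one list after validating the element type.
def _split_into_words(sentences):
    full_text_words = []
    for s in sentences:
        if not isinstance(s, Sentence):
            raise ValueError("Object in collection must be of type Sentence")
        full_text_words.extend(s.words)
    return full_text_words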
|
sumy/sumy/evaluation/rouge.py
|
falcon.inspect.register_router
|
You are a Python programmer. Here is all the context you may find useful to complete the function:
# FILE falcon/falcon/inspect.py
def to_string(self, verbose=False, internal=False) -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to
``False``.
Returns:
str: string representation of this class.
"""
return StringVisitor(verbose, internal).process(self)
# FILE falcon/falcon/inspect.py
class StringVisitor(InspectVisitor):
"""Visitor that returns a string representation of the info class.
This is used automatically by calling ``to_string()`` on the info class.
It can also be used directly by calling ``StringVisitor.process(info_instance)``.
Args:
verbose (bool, optional): Adds more information. Defaults to ``False``.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to ``False``.
name (str, optional): The name of the application, to be output at the
beginning of the text. Defaults to ``'Falcon App'``.
"""
def __init__(self, verbose=False, internal=False, name=''):
self.verbose = verbose
self.internal = internal
self.name = name
self.indent = 0
@property
def tab(self):
"""Get the current tabulation."""
return ' ' * self.indent
def visit_route_method(self, route_method: RouteMethodInfo) -> str:
"""Visit a RouteMethodInfo instance. Usually called by `process`."""
text = '{0.method} - {0.function_name}'.format(route_method)
if self.verbose:
text += ' ({0.source_info})'.format(route_method)
return text
def _methods_to_string(self, methods: List):
"""Return a string from the list of methods."""
tab = self.tab + ' ' * 3
methods = _filter_internal(methods, self.internal)
if not methods:
return ''
text_list = [self.process(m) for m in methods]
method_text = ['{}├── {}'.format(tab, m) for m in text_list[:-1]]
method_text += ['{}└── {}'.format(tab, m) for m in text_list[-1:]]
return '\n'.join(method_text)
def visit_route(self, route: RouteInfo) -> str:
"""Visit a RouteInfo instance. Usually called by `process`."""
text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
if self.verbose:
text += ' ({0.source_info})'.format(route)
method_text = self._methods_to_string(route.methods)
if not method_text:
return text
return '{}:\n{}'.format(text, method_text)
def visit_static_route(self, static_route: StaticRouteInfo) -> str:
"""Visit a StaticRouteInfo instance. Usually called by `process`."""
text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
if static_route.fallback_filename:
text += ' [{0.fallback_filename}]'.format(static_route)
return text
def visit_sink(self, sink: SinkInfo) -> str:
"""Visit a SinkInfo instance. Usually called by `process`."""
text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
if self.verbose:
text += ' ({0.source_info})'.format(sink)
return text
def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
"""Visit a ErrorHandlerInfo instance. Usually called by `process`."""
text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
if self.verbose:
text += ' ({0.source_info})'.format(error_handler)
return text
def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
"""Visit a MiddlewareMethodInfo instance. Usually called by `process`."""
text = '{0.function_name}'.format(middleware_method)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_method)
return text
def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
"""Visit a ErrorHandlerInfo instance. Usually called by `process`."""
text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_class)
method_text = self._methods_to_string(middleware_class.methods)
if not method_text:
return text
return '{}:\n{}'.format(text, method_text)
def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
"""Visit a MiddlewareTreeItemInfo instance. Usually called by `process`."""
symbol = mti._symbols.get(mti.name, '→')
return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)
def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
"""Visit a MiddlewareTreeInfo instance. Usually called by `process`."""
before = len(m_tree.request) + len(m_tree.resource)
after = len(m_tree.response)
if before + after == 0:
return ''
each = 2
initial = self.indent
if after > before:
self.indent += each * (after - before)
text = []
for r in m_tree.request:
text.append(self.process(r))
self.indent += each
if text:
text.append('')
for r in m_tree.resource:
text.append(self.process(r))
self.indent += each
if m_tree.resource or not text:
text.append('')
self.indent += each
text.append('{}├── Process route responder'.format(self.tab))
self.indent -= each
if m_tree.response:
text.append('')
for r in m_tree.response:
self.indent -= each
text.append(self.process(r))
self.indent = initial
return '\n'.join(text)
def visit_middleware(self, middleware: MiddlewareInfo) -> str:
"""Visit a MiddlewareInfo instance. Usually called by `process`."""
text = self.process(middleware.middleware_tree)
if self.verbose:
self.indent += 4
m_text = '\n'.join(self.process(m) for m in middleware.middleware_classes)
self.indent -= 4
if m_text:
text += '\n{}- Middleware classes:\n{}'.format(self.tab, m_text)
return text
def visit_app(self, app: AppInfo) -> str:
"""Visit a AppInfo instance. Usually called by `process`."""
type_ = 'ASGI' if app.asgi else 'WSGI'
self.indent = 4
text = '{} ({})'.format(self.name or 'Falcon App', type_)
if app.routes:
routes = '\n'.join(self.process(r) for r in app.routes)
text += '\n• Routes:\n{}'.format(routes)
middleware_text = self.process(app.middleware)
if middleware_text:
text += '\n• Middleware ({}):\n{}'.format(
app.middleware.independent_text, middleware_text
)
if app.static_routes:
static_routes = '\n'.join(self.process(sr) for sr in app.static_routes)
text += '\n• Static routes:\n{}'.format(static_routes)
if app.sinks:
sinks = '\n'.join(self.process(s) for s in app.sinks)
text += '\n• Sinks:\n{}'.format(sinks)
errors = _filter_internal(app.error_handlers, self.internal)
if errors:
errs = '\n'.join(self.process(e) for e in errors)
text += '\n• Error handlers:\n{}'.format(errs)
return text
# FILE falcon/falcon/inspect.py
def inspect_app(app: App) -> 'AppInfo':
"""Inspects an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
AppInfo: The information regarding the application. Call
:meth:`~.AppInfo.to_string` on the result to obtain a human-friendly
representation.
"""
routes = inspect_routes(app)
static = inspect_static_routes(app)
sinks = inspect_sinks(app)
error_handlers = inspect_error_handlers(app)
middleware = inspect_middleware(app)
return AppInfo(routes, middleware, static, sinks, error_handlers, app._ASGI)
# FILE falcon/falcon/inspect.py
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
"""Inspects the static routes of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[StaticRouteInfo]: A list of static routes that have
been added to the application.
"""
routes = []
for sr, _, _ in app._static_routes:
info = StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
routes.append(info)
return routes
# FILE falcon/falcon/inspect.py
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
"""Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.
Default route inspector for CompiledRouter.
Args:
router (CompiledRouter): The router to inspect.
Returns:
List[RouteInfo]: A list of :class:`~.RouteInfo`.
"""
def _traverse(roots, parent):
for root in roots:
path = parent + '/' + root.raw_segment
if root.resource is not None:
methods = []
if root.method_map:
for method, func in root.method_map.items():
if isinstance(func, partial):
real_func = func.func
else:
real_func = func
source_info = _get_source_info(real_func)
internal = _is_internal(real_func)
method_info = RouteMethodInfo(
method, source_info, real_func.__name__, internal
)
methods.append(method_info)
source_info, class_name = _get_source_info_and_name(root.resource)
route_info = RouteInfo(path, class_name, source_info, methods)
routes.append(route_info)
if root.children:
_traverse(root.children, path)
routes = [] # type: List[RouteInfo]
_traverse(router._roots, '')
return routes
# FILE falcon/falcon/app.py
class App:
"""This class is the main entry point into a Falcon-based WSGI app.
Each App instance provides a callable
`WSGI <https://www.python.org/dev/peps/pep-3333/>`_ interface
and a routing engine (for ASGI applications, see
:class:`falcon.asgi.App`).
Note:
The ``API`` class was renamed to ``App`` in Falcon 3.0. The
old class name remains available as an alias for
backwards-compatibility, but will be removed in a future
release.
Keyword Arguments:
media_type (str): Default media type to use when initializing
:py:class:`~.RequestOptions` and
:py:class:`~.ResponseOptions`. The ``falcon``
module provides a number of constants for common media types,
such as ``falcon.MEDIA_MSGPACK``, ``falcon.MEDIA_YAML``,
``falcon.MEDIA_XML``, etc.
middleware: Either a single middleware component object or an iterable
of objects (instantiated classes) that implement the following
middleware component interface. Note that it is only necessary
to implement the methods for the events you would like to
handle; Falcon simply skips over any missing middleware methods::
class ExampleComponent:
def process_request(self, req, resp):
\"\"\"Process the request before routing it.
Note:
Because Falcon routes each request based on
req.path, a request can be effectively re-routed
by setting that attribute to a new value from
within process_request().
Args:
req: Request object that will eventually be
routed to an on_* responder method.
resp: Response object that will be routed to
the on_* responder.
\"\"\"
def process_resource(self, req, resp, resource, params):
\"\"\"Process the request and resource *after* routing.
Note:
This method is only called when the request matches
a route to a resource.
Args:
req: Request object that will be passed to the
routed responder.
resp: Response object that will be passed to the
responder.
resource: Resource object to which the request was
routed. May be None if no route was found for
the request.
params: A dict-like object representing any
additional params derived from the route's URI
template fields, that will be passed to the
resource's responder method as keyword
arguments.
\"\"\"
def process_response(self, req, resp, resource, req_succeeded)
\"\"\"Post-processing of the response (after routing).
Args:
req: Request object.
resp: Response object.
resource: Resource object to which the request was
routed. May be None if no route was found
for the request.
req_succeeded: True if no exceptions were raised
while the framework processed and routed the
request; otherwise False.
\"\"\"
(See also: :ref:`Middleware <middleware>`)
request_type: ``Request``-like class to use instead
of Falcon's default class. Among other things, this feature
affords inheriting from :class:`falcon.Request` in order
to override the ``context_type`` class variable
(default: :class:`falcon.Request`)
response_type: ``Response``-like class to use
instead of Falcon's default class (default:
:class:`falcon.Response`)
router (object): An instance of a custom router
to use in lieu of the default engine.
(See also: :ref:`Custom Routers <routing_custom>`)
independent_middleware (bool): Set to ``False`` if response
middleware should not be executed independently of whether or
not request middleware raises an exception (default
``True``). When this option is set to ``False``, a middleware
component's ``process_response()`` method will NOT be called
when that same component's ``process_request()`` (or that of
a component higher up in the stack) raises an exception.
cors_enable (bool): Set this flag to ``True`` to enable a simple
CORS policy for all responses, including support for preflighted
requests. An instance of :py:class:`~.CORSMiddleware` can instead be
passed to the middleware argument to customize its behaviour.
(default ``False``).
(See also: :ref:`CORS <cors>`)
sink_before_static_route (bool): Indicates if the sinks should be processed
before (when ``True``) or after (when ``False``) the static routes.
This has an effect only if no route was matched. (default ``True``)
Attributes:
req_options: A set of behavioral options related to incoming
requests. (See also: :py:class:`~.RequestOptions`)
resp_options: A set of behavioral options related to outgoing
responses. (See also: :py:class:`~.ResponseOptions`)
router_options: Configuration options for the router. If a
custom router is in use, and it does not expose any
configurable options, referencing this attribute will raise
an instance of ``AttributeError``.
(See also: :ref:`CompiledRouterOptions <compiled_router_options>`)
"""
_META_METHODS = frozenset(constants._META_METHODS)
_STREAM_BLOCK_SIZE = 8 * 1024 # 8 KiB
_STATIC_ROUTE_TYPE = routing.StaticRoute
# NOTE(kgriffs): This makes it easier to tell what we are dealing with
# without having to import falcon.asgi to get at the falcon.asgi.App
# type (which we may not be able to do under Python 3.5).
_ASGI = False
# NOTE(kgriffs): We do it like this rather than just implementing the
# methods directly on the class, so that we keep all the default
# responders colocated in the same module. This will make it more
# likely that the implementations of the async and non-async versions
# of the methods are kept in sync (pun intended).
_default_responder_bad_request = responders.bad_request
_default_responder_path_not_found = responders.path_not_found
__slots__ = (
'_cors_enable',
'_error_handlers',
'_independent_middleware',
'_middleware',
# NOTE(kgriffs): WebSocket is currently only supported for
# ASGI apps, but we may add support for WSGI at some point.
'_middleware_ws',
'_request_type',
'_response_type',
'_router_search',
'_router',
'_serialize_error',
'_sink_and_static_routes',
'_sink_before_static_route',
'_sinks',
'_static_routes',
'_unprepared_middleware',
'req_options',
'resp_options',
)
def __init__(
self,
media_type=constants.DEFAULT_MEDIA_TYPE,
request_type=Request,
response_type=Response,
middleware=None,
router=None,
independent_middleware=True,
cors_enable=False,
sink_before_static_route=True,
):
self._sink_before_static_route = sink_before_static_route
self._sinks = []
self._static_routes = []
self._sink_and_static_routes = ()
if cors_enable:
cm = CORSMiddleware()
if middleware is None:
middleware = [cm]
else:
try:
# NOTE(kgriffs): Check to see if middleware is an
# iterable, and if so, append the CORSMiddleware
# instance.
iter(middleware)
middleware = list(middleware)
middleware.append(cm)
except TypeError:
# NOTE(kgriffs): Assume the middleware kwarg references
# a single middleware component.
middleware = [middleware, cm]
# set middleware
self._unprepared_middleware = []
self._independent_middleware = independent_middleware
self.add_middleware(middleware)
self._router = router or routing.DefaultRouter()
self._router_search = self._router.find
self._request_type = request_type
self._response_type = response_type
self._error_handlers = {}
self._serialize_error = helpers.default_serialize_error
self.req_options = RequestOptions()
self.resp_options = ResponseOptions()
self.req_options.default_media_type = media_type
self.resp_options.default_media_type = media_type
# NOTE(kgriffs): Add default error handlers
self.add_error_handler(Exception, self._python_error_handler)
self.add_error_handler(HTTPError, self._http_error_handler)
self.add_error_handler(HTTPStatus, self._http_status_handler)
def __call__(self, env, start_response): # noqa: C901
"""WSGI `app` method.
Makes instances of App callable from a WSGI server. May be used to
host an App or called directly in order to simulate requests when
testing the App.
(See also: PEP 3333)
Args:
env (dict): A WSGI environment dictionary
start_response (callable): A WSGI helper function for setting
status and headers on a response.
"""
req = self._request_type(env, options=self.req_options)
resp = self._response_type(options=self.resp_options)
resource = None
responder = None
params = {}
dependent_mw_resp_stack = []
mw_req_stack, mw_rsrc_stack, mw_resp_stack = self._middleware
req_succeeded = False
try:
if req.method in self._META_METHODS:
raise HTTPBadRequest()
# NOTE(ealogar): The execution of request middleware
# should be before routing. This will allow request mw
# to modify the path.
# NOTE: if flag set to use independent middleware, execute
# request middleware independently. Otherwise, only queue
# response middleware after request middleware succeeds.
if self._independent_middleware:
for process_request in mw_req_stack:
process_request(req, resp)
if resp.complete:
break
else:
for process_request, process_response in mw_req_stack:
if process_request and not resp.complete:
process_request(req, resp)
if process_response:
dependent_mw_resp_stack.insert(0, process_response)
if not resp.complete:
# NOTE(warsaw): Moved this to inside the try except
# because it is possible when using object-based
# traversal for _get_responder() to fail. An example is
# a case where an object does not have the requested
# next-hop child resource. In that case, the object
# being asked to dispatch to its child will raise an
# HTTP exception signalling the problem, e.g. a 404.
responder, params, resource, req.uri_template = self._get_responder(req)
except Exception as ex:
if not self._handle_exception(req, resp, ex, params):
raise
else:
try:
# NOTE(kgriffs): If the request did not match any
# route, a default responder is returned and the
# resource is None. In that case, we skip the
# resource middleware methods. Resource will also be
# None when a middleware method already set
# resp.complete to True.
if resource:
# Call process_resource middleware methods.
for process_resource in mw_rsrc_stack:
process_resource(req, resp, resource, params)
if resp.complete:
break
if not resp.complete:
responder(req, resp, **params)
req_succeeded = True
except Exception as ex:
if not self._handle_exception(req, resp, ex, params):
raise
# Call process_response middleware methods.
for process_response in mw_resp_stack or dependent_mw_resp_stack:
try:
process_response(req, resp, resource, req_succeeded)
except Exception as ex:
if not self._handle_exception(req, resp, ex, params):
raise
req_succeeded = False
body = []
length = 0
try:
body, length = self._get_body(resp, env.get('wsgi.file_wrapper'))
except Exception as ex:
if not self._handle_exception(req, resp, ex, params):
raise
req_succeeded = False
resp_status = code_to_http_status(resp.status)
default_media_type = self.resp_options.default_media_type
if req.method == 'HEAD' or resp_status in _BODILESS_STATUS_CODES:
body = []
# PERF(vytas): move check for the less common and much faster path
# of resp_status being in {204, 304} here; NB: this builds on the
# assumption _TYPELESS_STATUS_CODES <= _BODILESS_STATUS_CODES.
# NOTE(kgriffs): Based on wsgiref.validate's interpretation of
# RFC 2616, as commented in that module's source code. The
# presence of the Content-Length header is not similarly
# enforced.
if resp_status in _TYPELESS_STATUS_CODES:
default_media_type = None
elif (
length is not None
and req.method == 'HEAD'
and resp_status not in _BODILESS_STATUS_CODES
and 'content-length' not in resp._headers
):
# NOTE(kgriffs): We really should be returning a Content-Length
# in this case according to my reading of the RFCs. By
# optionally using len(data) we let a resource simulate HEAD
# by turning around and calling its own on_get().
resp._headers['content-length'] = str(length)
else:
# PERF(kgriffs): You have to be evil. Operate directly on resp._headers
# to reduce overhead since this is a hot/critical code path.
# NOTE(kgriffs): We always set content-length to match the
# body bytes length, even if content-length is already set. The
# reason being that web servers and LBs behave unpredictably
# when the header doesn't match the body (sometimes choosing to
# drop the HTTP connection prematurely, for example).
if length is not None:
resp._headers['content-length'] = str(length)
headers = resp._wsgi_headers(default_media_type)
# Return the response per the WSGI spec.
start_response(resp_status, headers)
return body
@property
def router_options(self):
return self._router.options
def add_middleware(self, middleware):
"""Add one or more additional middleware components.
Arguments:
middleware: Either a single middleware component or an iterable
of components to add. The component(s) will be invoked, in
order, as if they had been appended to the original middleware
list passed to the class initializer.
"""
# NOTE(kgriffs): Since this is called by the initializer, there is
# the chance that middleware may be None.
if middleware:
try:
self._unprepared_middleware += middleware
except TypeError:
# middleware is not iterable; assume it is just one bare component
self._unprepared_middleware.append(middleware)
# NOTE(kgriffs): Even if middleware is None or an empty list, we still
# need to make sure self._middleware is initialized if this is the
# first call to add_middleware().
self._middleware = self._prepare_middleware(
self._unprepared_middleware,
independent_middleware=self._independent_middleware,
)
def add_route(self, uri_template, resource, **kwargs):
"""Associate a templatized URI path with a resource.
Falcon routes incoming requests to resources based on a set of
URI templates. If the path requested by the client matches the
template for a given route, the request is then passed on to the
associated resource for processing.
Note:
If no route matches the request, control then passes to a default
responder that simply raises an instance of
:class:`~.HTTPRouteNotFound`. By default, this error will be
rendered as a 404 response, but this behavior can be modified by
adding a custom error handler (see also
:ref:`this FAQ topic <faq_override_404_500_handlers>`).
On the other hand, if a route is matched but the resource does not
implement a responder for the requested HTTP method, the framework
invokes a default responder that raises an instance of
:class:`~.HTTPMethodNotAllowed`.
This method delegates to the configured router's ``add_route()``
method. To override the default behavior, pass a custom router
object to the :class:`~.App` initializer.
(See also: :ref:`Routing <routing>`)
Args:
uri_template (str): A templatized URI. Care must be
taken to ensure the template does not mask any sink
patterns, if any are registered.
(See also: :meth:`~.App.add_sink`)
Warning:
If :attr:`~falcon.RequestOptions.strip_url_path_trailing_slash`
is enabled, `uri_template` should be provided without a
trailing slash.
(See also: :ref:`trailing_slash_in_path`)
resource (instance): Object which represents a REST
resource. Falcon will pass GET requests to ``on_get()``,
PUT requests to ``on_put()``, etc. If any HTTP methods are not
supported by your resource, simply don't define the
corresponding request handlers, and Falcon will do the right
thing.
Note:
When using an async version of the ``App``, all request
handlers must be awaitable coroutine functions.
Keyword Args:
suffix (str): Optional responder name suffix for this route. If
a suffix is provided, Falcon will map GET requests to
``on_get_{suffix}()``, POST requests to ``on_post_{suffix}()``,
etc. In this way, multiple closely-related routes can be
mapped to the same resource. For example, a single resource
class can use suffixed responders to distinguish requests
for a single item vs. a collection of those same items.
Another class might use a suffixed responder to handle
a shortlink route in addition to the regular route for the
resource. For example::
class Baz(object):
def on_get_foo(self, req, resp):
pass
def on_get_bar(self, req, resp):
pass
baz = Baz()
app = falcon.App()
app.add_route('/foo', baz, suffix='foo')
app.add_route('/bar', baz, suffix='bar')
compile (bool): Optional flag that can be provided when using the default
:class:`.CompiledRouter` to compile the routing logic on this call,
since it will otherwise delay compilation until the first request
is routed. See :meth:`.CompiledRouter.add_route` for further details.
Note:
Any additional keyword arguments not defined above are passed
through to the underlying router's ``add_route()`` method. The
default router ignores any additional keyword arguments, but
custom routers may take advantage of this feature to receive
additional options when setting up routes. Custom routers MUST
accept such arguments using the variadic pattern (``**kwargs``), and
ignore any keyword arguments that they don't support.
"""
# NOTE(richardolsson): Doing the validation here means it doesn't have
# to be duplicated in every future router implementation.
if not isinstance(uri_template, str):
raise TypeError('uri_template is not a string')
if not uri_template.startswith('/'):
raise ValueError("uri_template must start with '/'")
if '//' in uri_template:
raise ValueError("uri_template may not contain '//'")
self._router.add_route(uri_template, resource, **kwargs)
def add_static_route(
self, prefix, directory, downloadable=False, fallback_filename=None
):
"""Add a route to a directory of static files.
Static routes provide a way to serve files directly. This
feature provides an alternative to serving files at the web server
level when you don't have that option, when authorization is
required, or for testing purposes.
Warning:
Serving files directly from the web server,
rather than through the Python app, will always be more efficient,
and therefore should be preferred in production deployments.
For security reasons, the directory and the fallback_filename (if provided)
should be read only for the account running the application.
Warning:
If you need to serve large files and/or progressive downloads (such
as in the case of video streaming) through the Falcon app, check
that your application server's timeout settings can accommodate the
expected request duration (for instance, the popular Gunicorn kills
``sync`` workers after 30 seconds unless configured otherwise).
Note:
For ASGI apps, file reads are made non-blocking by scheduling
them on the default executor.
Static routes are matched in LIFO order. Therefore, if the same
prefix is used for two routes, the second one will override the
first. This also means that more specific routes should be added
*after* less specific ones. For example, the following sequence
would result in ``'/foo/bar/thing.js'`` being mapped to the
``'/foo/bar'`` route, and ``'/foo/xyz/thing.js'`` being mapped to the
``'/foo'`` route::
app.add_static_route('/foo', foo_path)
app.add_static_route('/foo/bar', foobar_path)
Args:
prefix (str): The path prefix to match for this route. If the
path in the requested URI starts with this string, the remainder
of the path will be appended to the source directory to
determine the file to serve. This is done in a secure manner
to prevent an attacker from requesting a file outside the
specified directory.
Note that static routes are matched in LIFO order, and are only
attempted after checking dynamic routes and sinks.
directory (Union[str, pathlib.Path]): The source directory from
which to serve files.
downloadable (bool): Set to ``True`` to include a
Content-Disposition header in the response. The "filename"
directive is simply set to the name of the requested file.
fallback_filename (str): Fallback filename used when the requested file
is not found. Can be a relative path inside the prefix folder or
any valid absolute path.
"""
sr = self._STATIC_ROUTE_TYPE(
prefix,
directory,
downloadable=downloadable,
fallback_filename=fallback_filename,
)
self._static_routes.insert(0, (sr, sr, False))
self._update_sink_and_static_routes()
def add_sink(self, sink, prefix=r'/'):
"""Register a sink method for the App.
If no route matches a request, but the path in the requested URI
matches a sink prefix, Falcon will pass control to the
associated sink, regardless of the HTTP method requested.
Using sinks, you can drain and dynamically handle a large number
of routes, when creating static resources and responders would be
impractical. For example, you might use a sink to create a smart
proxy that forwards requests to one or more backend services.
Args:
sink (callable): A callable taking the form ``func(req, resp, **kwargs)``.
Note:
When using an async version of the ``App``, this must be a
coroutine.
prefix (str): A regex string, typically starting with '/', which
will trigger the sink if it matches the path portion of the
request's URI. Both strings and precompiled regex objects
may be specified. Characters are matched starting at the
beginning of the URI path.
Note:
Named groups are converted to kwargs and passed to
the sink as such.
Warning:
If the prefix overlaps a registered route template,
the route will take precedence and mask the sink.
(See also: :meth:`~.add_route`)
"""
if not self._ASGI and iscoroutinefunction(sink):
raise CompatibilityError(
'The sink method must be a regular synchronous function '
'in order to be used with a WSGI app.'
)
if not hasattr(prefix, 'match'):
# Assume it is a string
prefix = re.compile(prefix)
# NOTE(kgriffs): Insert at the head of the list such that
# in the case of a duplicate prefix, the last one added
# is preferred.
self._sinks.insert(0, (prefix, sink, True))
self._update_sink_and_static_routes()
def add_error_handler(self, exception, handler=None):
"""Register a handler for one or more exception types.
Error handlers may be registered for any exception type, including
:class:`~.HTTPError` or :class:`~.HTTPStatus`. This feature
provides a central location for logging and otherwise handling
exceptions raised by responders, hooks, and middleware components.
A handler can raise an instance of :class:`~.HTTPError` or
:class:`~.HTTPStatus` to communicate information about the issue to
the client. Alternatively, a handler may modify `resp`
directly.
An error handler "matches" a raised exception if the exception is an
instance of the corresponding exception type. If more than one error
handler matches the raised exception, the framework will choose the
most specific one, as determined by the method resolution order of the
raised exception type. If multiple error handlers are registered for the
*same* exception class, then the most recently-registered handler is
used.
For example, suppose we register error handlers as follows::
app = App()
app.add_error_handler(falcon.HTTPNotFound, custom_handle_not_found)
app.add_error_handler(falcon.HTTPError, custom_handle_http_error)
app.add_error_handler(Exception, custom_handle_uncaught_exception)
app.add_error_handler(falcon.HTTPNotFound, custom_handle_404)
If an instance of ``falcon.HTTPForbidden`` is raised, it will be
handled by ``custom_handle_http_error()``. ``falcon.HTTPError`` is a
superclass of ``falcon.HTTPForbidden`` and a subclass of ``Exception``,
so it is the most specific exception type with a registered handler.
If an instance of ``falcon.HTTPNotFound`` is raised, it will be handled
by ``custom_handle_404()``, not by ``custom_handle_not_found()``, because
``custom_handle_404()`` was registered more recently.
.. Note::
By default, the framework installs three handlers, one for
:class:`~.HTTPError`, one for :class:`~.HTTPStatus`, and one for
the standard ``Exception`` type, which prevents passing uncaught
exceptions to the WSGI server. These can be overridden by adding a
custom error handler method for the exception type in question.
Args:
exception (type or iterable of types): When handling a request,
whenever an error occurs that is an instance of the specified
type(s), the associated handler will be called. Either a single
type or an iterable of types may be specified.
handler (callable): A function or callable object taking the form
``func(req, resp, ex, params)``.
If not specified explicitly, the handler will default to
``exception.handle``, where ``exception`` is the error
type specified above, and ``handle`` is a static method
(i.e., decorated with ``@staticmethod``) that accepts
the same params just described. For example::
class CustomException(CustomBaseException):
@staticmethod
def handle(req, resp, ex, params):
# TODO: Log the error
# Convert to an instance of falcon.HTTPError
raise falcon.HTTPError(falcon.HTTP_792)
If an iterable of exception types is specified instead of
a single type, the handler must be explicitly specified.
.. versionchanged:: 3.0
The error handler is now selected by the most-specific matching
error class, rather than the most-recently registered matching error
class.
"""
def wrap_old_handler(old_handler):
# NOTE(kgriffs): This branch *is* actually tested by
# test_error_handlers.test_handler_signature_shim_asgi() (as
# verified manually via pdb), but for some reason coverage
# tracking isn't picking it up.
if iscoroutinefunction(old_handler): # pragma: no cover
@wraps(old_handler)
async def handler_async(req, resp, ex, params):
await old_handler(ex, req, resp, params)
return handler_async
@wraps(old_handler)
def handler(req, resp, ex, params):
old_handler(ex, req, resp, params)
return handler
if handler is None:
try:
handler = exception.handle
except AttributeError:
raise AttributeError(
'handler must either be specified '
'explicitly or defined as a static '
'method named "handle" that is a '
'member of the given exception class.'
)
# TODO(vytas): Remove this shimming in a future Falcon version.
arg_names = tuple(misc.get_argnames(handler))
if arg_names[0:1] in (
('e',),
('err',),
('error',),
('ex',),
('exception',),
) or arg_names[1:3] in (('req', 'resp'), ('request', 'response')):
handler = wrap_old_handler(handler)
try:
exception_tuple = tuple(exception)
except TypeError:
exception_tuple = (exception,)
for exc in exception_tuple:
if not issubclass(exc, BaseException):
raise TypeError('"exception" must be an exception type.')
self._error_handlers[exc] = handler
def set_error_serializer(self, serializer):
"""Override the default serializer for instances of :class:`~.HTTPError`.
When a responder raises an instance of :class:`~.HTTPError`,
Falcon converts it to an HTTP response automatically. The
default serializer supports JSON and XML, but may be overridden
by this method to use a custom serializer in order to support
other media types.
Note:
If a custom media type is used and the type includes a
"+json" or "+xml" suffix, the default serializer will
convert the error to JSON or XML, respectively.
Note:
A custom serializer set with this method may not be called if the
default error handler for :class:`~.HTTPError` has been overridden.
See :meth:`~.add_error_handler` for more details.
The :class:`~.HTTPError` class contains helper methods,
such as `to_json()` and `to_dict()`, that can be used from
within custom serializers. For example::
def my_serializer(req, resp, exception):
representation = None
preferred = req.client_prefers((falcon.MEDIA_YAML, falcon.MEDIA_JSON))
if preferred is not None:
if preferred == falcon.MEDIA_JSON:
resp.data = exception.to_json()
else:
resp.text = yaml.dump(exception.to_dict(), encoding=None)
resp.content_type = preferred
resp.append_header('Vary', 'Accept')
Args:
serializer (callable): A function taking the form
``func(req, resp, exception)``, where `req` is the request
object that was passed to the responder method, `resp` is
the response object, and `exception` is an instance of
``falcon.HTTPError``.
"""
self._serialize_error = serializer
# ------------------------------------------------------------------------
# Helpers that require self
# ------------------------------------------------------------------------
def _prepare_middleware(self, middleware=None, independent_middleware=False):
return helpers.prepare_middleware(
middleware=middleware, independent_middleware=independent_middleware
)
def _get_responder(self, req):
"""Search routes for a matching responder.
Args:
req (Request): The request object.
Returns:
tuple: A 4-member tuple consisting of a responder callable,
a ``dict`` containing parsed path fields (if any were specified in
the matching route's URI template), a reference to the responder's
resource instance, and the matching URI template.
Note:
If a responder was matched to the given URI, but the HTTP
method was not found in the method_map for the responder,
the responder callable element of the returned tuple will be
`falcon.responders.bad_request`.
Likewise, if no responder was matched for the given URI, then
the responder callable element of the returned tuple will be
`falcon.responders.path_not_found`.
"""
path = req.path
method = 'WEBSOCKET' if req.is_websocket else req.method
uri_template = None
route = self._router_search(path, req=req)
if route is not None:
try:
resource, method_map, params, uri_template = route
except ValueError:
# NOTE(kgriffs): Older routers may not return the
# template. But for performance reasons they should at
# least return None if they don't support it.
resource, method_map, params = route
else:
# NOTE(kgriffs): Older routers may indicate that no route
# was found by returning (None, None, None). Therefore, we
# normalize resource as the flag to indicate whether or not
# a route was found, for the sake of backwards-compat.
resource = None
if resource is not None:
try:
responder = method_map[method]
except KeyError:
# NOTE(kgriffs): Dirty hack! We use __class__ here to avoid
# binding self to the default responder method. We could
# decorate the function itself with @staticmethod, but it
# would perhaps be less obvious to the reader why this is
# needed when just looking at the code in the responder
# module, so we just grab it directly here.
responder = self.__class__._default_responder_bad_request
else:
params = {}
for matcher, obj, is_sink in self._sink_and_static_routes:
m = matcher.match(path)
if m:
if is_sink:
params = m.groupdict()
responder = obj
break
else:
responder = self.__class__._default_responder_path_not_found
return (responder, params, resource, uri_template)
def _compose_status_response(self, req, resp, http_status):
"""Compose a response for the given HTTPStatus instance."""
# PERF(kgriffs): The code to set the status and headers is identical
# to that used in _compose_error_response(), but refactoring in the
# name of DRY isn't worth the extra CPU cycles.
resp.status = http_status.status
if http_status.headers is not None:
resp.set_headers(http_status.headers)
# NOTE(kgriffs): If http_status.text is None, that's OK because
# it's acceptable to set resp.text to None (to indicate no body).
resp.text = http_status.text
def _compose_error_response(self, req, resp, error):
"""Compose a response for the given HTTPError instance."""
resp.status = error.status
if error.headers is not None:
resp.set_headers(error.headers)
self._serialize_error(req, resp, error)
def _http_status_handler(self, req, resp, status, params):
self._compose_status_response(req, resp, status)
def _http_error_handler(self, req, resp, error, params):
self._compose_error_response(req, resp, error)
def _python_error_handler(self, req, resp, error, params):
req.log_error(traceback.format_exc())
self._compose_error_response(req, resp, HTTPInternalServerError())
def _find_error_handler(self, ex):
# NOTE(csojinb): The `__mro__` class attribute returns the method
# resolution order tuple, i.e. the complete linear inheritance chain
# ``(type(ex), ..., object)``. For a valid exception class, the last
two entries in the tuple will always be ``BaseException`` and
# ``object``, so here we iterate over the lineage of exception types,
# from most to least specific.
# PERF(csojinb): The expression ``type(ex).__mro__[:-1]`` here is not
# super readable, but we inline it to avoid function call overhead.
for exc in type(ex).__mro__[:-1]:
handler = self._error_handlers.get(exc)
if handler is not None:
return handler
def _handle_exception(self, req, resp, ex, params):
"""Handle an exception raised from mw or a responder.
Args:
ex: Exception to handle
req: Current request object to pass to the handler
registered for the given exception type
resp: Current response object to pass to the handler
registered for the given exception type
params: Responder params to pass to the handler
registered for the given exception type
Returns:
bool: ``True`` if a handler was found and called for the
exception, ``False`` otherwise.
"""
err_handler = self._find_error_handler(ex)
# NOTE(caselit): Reset body, data and media before calling the handler
resp.text = resp.data = resp.media = None
if err_handler is not None:
try:
err_handler(req, resp, ex, params)
except HTTPStatus as status:
self._compose_status_response(req, resp, status)
except HTTPError as error:
self._compose_error_response(req, resp, error)
return True
# NOTE(kgriffs): No error handlers are defined for ex
# and it is not one of (HTTPStatus, HTTPError), since it
# would have matched one of the corresponding default
# handlers.
return False
# PERF(kgriffs): Moved from api_helpers since it is slightly faster
# to call using self, and this function is called for most
# requests.
def _get_body(self, resp, wsgi_file_wrapper=None):
"""Convert resp content into an iterable as required by PEP 333.
Args:
resp: Instance of falcon.Response
wsgi_file_wrapper: Reference to wsgi.file_wrapper from the
WSGI environ dict, if provided by the WSGI server. Used
when resp.stream is a file-like object (default None).
Returns:
tuple: A two-member tuple of the form (iterable, content_length).
The length is returned as ``None`` when unknown. The
iterable is determined as follows:
* If the result of render_body() is not ``None``, returns
([data], len(data))
* If resp.stream is not ``None``, returns resp.stream
iterable using wsgi.file_wrapper, if necessary:
(closeable_iterator, None)
* Otherwise, returns ([], 0)
"""
data = resp.render_body()
if data is not None:
return [data], len(data)
stream = resp.stream
if stream is not None:
# NOTE(kgriffs): Heuristic to quickly check if stream is
# file-like. Not perfect, but should be good enough until
# proven otherwise.
if hasattr(stream, 'read'):
if wsgi_file_wrapper is not None:
# TODO(kgriffs): Make block size configurable at the
# global level, pending experimentation to see how
# useful that would be. See also the discussion on
# this GitHub PR: http://goo.gl/XGrtDz
iterable = wsgi_file_wrapper(stream, self._STREAM_BLOCK_SIZE)
else:
iterable = helpers.CloseableStreamIterator(
stream, self._STREAM_BLOCK_SIZE
)
else:
iterable = stream
return iterable, None
return [], 0
def _update_sink_and_static_routes(self):
if self._sink_before_static_route:
self._sink_and_static_routes = tuple(self._sinks + self._static_routes)
else:
self._sink_and_static_routes = tuple(self._static_routes + self._sinks)
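Taken together, the routing, sink, and static-route methods documented above compose roughly as follows. This is a minimal illustrative sketch; the resource class, sink callable, and paths are hypothetical and not part of the Falcon sources quoted here:

import falcon


class ThingsResource:
    # Hypothetical resource used purely for illustration.
    def on_get(self, req, resp):
        resp.media = {'things': []}


def legacy_sink(req, resp, **kwargs):
    # Hypothetical sink: invoked for any HTTP method on paths under /legacy/.
    resp.media = {'path': req.path}


app = falcon.App(cors_enable=True, sink_before_static_route=True)
app.add_route('/things', ThingsResource())
app.add_sink(legacy_sink, prefix='/legacy/')
# add_static_route assumes the given directory exists on disk:
# app.add_static_route('/static', '/var/www/static', downloadable=False)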
# FILE falcon/falcon/inspect.py
class RouteInfo(_Traversable):
"""Describes a route.
Args:
path (str): The path of this route.
class_name (str): The class name of the responder of this route.
source_info (str): The source path where this responder was defined.
methods (List[RouteMethodInfo]): List of methods defined in the route.
"""
__visit_name__ = 'route'
def __init__(
self,
path: str,
class_name: str,
source_info: str,
methods: List[RouteMethodInfo],
):
self.path = path
self.class_name = class_name
self.source_info = source_info
self.methods = methods
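For reference, a custom inspector can construct RouteInfo instances directly. A small sketch with placeholder values (none of these paths or class names come from the Falcon sources above):

info = RouteInfo(
    path='/things',
    class_name='ThingsResource',
    source_info='/path/to/things.py:10',
    methods=[],  # a real inspector would append RouteMethodInfo entries here
)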
# FILE falcon/falcon/inspect.py
def inspect_routes(app: App) -> 'List[RouteInfo]':
"""Inspects the routes of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[RouteInfo]: A list of route descriptions for the application.
"""
router = app._router
inspect_function = _supported_routers.get(type(router))
if inspect_function is None:
raise TypeError(
'Unsupported router class {}. Use "register_router" '
'to register a function that can inspect the router '
'used by the provided application'.format(type(router))
)
return inspect_function(router)
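A brief usage sketch for inspect_routes against a trivial app; the route and resource names are illustrative only:

import falcon
from falcon.inspect import inspect_routes


class HealthResource:
    # Hypothetical resource for illustration.
    def on_get(self, req, resp):
        resp.media = {'status': 'ok'}


app = falcon.App()
app.add_route('/health', HealthResource())

for route in inspect_routes(app):
    # Each RouteInfo carries the path, responder class name, and source location.
    print(route.path, route.class_name, route.source_info)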
# FILE falcon/falcon/inspect.py
_supported_routers = {} # type: Dict[Type, Callable]
Based on the information above, please complete the function:
#CURRENT_FILE: falcon/falcon/inspect.py
from functools import partial
import inspect
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from falcon.app import App
from falcon.routing import CompiledRouter
from falcon import app_helpers
def register_router(router_class):
"""Register a function to inspect a particular router.
This decorator registers a new function for a custom router
class, so that it can be inspected with the function
:func:`.inspect_routes`.
An inspection function takes the router instance used by the
application and returns a list of :class:`.RouteInfo`. Eg::
@register_router(MyRouterClass)
def inspect_my_router(router):
return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]
Args:
router_class (Type): The router class to register. If
already registered an error will be raised.
"""
falcon/falcon/inspect.py