Browse Source

Docstring conversion #3.

Stefano Cossu 6 years ago
parent
commit
a813ac161f

+ 8 - 8
lakesuperior/endpoints/admin.py

@@ -13,21 +13,21 @@ admin = Blueprint('admin', __name__)
 
 @admin.route('/stats', methods=['GET'])
 def stats():
-    '''
+    """
     Get repository statistics.
-    '''
+    """
     def fsize_fmt(num, suffix='b'):
-        '''
+        """
         Format an integer into 1024-block file size format.
 
         Adapted from Python 2 code on
         https://stackoverflow.com/a/1094933/3758232
 
-        @param num (int) Size value in bytes.
-        @param suffix (string) Suffix label (defaults to `B`).
+        :param int num: Size value in bytes.
+        :param string suffix: Suffix label (defaults to `b`).
 
         @return string Formatted size to largest fitting unit.
-        '''
+        """
         for unit in ['','K','M','G','T','P','E','Z']:
             if abs(num) < 1024.0:
                 return "{:3.1f} {}{}".format(num, unit, suffix)
@@ -42,9 +42,9 @@ def stats():
 
 @admin.route('/tools', methods=['GET'])
 def admin_tools():
-    '''
+    """
     Admin tools.
 
     @TODO stub.
-    '''
+    """
     return render_template('admin_tools.html')

+ 41 - 41
lakesuperior/endpoints/ldp.py

@@ -62,7 +62,7 @@ std_headers = {
     #'Allow' : ','.join(allow),
 }
 
-'''Predicates excluded by view.'''
+"""Predicates excluded by view."""
 vw_blacklist = {
 }
 
@@ -112,17 +112,17 @@ def log_request_end(rsp):
 @ldp.route('/<path:uid>/fcr:content', defaults={'out_fmt' : 'non_rdf'},
         methods=['GET'])
 def get_resource(uid, out_fmt=None):
-    '''
+    """
     https://www.w3.org/TR/ldp/#ldpr-HTTP_GET
 
     Retrieve RDF or binary content.
 
-    @param uid (string) UID of resource to retrieve. The repository root has
+    :param str uid: UID of resource to retrieve. The repository root has
     an empty string for UID.
-    @param out_fmt (string) Force output to RDF or non-RDF if the resource is
+    :param str out_fmt: Force output to RDF or non-RDF if the resource is
     a LDP-NR. This is not available in the API but is used e.g. by the
     `*/fcr:metadata` and `*/fcr:content` endpoints. The default is False.
-    '''
+    """
     logger.info('UID: {}'.format(uid))
     out_headers = std_headers
     repr_options = defaultdict(dict)
@@ -169,9 +169,9 @@ def get_resource(uid, out_fmt=None):
 
 @ldp.route('/<path:uid>/fcr:versions', methods=['GET'])
 def get_version_info(uid):
-    '''
+    """
     Get version info (`fcr:versions`).
-    '''
+    """
     try:
         gr = rsrc_api.get_version_info(uid)
     except ResourceNotExistsError as e:
@@ -186,12 +186,12 @@ def get_version_info(uid):
 
 @ldp.route('/<path:uid>/fcr:versions/<ver_uid>', methods=['GET'])
 def get_version(uid, ver_uid):
-    '''
+    """
     Get an individual resource version.
 
-    @param uid (string) Resource UID.
-    @param ver_uid (string) Version UID.
-    '''
+    :param str uid: Resource UID.
+    :param str ver_uid: Version UID.
+    """
     try:
         gr = rsrc_api.get_version(uid, ver_uid)
     except ResourceNotExistsError as e:
@@ -208,11 +208,11 @@ def get_version(uid, ver_uid):
 @ldp.route('/', defaults={'parent_uid': '/'}, methods=['POST'],
         strict_slashes=False)
 def post_resource(parent_uid):
-    '''
+    """
     https://www.w3.org/TR/ldp/#ldpr-HTTP_POST
 
     Add a new resource in a new URI.
-    '''
+    """
     out_headers = std_headers
     try:
         slug = request.headers['Slug']
@@ -261,11 +261,11 @@ def post_resource(parent_uid):
 @ldp.route('/<path:uid>/fcr:metadata', defaults={'force_rdf' : True},
         methods=['PUT'])
 def put_resource(uid):
-    '''
+    """
     https://www.w3.org/TR/ldp/#ldpr-HTTP_PUT
 
     Add or replace a new resource at a specified URI.
-    '''
+    """
     # Parse headers.
     logger.debug('Request headers: {}'.format(request.headers))
 
@@ -310,11 +310,11 @@ def put_resource(uid):
 
 @ldp.route('/<path:uid>', methods=['PATCH'], strict_slashes=False)
 def patch_resource(uid, is_metadata=False):
-    '''
+    """
     https://www.w3.org/TR/ldp/#ldpr-HTTP_PATCH
 
     Update an existing resource with a SPARQL-UPDATE payload.
-    '''
+    """
     rsp_headers = {'Content-Type' : 'text/plain; charset=utf-8'}
     if request.mimetype != 'application/sparql-update':
         return 'Provided content type is not a valid parsable format: {}'\
@@ -344,7 +344,7 @@ def patch_resource_metadata(uid):
 
 @ldp.route('/<path:uid>', methods=['DELETE'])
 def delete_resource(uid):
-    '''
+    """
     Delete a resource and optionally leave a tombstone.
 
     This behaves differently from FCREPO. A tombstone indicated that the
@@ -355,7 +355,7 @@ def delete_resource(uid):
     In order to completely wipe out all traces of a resource, the tombstone
     must be deleted as well, or the `Prefer:no-tombstone` header can be used.
     The latter will forget (completely delete) the resource immediately.
-    '''
+    """
     headers = std_headers
 
     if 'prefer' in request.headers:
@@ -377,12 +377,12 @@ def delete_resource(uid):
 @ldp.route('/<path:uid>/fcr:tombstone', methods=['GET', 'POST', 'PUT',
         'PATCH', 'DELETE'])
 def tombstone(uid):
-    '''
+    """
     Handle all tombstone operations.
 
     The only allowed methods are POST and DELETE; any other verb will return a
     405.
-    '''
+    """
     try:
         rsrc = rsrc_api.get(uid)
     except TombstoneError as e:
@@ -409,9 +409,9 @@ def tombstone(uid):
 
 @ldp.route('/<path:uid>/fcr:versions', methods=['POST', 'PUT'])
 def post_version(uid):
-    '''
+    """
     Create a new resource version.
-    '''
+    """
     if request.method == 'PUT':
         return 'Method not allowed.', 405
     ver_uid = request.headers.get('slug', None)
@@ -430,14 +430,14 @@ def post_version(uid):
 
 @ldp.route('/<path:uid>/fcr:versions/<ver_uid>', methods=['PATCH'])
 def patch_version(uid, ver_uid):
-    '''
+    """
     Revert to a previous version.
 
     NOTE: This creates a new version snapshot.
 
-    @param uid (string) Resource UID.
-    @param ver_uid (string) Version UID.
-    '''
+    :param str uid: Resource UID.
+    :param str ver_uid: Version UID.
+    """
     try:
         LdpFactory.from_stored(uid).revert_to_version(ver_uid)
     except ResourceNotExistsError as e:
@@ -453,9 +453,9 @@ def patch_version(uid, ver_uid):
 ## PRIVATE METHODS ##
 
 def _negotiate_content(gr, headers=None, **vw_kwargs):
-    '''
+    """
     Return HTML or serialized RDF depending on accept headers.
-    '''
+    """
     if request.accept_mimetypes.best == 'text/html':
         return render_template(
                 'resource.html', gr=gr, nsc=nsc, nsm=nsm,
@@ -467,9 +467,9 @@ def _negotiate_content(gr, headers=None, **vw_kwargs):
 
 
 def _bistream_from_req():
-    '''
+    """
     Find how a binary file and its MIMEtype were uploaded in the request.
-    '''
+    """
     #logger.debug('Content type: {}'.format(request.mimetype))
     #logger.debug('files: {}'.format(request.files))
     #logger.debug('stream: {}'.format(request.stream))
@@ -508,9 +508,9 @@ def _tombstone_response(e, uid):
 
 
 def set_post_put_params():
-    '''
+    """
     Sets handling and content disposition for POST and PUT by parsing headers.
-    '''
+    """
     handling = 'strict'
     if 'prefer' in request.headers:
         prefer = g.tbox.parse_rfc7240(request.headers['prefer'])
@@ -528,10 +528,10 @@ def set_post_put_params():
 
 
 def is_accept_hdr_rdf_parsable():
-    '''
+    """
     Check if any of the 'Accept' header values provided is a RDF parsable
     format.
-    '''
+    """
     for mimetype in request.accept_mimetypes.values():
         if LdpFactory.is_rdf_parsable(mimetype):
             return True
@@ -539,14 +539,14 @@ def is_accept_hdr_rdf_parsable():
 
 
 def parse_repr_options(retr_opts):
-    '''
+    """
     Set options to retrieve IMR.
 
     Ideally, IMR retrieval is done once per request, so all the options
     are set once in the `imr()` property.
 
-    @param retr_opts (dict): Options parsed from `Prefer` header.
-    '''
+    :param dict retr_opts: Options parsed from `Prefer` header.
+    """
     logger.debug('Parsing retrieval options: {}'.format(retr_opts))
     imr_options = {}
 
@@ -591,12 +591,12 @@ def parse_repr_options(retr_opts):
 
 
 def _headers_from_metadata(rsrc):
-    '''
+    """
     Create a dict of headers from a metadata graph.
 
-    @param rsrc (lakesuperior.model.ldpr.Ldpr) Resource to extract metadata
+    :param lakesuperior.model.ldpr.Ldpr rsrc: Resource to extract metadata
     from.
-    '''
+    """
     out_headers = defaultdict(list)
 
     digest = rsrc.metadata.value(nsc['premis'].hasMessageDigest)

+ 2 - 6
lakesuperior/endpoints/main.py

@@ -13,17 +13,13 @@ main = Blueprint('main', __name__, template_folder='templates',
 
 @main.route('/', methods=['GET'])
 def index():
-    '''
-    Homepage.
-    '''
+    """Homepage."""
     return render_template('index.html')
 
 
 @main.route('/debug', methods=['GET'])
 def debug():
-    '''
-    Debug page.
-    '''
+    """Debug page."""
     raise RuntimeError()
 
 

+ 5 - 5
lakesuperior/endpoints/query.py

@@ -20,9 +20,9 @@ query = Blueprint('query', __name__)
 
 @query.route('/term_search', methods=['GET'])
 def term_search():
-    '''
+    """
     Search by entering a search term and optional property and comparison term.
-    '''
+    """
     valid_operands = (
         ('=', 'Equals'),
         ('>', 'Greater Than'),
@@ -40,11 +40,11 @@ def term_search():
 
 @query.route('/sparql', methods=['GET', 'POST'])
 def sparql():
-    '''
+    """
     Perform a direct SPARQL query on the underlying triplestore.
 
-    @param qry SPARQL query string.
-    '''
+    :param str qry: SPARQL query string.
+    """
     accept_mimetypes = {
         'text/csv': 'csv',
         'application/sparql-results+json': 'json',

+ 14 - 18
lakesuperior/messaging/formatters.py

@@ -8,12 +8,12 @@ from lakesuperior.globals import RES_CREATED, RES_DELETED, RES_UPDATED
 
 
 class BaseASFormatter(metaclass=ABCMeta):
-    '''
+    """
     Format message as ActivityStreams.
 
     This is not really a `logging.Formatter` subclass, but a plain string
     builder.
-    '''
+    """
     ev_types = {
         RES_CREATED : 'Create',
         RES_DELETED : 'Delete',
@@ -28,7 +28,7 @@ class BaseASFormatter(metaclass=ABCMeta):
 
     def __init__(
             self, rsrc_uri, ev_type, timestamp, rsrc_type, actor, data=None):
-        '''
+        """
         Format output according to granularity level.
 
         NOTE: Granularity level does not refer to the logging levels, i.e.
@@ -36,14 +36,14 @@ class BaseASFormatter(metaclass=ABCMeta):
         are logged under the same level. This it is rather about *what* gets
         logged in a message.
 
-        @param rsrc_uri (rdflib.URIRef) URI of the resource.
-        @param ev_type (string) one of `create`, `delete` or `update`
-        @param timestamp (string) Timestamp of the event.
-        @param data (tuple(set)) if messaging is configured with `provenance`
+        :param rdflib.URIRef rsrc_uri: URI of the resource.
+        :param str ev_type: one of `create`, `delete` or `update`
+        :param str timestamp: Timestamp of the event.
+        :param tuple(set) data: if messaging is configured with `provenance`
         level, this is a 2-tuple with one set (as 3-tuples of
         RDFlib.Identifier instances) for removed triples, and one set for
         added triples.
-        '''
+        """
         self.rsrc_uri = rsrc_uri
         self.ev_type = ev_type
         self.timestamp = timestamp
@@ -59,15 +59,13 @@ class BaseASFormatter(metaclass=ABCMeta):
 
 
 class ASResourceFormatter(BaseASFormatter):
-    '''
+    """
     Sends information about a resource being created, updated or deleted, by
     who and when, with no further information about what changed.
-    '''
+    """
 
     def __str__(self):
-        '''
-        Output structured data as string.
-        '''
+        """Output structured data as string."""
         ret = {
             '@context': 'https://www.w3.org/ns/activitystreams',
             'id' : 'urn:uuid:{}'.format(uuid.uuid4()),
@@ -86,15 +84,13 @@ class ASResourceFormatter(BaseASFormatter):
 
 
 class ASDeltaFormatter(BaseASFormatter):
-    '''
+    """
     Sends the same information as `ASResourceFormatter` with the addition of
     the triples that were added and the ones that were removed in the request.
     This may be used to send rich provenance data to a preservation system.
-    '''
+    """
     def __str__(self):
-        '''
-        Output structured data as string.
-        '''
+        """Output structured data as string."""
         ret = {
             '@context': 'https://www.w3.org/ns/activitystreams',
             'id' : 'urn:uuid:{}'.format(uuid.uuid4()),

+ 4 - 8
lakesuperior/messaging/handlers.py

@@ -4,13 +4,13 @@ import stomp
 
 
 class StompHandler(logging.Handler):
-    '''
+    """
     Send messages to a remote queue broker using the STOMP protocol.
 
     This module is named and configured separately from
     standard logging for clarity about its scope: while logging has an
     informational purpose, this module has a functional one.
-    '''
+    """
     def __init__(self, conf):
         self.conf = conf
         if self.conf['protocol'] == '11':
@@ -32,15 +32,11 @@ class StompHandler(logging.Handler):
 
 
     def __del_(self):
-        '''
-        Disconnect the client.
-        '''
+        """Disconnect the client."""
         self.conn.disconnect()
 
     def emit(self, record):
-        '''
-        Send the message to the destination endpoint.
-        '''
+        """Send the message to the destination endpoint."""
         self.conn.send(destination=self.conf['destination'],
                 body=bytes(self.format(record), 'utf-8'))
 

+ 6 - 8
lakesuperior/messaging/messenger.py

@@ -7,15 +7,15 @@ messenger = logging.getLogger('_messenger')
 
 
 class Messenger:
-    '''
+    """
     Very simple message sender using the standard Python logging facility.
-    '''
+    """
     def __init__(self, config):
-        '''
+        """
         Set up the messenger.
 
-        @param config (dict) Messenger configuration.
-        '''
+        :param dict config: Messenger configuration.
+        """
         def msg_routes():
             for route in config['routes']:
                 handler_cls = getattr(handlers, route['handler'])
@@ -31,8 +31,6 @@ class Messenger:
 
 
     def send(self, *args, **kwargs):
-        '''
-        Send one or more external messages.
-        '''
+        """Send one or more external messages."""
         for msg, fn in self.msg_routes:
             msg.info(fn(*args, **kwargs))

+ 20 - 20
lakesuperior/model/ldp_factory.py

@@ -26,10 +26,10 @@ logger = logging.getLogger(__name__)
 
 
 class LdpFactory:
-    '''
+    """
     Generate LDP instances.
     The instance classes are based on provided client data or on stored data.
-    '''
+    """
     @staticmethod
     def new_container(uid):
         if not uid.startswith('/') or uid == '/':
@@ -43,7 +43,7 @@ class LdpFactory:
 
     @staticmethod
     def from_stored(uid, repr_opts={}, **kwargs):
-        '''
+        """
         Create an instance for retrieval purposes.
 
         This factory method creates and returns an instance of an LDPR subclass
@@ -52,8 +52,8 @@ class LdpFactory:
 
         N.B. The resource must exist.
 
-        @param uid UID of the instance.
-        '''
+        :param uid: UID of the instance.
+        """
         #logger.info('Retrieving stored resource: {}'.format(uid))
         imr_urn = nsc['fcres'][uid]
 
@@ -80,16 +80,16 @@ class LdpFactory:
     @staticmethod
     def from_provided(
             uid, mimetype=None, stream=None, provided_imr=None, **kwargs):
-        '''
+        """
         Determine LDP type from request content.
 
-        @param uid (string) UID of the resource to be created or updated.
-        @param mimetype (string) The provided content MIME type.
-        @param stream (IOStream | None) The provided data stream. This can be
+        :param str uid: UID of the resource to be created or updated.
+        :param str mimetype: The provided content MIME type.
+        :param IOStream | None stream: The provided data stream. This can be
         RDF or non-RDF content, or None. In the latter case, an empty container
         is created.
         @param **kwargs Arguments passed to the LDP class constructor.
-        '''
+        """
         uri = nsc['fcres'][uid]
 
         if not stream and not mimetype:
@@ -149,11 +149,11 @@ class LdpFactory:
 
     @staticmethod
     def is_rdf_parsable(mimetype):
-        '''
+        """
         Checks whether a MIME type support RDF parsing by a RDFLib plugin.
 
-        @param mimetype (string) MIME type to check.
-        '''
+        :param str mimetype: MIME type to check.
+        """
         try:
             plugin.get(mimetype, parser.Parser)
         except plugin.PluginException:
@@ -164,11 +164,11 @@ class LdpFactory:
 
     @staticmethod
     def is_rdf_serializable(mimetype):
-        '''
+        """
         Checks whether a MIME type support RDF serialization by a RDFLib plugin
 
-        @param mimetype (string) MIME type to check.
-        '''
+        :param str mimetype: MIME type to check.
+        """
         try:
             plugin.get(mimetype, serializer.Serializer)
         except plugin.PluginException:
@@ -179,7 +179,7 @@ class LdpFactory:
 
     @staticmethod
     def mint_uid(parent_uid, path=None):
-        '''
+        """
         Mint a new resource UID based on client directives.
 
         This method takes a parent ID and a tentative path and returns an LDP
@@ -188,13 +188,13 @@ class LdpFactory:
         This may raise an exception resulting in a 404 if the parent is not
         found or a 409 if the parent is not a valid container.
 
-        @param parent_uid (string) UID of the parent resource. It must be an
+        :param str parent_uid: UID of the parent resource. It must be an
         existing LDPC.
-        @param path (string) path to the resource, relative to the parent.
+        :param str path: path to the resource, relative to the parent.
 
         @return string The confirmed resource UID. This may be different from
         what has been indicated.
-        '''
+        """
         def split_if_legacy(uid):
             if config['application']['store']['ldp_rs']['legacy_ptree_split']:
                 uid = tbox.split_uuid(uid)

+ 14 - 13
lakesuperior/model/ldp_nr.py

@@ -17,10 +17,10 @@ logger = logging.getLogger(__name__)
 
 
 class LdpNr(Ldpr):
-    '''LDP-NR (Non-RDF Source).
+    """LDP-NR (Non-RDF Source).
 
     Definition: https://www.w3.org/TR/ldp/#ldpnr
-    '''
+    """
 
     base_types = {
         nsc['fcrepo'].Binary,
@@ -31,9 +31,9 @@ class LdpNr(Ldpr):
 
     def __init__(self, uuid, stream=None, mimetype=None,
             disposition=None, **kwargs):
-        '''
+        """
         Extends Ldpr.__init__ by adding LDP-NR specific parameters.
-        '''
+        """
         super().__init__(uuid, **kwargs)
 
         self._imr_options = {}
@@ -68,11 +68,12 @@ class LdpNr(Ldpr):
 
 
     def create_or_replace(self, create_only=False):
-        '''
+        """
         Create a new binary resource with a corresponding RDF representation.
 
-        @param file (Stream) A Stream resource representing the uploaded file.
-        '''
+        :param bool create_only: Whether the resource is being created or
+        updated.
+        """
         # Persist the stream.
         self.digest, self.size = nonrdfly.persist(self.stream)
 
@@ -91,14 +92,14 @@ class LdpNr(Ldpr):
     ## PROTECTED METHODS ##
 
     def _add_srv_mgd_triples(self, create=False):
-        '''
+        """
         Add all metadata for the RDF representation of the LDP-NR.
 
-        @param stream (BufferedIO) The uploaded data stream.
-        @param mimetype (string) MIME type of the uploaded file.
-        @param disposition (defaultdict) The `Content-Disposition` header
-        content, parsed through `parse_rfc7240`.
-        '''
+        :param BufferedIO stream: The uploaded data stream.
+        :param str mimetype: MIME type of the uploaded file.
+        :param defaultdict disposition: The ``Content-Disposition`` header
+        content, parsed through ``parse_rfc7240``.
+        """
         super()._add_srv_mgd_triples(create)
 
         # File size.

+ 15 - 17
lakesuperior/model/ldp_rs.py

@@ -12,20 +12,21 @@ logger = logging.getLogger(__name__)
 
 
 class LdpRs(Ldpr):
-    '''
+    """
     LDP-RS (LDP RDF source).
 
     https://www.w3.org/TR/ldp/#ldprs
-    '''
+    """
     def __init__(self, uuid, repr_opts={}, handling='lenient', **kwargs):
-        '''
-        Extends Ldpr.__init__ by adding LDP-RS specific parameters.
-
-        @param handling (string) One of `strict`, `lenient` (the default) or
-        `none`. `strict` raises an error if a server-managed term is in the
-        graph. `lenient` removes all sever-managed triples encountered. `none`
-        skips all server-managed checks. It is used for internal modifications.
-        '''
+        """
+        Extends :meth:`Ldpr.__init__` by adding LDP-RS specific parameters.
+
+        :param str handling: One of ``strict``, ``lenient`` (the default) or
+        ``none``. ``strict`` raises an error if a server-managed term is in the
+        graph. ``lenient`` removes all server-managed triples encountered.
+        ``none`` skips all server-managed checks. It is used for internal
+        modifications.
+        """
         super().__init__(uuid, **kwargs)
         self.base_types = super().base_types | {
             nsc['fcrepo'].Container,
@@ -44,8 +45,7 @@ class LdpRs(Ldpr):
 
 
 class Ldpc(LdpRs):
-    '''LDPC (LDP Container).'''
-
+    """LDPC (LDP Container)."""
     def __init__(self, uuid, *args, **kwargs):
         super().__init__(uuid, *args, **kwargs)
         self.base_types |= {
@@ -56,7 +56,7 @@ class Ldpc(LdpRs):
 
 
 class LdpBc(Ldpc):
-    '''LDP-BC (LDP Basic Container).'''
+    """LDP-BC (LDP Basic Container)."""
     def __init__(self, uuid, *args, **kwargs):
         super().__init__(uuid, *args, **kwargs)
         self.base_types |= {
@@ -66,8 +66,7 @@ class LdpBc(Ldpc):
 
 
 class LdpDc(Ldpc):
-    '''LDP-DC (LDP Direct Container).'''
-
+    """LDP-DC (LDP Direct Container)."""
     def __init__(self, uuid, *args, **kwargs):
         super().__init__(uuid, *args, **kwargs)
         self.base_types |= {
@@ -77,8 +76,7 @@ class LdpDc(Ldpc):
 
 
 class LdpIc(Ldpc):
-    '''LDP-IC (LDP Indirect Container).'''
-
+    """LDP-IC (LDP Indirect Container)."""
     def __init__(self, uuid, *args, **kwargs):
         super().__init__(uuid, *args, **kwargs)
         self.base_types |= {

+ 15 - 14
lakesuperior/model/ldpr.py

@@ -106,8 +106,8 @@ class Ldpr(metaclass=ABCMeta):
     ## MAGIC METHODS ##
 
     def __init__(self, uid, repr_opts={}, provided_imr=None, **kwargs):
-        """Instantiate an in-memory LDP resource that can be loaded from and
-        persisted to storage.
+        """
+        Instantiate an in-memory LDP resource.
 
         :param str uid: uid of the resource. If None (must be explicitly
         set) it refers to the root node. It can also be the full URI or URN,
@@ -136,7 +136,7 @@ class Ldpr(metaclass=ABCMeta):
         The RDFLib resource representing this LDPR. This is a live
         representation of the stored data if present.
 
-        @return rdflib.resource.Resource
+        :rtype: rdflib.resource.Resource
         """
         if not hasattr(self, '_rsrc'):
             self._rsrc = rdfly.ds.resource(self.uri)
@@ -285,7 +285,7 @@ class Ldpr(metaclass=ABCMeta):
     def types(self):
         """All RDF types.
 
-        @return set(rdflib.term.URIRef)
+        :rtype: set(rdflib.term.URIRef)
         """
         if not hasattr(self, '_types'):
             if len(self.metadata.graph):
@@ -305,7 +305,7 @@ class Ldpr(metaclass=ABCMeta):
     def ldp_types(self):
         """The LDP types.
 
-        @return set(rdflib.term.URIRef)
+        :rtype: set(rdflib.term.URIRef)
         """
         if not hasattr(self, '_ldp_types'):
             self._ldp_types = {t for t in self.types if nsc['ldp'] in t}
@@ -486,14 +486,14 @@ class Ldpr(metaclass=ABCMeta):
         """
         tstone_trp = set(rdfly.extract_imr(self.uid, strict=False).graph)
 
-        ver_rsp = self.version_info.graph.query("""
+        ver_rsp = self.version_info.graph.query('''
         SELECT ?uid {
           ?latest fcrepo:hasVersionLabel ?uid ;
             fcrepo:created ?ts .
         }
         ORDER BY DESC(?ts)
         LIMIT 1
-        """)
+        ''')
         ver_uid = str(ver_rsp.bindings[0]['uid'])
         ver_trp = set(rdfly.get_metadata(self.uid, ver_uid).graph)
 
@@ -686,7 +686,7 @@ class Ldpr(metaclass=ABCMeta):
         """
         Add server-managed triples to a provided IMR.
 
-        :param  create: Whether the resource is being created.
+        :param create: Whether the resource is being created.
         """
         # Base LDP types.
         for t in self.base_types:
@@ -725,11 +725,11 @@ class Ldpr(metaclass=ABCMeta):
         is found.
 
         E.g. if only fcres:/a exists:
-        - If fcres:/a/b/c/d is being created, a becomes container of
-          fcres:/a/b/c/d. Also, containers are created for fcres:a/b and
-          fcres:/a/b/c.
-        - If fcres:/e is being created, the root node becomes container of
-          fcres:/e.
+        - If ``fcres:/a/b/c/d`` is being created, a becomes container of
+          ``fcres:/a/b/c/d``. Also, containers are created for fcres:a/b and
+          ``fcres:/a/b/c``.
+        - If ``fcres:/e`` is being created, the root node becomes container of
+          ``fcres:/e``.
 
         :param bool create: Whether the resource is being created. If false,
         the parent container is not updated.
@@ -775,7 +775,8 @@ class Ldpr(metaclass=ABCMeta):
         Remove duplicate triples from add and remove delta graphs, which would
         otherwise contain unnecessary statements that annul each other.
 
-        @return tuple 2 "clean" sets of respectively remove statements and
+        :rtype: tuple
+        :return: 2 "clean" sets of respectively remove statements and
         add statements.
         """
         return (

+ 7 - 13
lakesuperior/store/ldp_nr/base_non_rdf_layout.py

@@ -7,18 +7,18 @@ logger = logging.getLogger(__name__)
 
 
 class BaseNonRdfLayout(metaclass=ABCMeta):
-    '''
+    """
     Abstract class for setting the non-RDF (bitstream) store layout.
 
     Differerent layouts can be created by implementing all the abstract methods
     of this class. A non-RDF layout is not necessarily restricted to a
     traditional filesystem—e.g. a layout persisting to HDFS can be written too.
-    '''
+    """
 
     def __init__(self, config):
-        '''
+        """
         Initialize the base non-RDF store layout.
-        '''
+        """
         self.config = config
         self.root = config['path']
 
@@ -27,23 +27,17 @@ class BaseNonRdfLayout(metaclass=ABCMeta):
 
     @abstractmethod
     def persist(self, stream):
-        '''
-        Store the stream in the designated persistence layer for this layout.
-        '''
+        """Store the stream in the designated persistence layer."""
         pass
 
 
     @abstractmethod
     def delete(self, id):
-        '''
-        Delete a stream by its identifier (i.e. checksum).
-        '''
+        """Delete a stream by its identifier (i.e. checksum)."""
         pass
 
 
     @abstractmethod
     def local_path(self, uuid):
-        '''
-        Return the local path of a file.
-        '''
+        """Return the local path of a file."""
         pass

+ 15 - 18
lakesuperior/store/ldp_nr/default_layout.py

@@ -12,18 +12,21 @@ logger = logging.getLogger(__name__)
 
 
 class DefaultLayout(BaseNonRdfLayout):
-    '''
+    """
     Default file layout.
-    '''
+
+    This is a simple filesystem layout that stores binaries in pairtree folders
+    in a local filesystem. Parameters can be specified for the pairtree
+    branch length and count.
+    """
     @staticmethod
     def local_path(root, uuid, bl=4, bc=4):
-        '''
+        """
         Generate the resource path splitting the resource checksum according to
         configuration parameters.
 
-        @param uuid (string) The resource UUID. This corresponds to the content
+        :param str uuid: The resource UUID. This corresponds to the content
         checksum.
-        '''
+        """
         logger.debug('Generating path from uuid: {}'.format(uuid))
         term = len(uuid) if bc == 0 else min(bc * bl, len(uuid))
 
@@ -37,9 +40,7 @@ class DefaultLayout(BaseNonRdfLayout):
 
 
     def __init__(self, *args, **kwargs):
-        '''
-        Set up path segmentation parameters.
-        '''
+        """Set up path segmentation parameters."""
         super().__init__(*args, **kwargs)
 
         self.bl = self.config['pairtree_branch_length']
@@ -49,9 +50,7 @@ class DefaultLayout(BaseNonRdfLayout):
     ## INTERFACE METHODS ##
 
     def bootstrap(self):
-        '''
-        Initialize binary file store.
-        '''
+        """Initialize binary file store."""
         try:
             shutil.rmtree(self.root)
         except FileNotFoundError:
@@ -60,7 +59,7 @@ class DefaultLayout(BaseNonRdfLayout):
 
 
     def persist(self, stream, bufsize=8192):
-        '''
+        """
         Store the stream in the file system.
 
         This method handles the file in chunks. for each chunk it writes to a
@@ -68,9 +67,9 @@ class DefaultLayout(BaseNonRdfLayout):
         to disk and hashed, the temp file is moved to its final location which
         is determined by the hash value.
 
-        @param stream (IOstream): file-like object to persist.
-        @param bufsize (int) Chunk size. 2**12 to 2**15 is a good range.
-        '''
+        :param IOstream stream: file-like object to persist.
+        :param int bufsize: Chunk size. 2**12 to 2**15 is a good range.
+        """
         tmp_file = '{}/tmp/{}'.format(self.root, uuid4())
         try:
             with open(tmp_file, 'wb') as f:
@@ -111,7 +110,5 @@ class DefaultLayout(BaseNonRdfLayout):
 
 
     def delete(self, uuid):
-        '''
-        See BaseNonRdfLayout.delete.
-        '''
+        """See BaseNonRdfLayout.delete."""
         os.unlink(__class__.local_path(self.root, uuid, self.bl, self.bc))

+ 162 - 167
lakesuperior/store/ldp_rs/lmdb_store.py

@@ -19,21 +19,21 @@ logger = logging.getLogger(__name__)
 
 
 def s2b(u, enc='UTF-8'):
-    '''
+    """
     Convert a string into a bytes object.
-    '''
+    """
     return u.encode(enc)
 
 
 def b2s(u, enc='UTF-8'):
-    '''
+    """
     Convert a bytes or memoryview object into a string.
-    '''
+    """
     return bytes(u).decode(enc)
 
 
 class TxnManager(ContextDecorator):
-    '''
+    """
     Handle ACID transactions with an LmdbStore.
 
     Wrap this within a `with` statement:
@@ -43,15 +43,15 @@ class TxnManager(ContextDecorator):
     >>>
 
     The transaction will be opened and handled automatically.
-    '''
+    """
     def __init__(self, store, write=False):
-        '''
+        """
         Begin and close a transaction in a store.
 
-        @param store (LmdbStore) The store to open a transaction on.
-        @param write (bool) Whether the transaction is read-write. Default is
+        :param LmdbStore store: The store to open a transaction on.
+        :param bool write: Whether the transaction is read-write. Default is
         False (read-only transaction).
-        '''
+        """
         self.store = store
         self.write = write
 
@@ -69,34 +69,34 @@ class TxnManager(ContextDecorator):
 
 
 class LexicalSequence:
-    '''
+    """
     Fixed-length lexicographically ordered byte sequence.
 
     Useful to generate optimized sequences of keys in LMDB.
-    '''
+    """
     def __init__(self, start=1, max_len=5):
-        '''
-        @param start (bytes) Starting byte value. Bytes below this value are
+        """
+        Create a new lexical sequence.
+
+        :param bytes start: Starting byte value. Bytes below this value are
         never found in this sequence. This is useful to allot special bytes
         to be used e.g. as separators.
-        @param max_len (int) Maximum number of bytes that a byte string can
+        :param int max_len: Maximum number of bytes that a byte string can
         contain. This should be chosen carefully since the number of all
         possible key combinations is determined by this value and the `start`
         value. The default args provide 255**5 (~1 Tn) unique combinations.
-        '''
+        """
         self.start = start
         self.length = max_len
 
 
     def first(self):
-        '''
-        First possible combination.
-        '''
+        """First possible combination."""
         return bytearray([self.start] * self.length)
 
 
     def next(self, n):
-        '''
+        """
         Calculate the next closest byte sequence in lexicographical order.
 
         This is used to fill the next available slot after the last one in
@@ -107,8 +107,8 @@ class LexicalSequence:
         This function assumes that all the keys are padded with the `start`
         value up to the `max_len` length.
 
-        @param n (bytes) Current byte sequence to add to.
-        '''
+        :param bytes n: Current byte sequence to add to.
+        """
         if not n:
             n = self.first()
         elif isinstance(n, bytes) or isinstance(n, memoryview):
@@ -137,7 +137,7 @@ class LexicalSequence:
 
 
 class LmdbStore(Store):
-    '''
+    """
     LMDB-backed store.
 
     This is an implementation of the RDFLib Store interface:
@@ -172,7 +172,7 @@ class LmdbStore(Store):
     (also in a SPARQL query) will look in the  union graph instead of in the
     default graph. Also, removing triples without specifying a context will
     remove triples from all contexts.
-    '''
+    """
 
     context_aware = True
     # This is a hassle to maintain for no apparent gain. If some use is devised
@@ -181,19 +181,19 @@ class LmdbStore(Store):
     graph_aware = True
     transaction_aware = True
 
-    '''
+    """
     LMDB map size. See http://lmdb.readthedocs.io/en/release/#environment-class
-    '''
+    """
     MAP_SIZE = 1024 ** 4 # 1Tb
 
-    '''
+    """
     Key hashing algorithm. If you are paranoid, use SHA1. Otherwise, MD5 is
     faster and takes up less space (16 bytes vs. 20 bytes). This may make a
     visible difference because keys are generated and parsed very often.
-    '''
+    """
     KEY_HASH_ALGO = 'sha1'
 
-    '''
+    """
     Fixed length for term keys.
 
     4 or 5 is a safe range. 4 allows for ~4 billion (256 ** 4) unique terms
@@ -209,13 +209,13 @@ class LmdbStore(Store):
     could improve performance since keys make up the vast majority of record
     exchange between the store and the application. However it is sensible not
     to expose this value as a configuration option.
-    '''
+    """
     KEY_LENGTH = 5
 
-    '''
+    """
     Lexical sequence start. `\x01` is fine since no special characters are used,
     but it's good to leave a spare for potential future use.
-    '''
+    """
     KEY_START = 1
 
     data_keys = (
@@ -237,19 +237,19 @@ class LmdbStore(Store):
         's:po', 'p:so', 'o:sp', 'c:spo',
     )
 
-    '''
+    """
     Order in which keys are looked up if two terms are bound.
     The indices with the smallest average number of values per key should be
     looked up first.
 
     If we want to get fancy, this can be rebalanced from time to time by
     looking up the number of keys in (s:po, p:so, o:sp).
-    '''
+    """
     _lookup_rank = ('s', 'o', 'p')
 
-    '''
+    """
     Order of terms in the lookup indices. Used to rebuild a triple from lookup.
-    '''
+    """
     _lookup_ordering = {
         's:po': (0, 1, 2),
         'p:so': (1, 0, 2),
@@ -279,19 +279,17 @@ class LmdbStore(Store):
 
 
     def __del__(self):
-        '''
-        Properly close store for garbage collection.
-        '''
+        """Properly close store for garbage collection."""
         self.close(True)
 
 
     def __len__(self, context=None):
-        '''
+        """
         Return length of the dataset.
 
-        @param context (rdflib.URIRef | rdflib.Graph) Context to restrict count
-        to.
-        '''
+        :param context: Context to restrict count to.
+        :type context: rdflib.URIRef or rdflib.Graph
+        """
         context = self._normalize_context(context)
 
         if context is not None:
@@ -311,7 +309,7 @@ class LmdbStore(Store):
 
 
     def open(self, configuration=None, create=True):
-        '''
+        """
         Open the database.
 
         The database is best left open for the lifespan of the server. Read
@@ -321,7 +319,7 @@ class LmdbStore(Store):
 
         This method is called outside of the main transaction. All cursors
         are created separately within the transaction.
-        '''
+        """
         self._init_db_environments(create)
         if self.data_env == NO_STORE:
             return NO_STORE
@@ -331,9 +329,9 @@ class LmdbStore(Store):
 
 
     def begin(self, write=False):
-        '''
+        """
         Begin the main write transaction and create cursors.
-        '''
+        """
         if not self.is_open:
             raise RuntimeError('Store must be opened first.')
         logger.debug('Beginning a {} transaction.'.format(
@@ -346,9 +344,7 @@ class LmdbStore(Store):
 
 
     def stats(self):
-        '''
-        Gather statistics about the database.
-        '''
+        """Gather statistics about the database."""
         stats = {
             'data_db_stats': {
                 db_label: self.data_txn.stat(self.dbs[db_label])
@@ -368,9 +364,7 @@ class LmdbStore(Store):
 
     @property
     def is_txn_open(self):
-        '''
-        Whether the main transaction is open.
-        '''
+        """Whether the main transaction is open."""
         try:
             self.data_txn.id()
             self.idx_txn.id()
@@ -383,9 +377,7 @@ class LmdbStore(Store):
 
 
     def cur(self, index):
-        '''
-        Return a new cursor by its index.
-        '''
+        """Return a new cursor by its index."""
         if index in self.idx_keys:
             txn = self.idx_txn
             src = self.idx_keys
@@ -399,14 +391,14 @@ class LmdbStore(Store):
 
 
     def get_data_cursors(self, txn):
-        '''
+        """
         Build the main data cursors for a transaction.
 
-        @param txn (lmdb.Transaction) This can be a read or write transaction.
+        :param lmdb.Transaction txn: This can be a read or write transaction.
 
-        @return dict(string, lmdb.Cursor) Keys are index labels, values are
-        index cursors.
-        '''
+        :rtype: dict(string, lmdb.Cursor)
+        :return: Keys are index labels, values are index cursors.
+        """
         return {
             'tk:t': txn.cursor(self.dbs['tk:t']),
             'tk:c': txn.cursor(self.dbs['tk:c']),
@@ -415,25 +407,25 @@ class LmdbStore(Store):
 
 
     def get_idx_cursors(self, txn):
-        '''
+        """
         Build the index cursors for a transaction.
 
-        @param txn (lmdb.Transaction) This can be a read or write transaction.
+        :param lmdb.Transaction txn: This can be a read or write transaction.
 
-        @return dict(string, lmdb.Cursor) Keys are index labels, values are
-        index cursors.
+        :rtype: dict(string, lmdb.Cursor)
+        :return: Keys are index labels, values are index cursors.
-        '''
+        """
         return {
             key: txn.cursor(self.dbs[key])
             for key in self.idx_keys}
 
 
     def close(self, commit_pending_transaction=False):
-        '''
+        """
         Close the database connection.
 
         Do this at server shutdown.
-        '''
+        """
         self.__open = False
         if self.is_txn_open:
             if commit_pending_transaction:
@@ -446,26 +438,27 @@ class LmdbStore(Store):
 
 
     def destroy(self, path):
-        '''
+        """
         Destroy the store.
 
         https://www.youtube.com/watch?v=lIVq7FJnPwg
 
-        @param path (string) Path of the folder containing the database(s).
-        '''
+        :param str path: Path of the folder containing the database(s).
+        """
         if exists(path):
             rmtree(path)
 
 
     def add(self, triple, context=None, quoted=False):
-        '''
+        """
         Add a triple and start indexing.
 
-        @param triple (tuple:rdflib.Identifier) Tuple of three identifiers.
-        @param context (rdflib.Identifier | None) Context identifier.
-        'None' inserts in the default graph.
-        @param quoted (bool) Not used.
-        '''
+        :param tuple triple: Tuple of three rdflib.Identifier terms.
+        :param context: Context identifier. ``None`` inserts in the default
+        graph.
+        :type context: rdflib.Identifier or None
+        :param bool quoted: Not used.
+        """
         context = self._normalize_context(context)
         if context is None:
             context = RDFLIB_DEFAULT_GRAPH_URI
@@ -512,16 +505,16 @@ class LmdbStore(Store):
 
 
     def remove(self, triple_pattern, context=None):
-        '''
+        """
         Remove triples by a pattern.
 
-        @param triple_pattern (tuple:rdflib.term.Identifier|None) 3-tuple of
+        :param tuple triple_pattern: 3-tuple of
         either RDF terms or None, indicating the triple(s) to be removed.
         None is used as a wildcard.
-        @param context (rdflib.term.Identifier|None) Context to remove the
-        triples from. If None (the default) the matching triples are removed
-        from all contexts.
-        '''
+        :param context: Context to remove the triples from. If None (the
+        default) the matching triples are removed from all contexts.
+        :type context: rdflib.term.Identifier or None
+        """
         #logger.debug('Removing triples by pattern: {} on context: {}'.format(
         #    triple_pattern, context))
         context = self._normalize_context(context)
@@ -562,18 +555,18 @@ class LmdbStore(Store):
 
 
     def triples(self, triple_pattern, context=None):
-        '''
+        """
         Generator over matching triples.
 
-        @param triple_pattern (tuple) 3 RDFLib terms
-        @param context (rdflib.Graph | None) Context graph, if available.
+        :param tuple triple_pattern: 3 RDFLib terms
+        :param context: Context graph, if available (rdflib.Graph or None).
 
-        @return Generator over triples and contexts in which each result has
+        :return: Generator over triples and contexts in which each result has
         the following format:
         > (s, p, o), generator(contexts)
         Where the contexts generator lists all context that the triple appears
         in.
-        '''
+        """
         #logger.debug('Getting triples for pattern: {} and context: {}'.format(
         #    triple_pattern, context))
         # This sounds strange, RDFLib should be passing None at this point,
@@ -620,12 +613,12 @@ class LmdbStore(Store):
 
 
     def bind(self, prefix, namespace):
-        '''
+        """
         Bind a prefix to a namespace.
 
-        @param prefix (string) Namespace prefix.
-        @param namespace (rdflib.URIRef) Fully qualified URI of namespace.
-        '''
+        :param str prefix: Namespace prefix.
+        :param rdflib.URIRef namespace: Fully qualified URI of namespace.
+        """
         prefix = s2b(prefix)
         namespace = s2b(namespace)
         if self.is_txn_rw:
@@ -643,44 +636,42 @@ class LmdbStore(Store):
 
 
     def namespace(self, prefix):
-        '''
+        """
         Get the namespace for a prefix.
-        @param prefix (string) Namespace prefix.
-        '''
+        :param str prefix: Namespace prefix.
+        """
         with self.cur('pfx:ns') as cur:
             ns = cur.get(s2b(prefix))
             return Namespace(b2s(ns)) if ns is not None else None
 
 
     def prefix(self, namespace):
-        '''
+        """
         Get the prefix associated with a namespace.
 
         @NOTE A namespace can be only bound to one prefix in this
         implementation.
 
-        @param namespace (rdflib.URIRef) Fully qualified URI of namespace.
-        '''
+        :param rdflib.URIRef namespace: Fully qualified URI of namespace.
+        """
         with self.cur('ns:pfx') as cur:
             prefix = cur.get(s2b(namespace))
             return b2s(prefix) if prefix is not None else None
 
 
     def namespaces(self):
-        '''
-        Get an iterator of all prefix: namespace bindings.
-        '''
+        """Get an iterator of all prefix: namespace bindings."""
         with self.cur('pfx:ns') as cur:
             for pfx, ns in iter(cur):
                 yield (b2s(pfx), Namespace(b2s(ns)))
 
 
     def contexts(self, triple=None):
-        '''
+        """
         Get a list of all contexts.
 
-        @return generator(Graph)
+        :rtype: Iterator(Graph)
-        '''
+        """
         if triple and any(triple):
             with self.cur('spo:c') as cur:
                 if cur.set_key(self._to_key(triple)):
@@ -695,7 +686,7 @@ class LmdbStore(Store):
 
 
     def add_graph(self, graph):
-        '''
+        """
         Add a graph to the database.
 
         This creates an empty graph by associating the graph URI with the
@@ -707,8 +698,8 @@ class LmdbStore(Store):
         Therefore it needs to open a write transaction. This is not ideal
         but the only way to handle datasets in RDFLib.
 
-        @param graph (URIRef) URI of the named graph to add.
-        '''
+        :param URIRef graph: URI of the named graph to add.
+        """
         if isinstance(graph, Graph):
             graph = graph.identifier
         pk_c = self._pickle(graph)
@@ -738,11 +729,11 @@ class LmdbStore(Store):
 
 
     def remove_graph(self, graph):
-        '''
+        """
         Remove all triples from graph and the graph itself.
 
-        @param graph (URIRef) URI of the named graph to remove.
-        '''
+        :param URIRef graph: URI of the named graph to remove.
+        """
         if isinstance(graph, Graph):
             graph = graph.identifier
         self.remove((None, None, None), graph)
@@ -753,9 +744,7 @@ class LmdbStore(Store):
 
 
     def commit(self):
-        '''
-        Commit main transaction and push action queue.
-        '''
+        """Commit main transaction."""
         logger.debug('Committing transaction.')
         try:
             self.data_txn.commit()
@@ -769,9 +758,7 @@ class LmdbStore(Store):
 
 
     def rollback(self):
-        '''
-        Roll back main transaction.
-        '''
+        """Roll back main transaction."""
         logger.debug('Rolling back transaction.')
         try:
             self.data_txn.abort()
@@ -787,16 +774,17 @@ class LmdbStore(Store):
     ## PRIVATE METHODS ##
 
     def _triple_keys(self, triple_pattern, context=None):
-        '''
+        """
         Generator over matching triple keys.
 
         This method is used by `triples` which returns native Python tuples,
         as well as by other methods that need to iterate and filter triple
         keys without incurring in the overhead of converting them to triples.
 
-        @param triple_pattern (tuple) 3 RDFLib terms
-        @param context (rdflib.Graph | None) Context graph or URI, or None.
-        '''
+        :param tuple triple_pattern: 3 RDFLib terms
+        :param context: Context graph or URI, or None.
+        :type context: rdflib.term.Identifier or None
+        """
         if context == self:
             context = None
 
@@ -842,16 +830,16 @@ class LmdbStore(Store):
 
 
     def _init_db_environments(self, create=True):
-        '''
+        """
         Initialize the DB environment.
 
         The main database is kept in one file, the indices in a separate one
         (these may be even further split up depending on performance
         considerations).
 
-        @param create (bool) If True, the environment and its databases are
+        :param bool create: If True, the environment and its databases are
         created.
-        '''
+        """
         path = self.path
         if not exists(path):
             if create is True:
@@ -892,14 +880,15 @@ class LmdbStore(Store):
 
 
     def _from_key(self, key):
-        '''
+        """
         Convert a key into one or more terms.
 
-        @param key (bytes | memoryview) The key to be converted. It can be a
+        :param key: The key to be converted. It can be a
         compound one in which case the function will return multiple terms.
+        :type key: bytes or memoryview
 
-        @return tuple
+        :rtype: tuple
-        '''
+        """
         with self.cur('t:st') as cur:
             return tuple(
                    self._unpickle(cur.get(k))
@@ -907,20 +896,21 @@ class LmdbStore(Store):
 
 
     def _to_key(self, obj):
-        '''
+        """
         Convert a triple, quad or term into a key.
 
         The key is the checksum of the pickled object, therefore unique for
         that object. The hashing algorithm is specified in `KEY_HASH_ALGO`.
 
-        @param obj (Object) Anything that can be reduced to terms stored in the
+        :param Object obj: Anything that can be reduced to terms stored in the
         database. Pairs of terms, as well as triples and quads, are expressed
         as tuples.
 
         If more than one term is provided, the keys are concatenated.
 
-        @return bytes
-        '''
+        :rtype: memoryview
+        :return: Keys stored for the term(s)
+        """
         if not isinstance(obj, list) and not isinstance(obj, tuple):
             obj = (obj,)
         key = []
@@ -936,33 +926,33 @@ class LmdbStore(Store):
 
 
     def _hash(self, s):
-        '''
-        Get the hash value of a serialized object.
-        '''
+        """Get the hash value of a serialized object."""
         return hashlib.new(self.KEY_HASH_ALGO, s).digest()
 
 
     def _split_key(self, keys):
-        '''
+        """
         Split a compound key into individual keys.
 
         This method relies on the fixed length of all term keys.
 
-        @param keys (bytes | memoryview) Concatenated keys.
+        :param keys: Concatenated keys.
+        :type keys: bytes or memoryview
 
-        @return tuple: bytes | memoryview
-        '''
+        :rtype: tuple(memoryview)
+        """
         return tuple(
                 keys[i:i+self.KEY_LENGTH]
                 for i in range(0, len(keys), self.KEY_LENGTH))
 
 
     def _normalize_context(self, context):
-        '''
+        """
         Normalize a context parameter to conform to the model expectations.
 
-        @param context (URIRef | Graph | None) Context URI or graph.
-        '''
+        :param context: Context URI or graph.
+        :type context: URIRef or Graph or None
+        """
         if isinstance(context, Graph):
             if context == self or isinstance(context.identifier, Variable):
                 context = None
@@ -974,11 +964,12 @@ class LmdbStore(Store):
 
 
     def _lookup(self, triple_pattern):
-        '''
+        """
         Look up triples in the indices based on a triple pattern.
 
-        @return iterator of matching triple keys.
-        '''
+        :rtype: Iterator
+        :return: Matching triple keys.
+        """
         s, p, o = triple_pattern
 
         if s is not None:
@@ -1022,15 +1013,16 @@ class LmdbStore(Store):
 
 
     def _lookup_1bound(self, label, term):
-        '''
+        """
         Lookup triples for a pattern with one bound term.
 
-        @param label (string) Which term is being searched for. One of `s`,
+        :param str label: Which term is being searched for. One of `s`,
         `p`, or `o`.
-        @param term (rdflib.URIRef) Bound term to search for.
+        :param rdflib.URIRef term: Bound term to search for.
 
-        @return iterator(bytes) SPO keys matching the pattern.
-        '''
+        :rtype: iterator(bytes)
+        :return: SPO keys matching the pattern.
+        """
         k = self._to_key(term)
         if not k:
             return iter(())
@@ -1051,15 +1043,16 @@ class LmdbStore(Store):
 
 
     def _lookup_2bound(self, bound_terms):
-        '''
+        """
         Look up triples for a pattern with two bound terms.
 
-        @param bound terms (dict) Triple labels and terms to search for,
+        :param dict bound_terms: Triple labels and terms to search for,
         in the format of, e.g. {'s': URIRef('urn:s:1'), 'o':
         URIRef('urn:o:1')}
 
-        @return iterator(bytes) SPO keys matching the pattern.
-        '''
+        :rtype: iterator(bytes)
+        :return: SPO keys matching the pattern.
+        """
         if len(bound_terms) != 2:
             raise ValueError(
                     'Exactly 2 terms need to be bound. Got {}'.format(
@@ -1112,14 +1105,15 @@ class LmdbStore(Store):
 
 
     def _append(self, cur, values, **kwargs):
-        '''
+        """
         Append one or more values to the end of a database.
 
-        @param cur (lmdb.Cursor) The write cursor to act on.
-        @param data (list(bytes)) Value(s) to append.
+        :param lmdb.Cursor cur: The write cursor to act on.
+        :param list(bytes) values: Value(s) to append.
 
-        @return list(bytes) Last key(s) inserted.
-        '''
+        :rtype: list(memoryview)
+        :return: Last key(s) inserted.
+        """
         if not isinstance(values, list) and not isinstance(values, tuple):
             raise ValueError('Input must be a list or tuple.')
         data = []
@@ -1134,13 +1128,12 @@ class LmdbStore(Store):
 
 
     def _index_triple(self, action, spok):
-        '''
+        """
         Update index for a triple and context (add or remove).
 
-        @param action (string) 'add' or 'remove'.
-        @param spok (bytes) Triple key.
-        indexed. Context MUST be specified for 'add'.
-        '''
+        :param str action: 'add' or 'remove'.
+        :param bytes spok: Triple key.
+        """
         # Split and rearrange-join keys for association and indices.
         triple = self._split_key(spok)
         sk, pk, ok = triple
@@ -1173,13 +1166,14 @@ class LmdbStore(Store):
     ## debugging.
 
     def _keys_in_ctx(self, pk_ctx):
-        '''
+        """
         Convenience method to list all keys in a context.
 
-        @param pk_ctx (bytes) Pickled context URI.
+        :param bytes pk_ctx: Pickled context URI.
 
-        @return Iterator:tuple Generator of triples.
-        '''
+        :rtype: Iterator(tuple)
+        :return: Generator of triples.
+        """
         with self.cur('c:spo') as cur:
             if cur.set_key(pk_ctx):
                 tkeys = cur.iternext_dup()
@@ -1189,13 +1183,14 @@ class LmdbStore(Store):
 
 
     def _ctx_for_key(self, tkey):
-        '''
+        """
         Convenience method to list all contexts that a key is in.
 
-        @param tkey (bytes) Triple key.
+        :param bytes tkey: Triple key.
 
-        @return Iterator:URIRef Generator of context URIs.
-        '''
+        :rtype: Iterator(rdflib.URIRef)
+        :return: Generator of context URIs.
+        """
         with self.cur('spo:c') as cur:
             if cur.set_key(tkey):
                 ctx = cur.iternext_dup()